diff --git a/.github/ISSUE_TEMPLATE/🐛-bug-report.md b/.github/ISSUE_TEMPLATE/🐛-bug-report.md new file mode 100644 index 0000000000..3553cbda8a --- /dev/null +++ b/.github/ISSUE_TEMPLATE/🐛-bug-report.md @@ -0,0 +1,32 @@ +--- +name: "\U0001F41B Bug report" +about: Create a report to help us improve +title: '' +labels: bug +assignees: '' + +--- + + + +Dify version: Cloud | Self Host + +## Steps To Reproduce + + +1. +2. + + +## The current behavior + + +## The expected behavior diff --git a/.github/ISSUE_TEMPLATE/🚀-feature-request.md b/.github/ISSUE_TEMPLATE/🚀-feature-request.md new file mode 100644 index 0000000000..34f39d2fa2 --- /dev/null +++ b/.github/ISSUE_TEMPLATE/🚀-feature-request.md @@ -0,0 +1,20 @@ +--- +name: "\U0001F680 Feature request" +about: Suggest an idea for this project +title: '' +labels: enhancement +assignees: '' + +--- + +**Is your feature request related to a problem? Please describe.** +A clear and concise description of what the problem is. Ex. I'm always frustrated when [...] + +**Describe the solution you'd like** +A clear and concise description of what you want to happen. + +**Describe alternatives you've considered** +A clear and concise description of any alternative solutions or features you've considered. + +**Additional context** +Add any other context or screenshots about the feature request here. 
diff --git a/.github/ISSUE_TEMPLATE/🤔-questions-and-help.md b/.github/ISSUE_TEMPLATE/🤔-questions-and-help.md new file mode 100644 index 0000000000..df93653cac --- /dev/null +++ b/.github/ISSUE_TEMPLATE/🤔-questions-and-help.md @@ -0,0 +1,10 @@ +--- +name: "\U0001F914 Questions and Help" +about: Ask a usage or consultation question +title: '' +labels: '' +assignees: '' + +--- + + diff --git a/CONTRIBUTING_JA.md b/CONTRIBUTING_JA.md new file mode 100644 index 0000000000..f83c4b3fc3 --- /dev/null +++ b/CONTRIBUTING_JA.md @@ -0,0 +1,55 @@ +# コントリビュート + +[Dify](https://dify.ai) に興味を持ち、貢献したいと思うようになったことに感謝します!始める前に、 +[行動規範](https://github.com/langgenius/.github/blob/main/CODE_OF_CONDUCT.md)を読み、 +[既存の問題](https://github.com/langgenius/langgenius-gateway/issues)をチェックしてください。 +本ドキュメントは、[Dify](https://dify.ai) をビルドしてテストするための開発環境の構築方法を説明するものです。 + +### 依存関係のインストール + +[Dify](https://dify.ai)をビルドするには、お使いのマシンに以下の依存関係をインストールし、設定する必要があります: + +- [Git](http://git-scm.com/) +- [Docker](https://www.docker.com/) +- [Docker Compose](https://docs.docker.com/compose/install/) +- [Node.js v18.x (LTS)](http://nodejs.org) +- [npm](https://www.npmjs.com/) バージョン 8.x.x もしくは [Yarn](https://yarnpkg.com/) +- [Python](https://www.python.org/) バージョン 3.10.x + +## ローカル開発 + +開発環境を構築するには、プロジェクトの git リポジトリをフォークし、適切なパッケージマネージャを使用してバックエンドとフロントエンドの依存関係をインストールし、docker-compose スタックを実行するように作成します。 + +### リポジトリのフォーク + +[リポジトリ](https://github.com/langgenius/dify) をフォークする必要があります。 + +### リポジトリのクローン + +GitHub でフォークしたリポジトリのクローンを作成する: + +``` +git clone git@github.com:/dify.git +``` + +### バックエンドのインストール + +バックエンドアプリケーションのインストール方法については、[Backend README](api/README.md) を参照してください。 + +### フロントエンドのインストール + +フロントエンドアプリケーションのインストール方法については、[Frontend README](web/README.md) を参照してください。 + +### ブラウザで dify にアクセス + +[Dify](https://dify.ai) をローカル環境で見ることができるようになりました [http://localhost:3000](http://localhost:3000)。 + +## プルリクエストの作成 + +変更後、プルリクエスト (PR) をオープンしてください。プルリクエストを提出すると、Dify チーム/コミュニティの他の人があなたと一緒にそれをレビューします。 + 
+マージコンフリクトなどの問題が発生したり、プルリクエストの開き方がわからなくなったりしませんでしたか? [GitHub's pull request tutorial](https://docs.github.com/en/pull-requests/collaborating-with-pull-requests) で、マージコンフリクトやその他の問題を解決する方法をチェックしてみてください。あなたの PR がマージされると、[コントリビュータチャート](https://github.com/langgenius/langgenius-gateway/graphs/contributors)にコントリビュータとして誇らしげに掲載されます。 + +## コミュニティチャンネル + +お困りですか?何か質問がありますか? [Discord Community サーバ](https://discord.gg/AhzKf7dNgk)に参加してください。私たちがお手伝いします! diff --git a/README.md b/README.md index e80559bcf7..9610cda8ae 100644 --- a/README.md +++ b/README.md @@ -1,11 +1,15 @@ ![](./images/describe-en.png)

English | - 简体中文 + 简体中文 | + 日本語

[Website](https://dify.ai) • [Docs](https://docs.dify.ai) • [Twitter](https://twitter.com/dify_ai) • [Discord](https://discord.gg/FngNHpbcY7) +Vote for us on Product Hunt ↓ +Product Hunt Badge + **Dify** is an easy-to-use LLMOps platform designed to empower more people to create sustainable, AI-native applications. With visual orchestration for various application types, Dify offers out-of-the-box, ready-to-use applications that can also serve as Backend-as-a-Service APIs. Unify your development process with one API for plugins and datasets integration, and streamline your operations using a single interface for prompt engineering, visual analytics, and continuous improvement. Applications created with Dify include: diff --git a/README_CN.md b/README_CN.md index c72c03bd6a..e37399343e 100644 --- a/README_CN.md +++ b/README_CN.md @@ -1,12 +1,16 @@ ![](./images/describe-cn.jpg)

English | - 简体中文 + 简体中文 | + 日本語

[官方网站](https://dify.ai) • [文档](https://docs.dify.ai/v/zh-hans) • [Twitter](https://twitter.com/dify_ai) • [Discord](https://discord.gg/FngNHpbcY7) +在 Product Hunt 上投我们一票吧 ↓ +Product Hunt Badge + **Dify** 是一个易用的 LLMOps 平台,旨在让更多人可以创建可持续运营的原生 AI 应用。Dify 提供多种类型应用的可视化编排,应用可开箱即用,也能以“后端即服务”的 API 提供服务。 通过 Dify 创建的应用包含了: diff --git a/README_JA.md b/README_JA.md new file mode 100644 index 0000000000..fed796d4f1 --- /dev/null +++ b/README_JA.md @@ -0,0 +1,120 @@ +![](./images/describe-en.png) +

+ English | + 简体中文 | + 日本語 +

+ +[Web サイト](https://dify.ai) • [ドキュメント](https://docs.dify.ai) • [Twitter](https://twitter.com/dify_ai) • [Discord](https://discord.gg/FngNHpbcY7) + +Product Huntで私たちに投票してください ↓ +Product Hunt Badge + + +**Dify** は、より多くの人々が持続可能な AI ネイティブアプリケーションを作成できるように設計された、使いやすい LLMOps プラットフォームです。様々なアプリケーションタイプに対応したビジュアルオーケストレーションにより Dify は Backend-as-a-Service API としても機能する、すぐに使えるアプリケーションを提供します。プラグインやデータセットを統合するための1つの API で開発プロセスを統一し、プロンプトエンジニアリング、ビジュアル分析、継続的な改善のための1つのインターフェイスを使って業務を合理化します。 + +Difyで作成したアプリケーションは以下の通りです: + +フォームモードとチャット会話モードをサポートする、すぐに使える Web サイト +プラグイン機能、コンテキストの強化などを網羅する単一の API により、バックエンドのコーディングの手間を省きます。 +アプリケーションの視覚的なデータ分析、ログレビュー、アノテーションが可能です。 +Dify は LangChain と互換性があり、複数の LLM を徐々にサポートします: + +- GPT 3 (text-davinci-003) +- GPT 3.5 Turbo(ChatGPT) +- GPT-4 + +## クラウドサービスの利用 + +[Dify.ai](https://dify.ai) をご覧ください + +## Community Edition のインストール + +### システム要件 + +Dify をインストールする前に、お使いのマシンが以下の最低システム要件を満たしていることを確認してください: + +- CPU >= 1 Core +- RAM >= 4GB + +### クイックスタート + +Dify サーバーを起動する最も簡単な方法は、[docker-compose.yml](docker/docker-compose.yaml) ファイルを実行することです。インストールコマンドを実行する前に、[Docker](https://docs.docker.com/get-docker/) と [Docker Compose](https://docs.docker.com/compose/install/) がお使いのマシンにインストールされていることを確認してください: + +```bash +cd docker +docker-compose up -d +``` + +実行後、ブラウザで [http://localhost/install](http://localhost/install) にアクセスし、初期化インストール作業を開始することができます。 + +### 構成 + +カスタマイズが必要な場合は、[docker-compose.yml](docker/docker-compose.yaml) ファイルのコメントを参照し、手動で環境設定をお願いします。変更後、再度 'docker-compose up -d' を実行してください。 + +## ロードマップ + +開発中の機能: + +- **データセット**, Notionやウェブページからのコンテンツ同期など、より多くのデータセットをサポートします +テキスト、ウェブページ、さらには Notion コンテンツなど、より多くのデータセットをサポートする予定です。ユーザーは、自分のデータソースをもとに AI アプリケーションを構築することができます。 +- **プラグイン**, アプリケーションに ChatGPT プラグイン標準のプラグインを導入する、または Dify 制作のプラグインを利用する +今後、ChatGPT 規格に準拠したプラグインや、ディファイ独自のプラグインを公開し、より多くの機能をアプリケーションで実現できるようにします。 +- **オープンソースモデル**, 例えばモデルプロバイダーとして Llama を採用したり、さらにファインチューニングを行う +Llama 
のような優れたオープンソースモデルを、私たちのプラットフォームのモデルオプションとして提供したり、さらなる微調整のために使用したりすることで、協力していきます。 + + +## Q&A + +**Q: Dify で何ができるのか?** + +A: Dify はシンプルでパワフルな LLM 開発・運用ツールです。商用グレードのアプリケーション、パーソナルアシスタントを構築するために使用することができます。独自のアプリケーションを開発したい場合、Dify は OpenAI と統合する際のバックエンド作業を省き、視覚的な操作機能を提供し、GPT モデルを継続的に改善・訓練することが可能です。 + +**Q: Dify を使って、自分のモデルを「トレーニング」するにはどうすればいいのでしょうか?** + +A: プロンプトエンジニアリング、コンテキスト拡張、ファインチューニングからなる価値あるアプリケーションです。プロンプトとプログラミング言語を組み合わせたハイブリッドプログラミングアプローチ(テンプレートエンジンのようなもの)で、長文の埋め込みやユーザー入力の YouTube 動画からの字幕取り込みなどを簡単に実現し、これらはすべて LLM が処理するコンテキストとして提出される予定です。また、アプリケーションの操作性を重視し、ユーザーがアプリケーションを使用する際に生成したデータを分析、アノテーション、継続的なトレーニングに利用できるようにしました。適切なツールがなければ、これらのステップに時間がかかることがあります。 + +**Q: 自分でアプリケーションを作りたい場合、何を準備すればよいですか?** + +A: すでに OpenAI API Key をお持ちだと思いますが、お持ちでない場合はご登録ください。もし、すでにトレーニングのコンテキストとなるコンテンツをお持ちでしたら、それは素晴らしいことです! + +**Q: インターフェイスにどの言語が使えますか?** + +A: 現在、英語と中国語に対応しており、言語パックを寄贈することも可能です。 + +## Star ヒストリー + +[![Star History Chart](https://api.star-history.com/svg?repos=langgenius/dify&type=Date)](https://star-history.com/#langgenius/dify&Date) + +## お問合せ + +ご質問、ご提案、パートナーシップに関するお問い合わせは、以下のチャンネルからお気軽にご連絡ください: + +- GitHub Repo で Issue や PR を提出する +- [Discord](https://discord.gg/FngNHpbcY7) コミュニティで議論に参加する。 +- hello@dify.ai にメールを送信します + +私たちは、皆様のお手伝いをさせていただき、より楽しく、より便利な AI アプリケーションを一緒に作っていきたいと思っています! + +## コントリビュート + +適切なレビューを行うため、コミットへの直接アクセスが可能なコントリビュータを含むすべてのコードコントリビュータは、プルリクエストで提出し、マージされる前にコア開発チームによって承認される必要があります。 + +私たちはすべてのプルリクエストを歓迎します!協力したい方は、[コントリビューションガイド](CONTRIBUTING.md) をチェックしてみてください。 + +## セキュリティ + +プライバシー保護のため、GitHub へのセキュリティ問題の投稿は避けてください。代わりに、あなたの質問を security@dify.ai に送ってください。より詳細な回答を提供します。 + +## 引用 + +本ソフトウェアは、以下のオープンソースソフトウェアを使用しています: + +- Chase, H. (2022). LangChain [Computer software]. https://github.com/hwchase17/langchain +- Liu, J. (2022). LlamaIndex [Computer software]. doi: 10.5281/zenodo.1234. 
+ +詳しくは、各ソフトウェアの公式サイトまたはライセンス文をご参照ください。 + +## ライセンス + +このリポジトリは、[Dify Open Source License](LICENSE) のもとで利用できます。 diff --git a/api/.env.example b/api/.env.example index 5f307dc106..4e2d76f810 100644 --- a/api/.env.example +++ b/api/.env.example @@ -14,7 +14,7 @@ CONSOLE_URL=http://127.0.0.1:5001 API_URL=http://127.0.0.1:5001 # Web APP base URL -APP_URL=http://127.0.0.1:5001 +APP_URL=http://127.0.0.1:3000 # celery configuration CELERY_BROKER_URL=redis://:difyai123456@localhost:6379/1 diff --git a/api/README.md b/api/README.md index 97f09fc700..3bdb299479 100644 --- a/api/README.md +++ b/api/README.md @@ -33,3 +33,4 @@ flask run --host 0.0.0.0 --port=5001 --debug ``` 7. Setup your application by visiting http://localhost:5001/console/api/setup or other apis... +8. If you need to debug local async processing, you can run `celery -A app.celery worker`, celery can do dataset importing and other async tasks. \ No newline at end of file diff --git a/api/config.py b/api/config.py index 04c44f2447..f81527da61 100644 --- a/api/config.py +++ b/api/config.py @@ -21,9 +21,11 @@ DEFAULTS = { 'REDIS_HOST': 'localhost', 'REDIS_PORT': '6379', 'REDIS_DB': '0', + 'REDIS_USE_SSL': 'False', 'SESSION_REDIS_HOST': 'localhost', 'SESSION_REDIS_PORT': '6379', 'SESSION_REDIS_DB': '2', + 'SESSION_REDIS_USE_SSL': 'False', 'OAUTH_REDIRECT_PATH': '/console/api/oauth/authorize', 'OAUTH_REDIRECT_INDEX_PATH': '/', 'CONSOLE_URL': 'https://cloud.dify.ai', @@ -44,6 +46,8 @@ DEFAULTS = { 'CELERY_BACKEND': 'database', 'PDF_PREVIEW': 'True', 'LOG_LEVEL': 'INFO', + 'DISABLE_PROVIDER_CONFIG_VALIDATION': 'False', + 'DEFAULT_LLM_PROVIDER': 'openai' } @@ -105,14 +109,18 @@ class Config: # redis settings self.REDIS_HOST = get_env('REDIS_HOST') self.REDIS_PORT = get_env('REDIS_PORT') + self.REDIS_USERNAME = get_env('REDIS_USERNAME') self.REDIS_PASSWORD = get_env('REDIS_PASSWORD') self.REDIS_DB = get_env('REDIS_DB') + self.REDIS_USE_SSL = get_bool_env('REDIS_USE_SSL') # session redis settings 
self.SESSION_REDIS_HOST = get_env('SESSION_REDIS_HOST') self.SESSION_REDIS_PORT = get_env('SESSION_REDIS_PORT') + self.SESSION_REDIS_USERNAME = get_env('SESSION_REDIS_USERNAME') self.SESSION_REDIS_PASSWORD = get_env('SESSION_REDIS_PASSWORD') self.SESSION_REDIS_DB = get_env('SESSION_REDIS_DB') + self.SESSION_REDIS_USE_SSL = get_bool_env('SESSION_REDIS_USE_SSL') # storage settings self.STORAGE_TYPE = get_env('STORAGE_TYPE') @@ -165,10 +173,18 @@ class Config: self.CELERY_BACKEND = get_env('CELERY_BACKEND') self.CELERY_RESULT_BACKEND = 'db+{}'.format(self.SQLALCHEMY_DATABASE_URI) \ if self.CELERY_BACKEND == 'database' else self.CELERY_BROKER_URL + self.BROKER_USE_SSL = self.CELERY_BROKER_URL.startswith('rediss://') # hosted provider credentials self.OPENAI_API_KEY = get_env('OPENAI_API_KEY') + # By default it is False + # You could disable it for compatibility with certain OpenAPI providers + self.DISABLE_PROVIDER_CONFIG_VALIDATION = get_bool_env('DISABLE_PROVIDER_CONFIG_VALIDATION') + + # For temp use only + # set default LLM provider, default is 'openai', support `azure_openai` + self.DEFAULT_LLM_PROVIDER = get_env('DEFAULT_LLM_PROVIDER') class CloudEditionConfig(Config): diff --git a/api/controllers/console/app/__init__.py b/api/controllers/console/app/__init__.py index 1f22ab30c6..f0c7956e0f 100644 --- a/api/controllers/console/app/__init__.py +++ b/api/controllers/console/app/__init__.py @@ -17,6 +17,6 @@ def _get_app(app_id, mode=None): raise NotFound("App not found") if mode and app.mode != mode: - raise AppUnavailableError() + raise NotFound("The {} app not found".format(mode)) return app diff --git a/api/controllers/console/app/error.py b/api/controllers/console/app/error.py index c19f054be4..8923f90bf8 100644 --- a/api/controllers/console/app/error.py +++ b/api/controllers/console/app/error.py @@ -9,31 +9,33 @@ class AppNotFoundError(BaseHTTPException): class ProviderNotInitializeError(BaseHTTPException): error_code = 'provider_not_initialize' - description 
= "Provider Token not initialize." + description = "No valid model provider credentials found. " \ + "Please go to Settings -> Model Provider to complete your provider credentials." code = 400 class ProviderQuotaExceededError(BaseHTTPException): error_code = 'provider_quota_exceeded' - description = "Provider quota exceeded." + description = "Your quota for Dify Hosted OpenAI has been exhausted. " \ + "Please go to Settings -> Model Provider to complete your own provider credentials." code = 400 class ProviderModelCurrentlyNotSupportError(BaseHTTPException): error_code = 'model_currently_not_support' - description = "GPT-4 currently not support." + description = "Dify Hosted OpenAI trial currently not support the GPT-4 model." code = 400 class ConversationCompletedError(BaseHTTPException): error_code = 'conversation_completed' - description = "Conversation was completed." + description = "The conversation has ended. Please start a new conversation." code = 400 class AppUnavailableError(BaseHTTPException): error_code = 'app_unavailable' - description = "App unavailable." + description = "App unavailable, please check your app configurations." code = 400 @@ -45,5 +47,5 @@ class CompletionRequestError(BaseHTTPException): class AppMoreLikeThisDisabledError(BaseHTTPException): error_code = 'app_more_like_this_disabled' - description = "More like this disabled." + description = "The 'More like this' feature is disabled. Please refresh your page." 
code = 403 diff --git a/api/controllers/console/datasets/datasets_document.py b/api/controllers/console/datasets/datasets_document.py index 79e52d565b..3b9efeaab4 100644 --- a/api/controllers/console/datasets/datasets_document.py +++ b/api/controllers/console/datasets/datasets_document.py @@ -10,13 +10,14 @@ from werkzeug.exceptions import NotFound, Forbidden import services from controllers.console import api -from controllers.console.app.error import ProviderNotInitializeError +from controllers.console.app.error import ProviderNotInitializeError, ProviderQuotaExceededError, \ + ProviderModelCurrentlyNotSupportError from controllers.console.datasets.error import DocumentAlreadyFinishedError, InvalidActionError, DocumentIndexingError, \ InvalidMetadataError, ArchivedDocumentImmutableError from controllers.console.setup import setup_required from controllers.console.wraps import account_initialization_required from core.indexing_runner import IndexingRunner -from core.llm.error import ProviderTokenNotInitError +from core.llm.error import ProviderTokenNotInitError, QuotaExceededError, ModelCurrentlyNotSupportError from extensions.ext_redis import redis_client from libs.helper import TimestampField from extensions.ext_database import db @@ -222,6 +223,10 @@ class DatasetDocumentListApi(Resource): document = DocumentService.save_document_with_dataset_id(dataset, args, current_user) except ProviderTokenNotInitError: raise ProviderNotInitializeError() + except QuotaExceededError: + raise ProviderQuotaExceededError() + except ModelCurrentlyNotSupportError: + raise ProviderModelCurrentlyNotSupportError() return document @@ -259,6 +264,10 @@ class DatasetInitApi(Resource): ) except ProviderTokenNotInitError: raise ProviderNotInitializeError() + except QuotaExceededError: + raise ProviderQuotaExceededError() + except ModelCurrentlyNotSupportError: + raise ProviderModelCurrentlyNotSupportError() response = { 'dataset': dataset, diff --git 
a/api/controllers/console/datasets/error.py b/api/controllers/console/datasets/error.py index 014822d565..29142b80e6 100644 --- a/api/controllers/console/datasets/error.py +++ b/api/controllers/console/datasets/error.py @@ -3,7 +3,7 @@ from libs.exception import BaseHTTPException class NoFileUploadedError(BaseHTTPException): error_code = 'no_file_uploaded' - description = "No file uploaded." + description = "Please upload your file." code = 400 @@ -27,25 +27,25 @@ class UnsupportedFileTypeError(BaseHTTPException): class HighQualityDatasetOnlyError(BaseHTTPException): error_code = 'high_quality_dataset_only' - description = "High quality dataset only." + description = "Current operation only supports 'high-quality' datasets." code = 400 class DatasetNotInitializedError(BaseHTTPException): error_code = 'dataset_not_initialized' - description = "Dataset not initialized." + description = "The dataset is still being initialized or indexing. Please wait a moment." code = 400 class ArchivedDocumentImmutableError(BaseHTTPException): error_code = 'archived_document_immutable' - description = "Cannot process an archived document." + description = "The archived document is not editable." code = 403 class DatasetNameDuplicateError(BaseHTTPException): error_code = 'dataset_name_duplicate' - description = "Dataset name already exists." + description = "The dataset name already exists. Please modify your dataset name." code = 409 @@ -57,17 +57,17 @@ class InvalidActionError(BaseHTTPException): class DocumentAlreadyFinishedError(BaseHTTPException): error_code = 'document_already_finished' - description = "Document already finished." + description = "The document has been processed. Please refresh the page or go to the document details." code = 400 class DocumentIndexingError(BaseHTTPException): error_code = 'document_indexing' - description = "Document indexing." + description = "The document is being processed and cannot be edited." 
code = 400 class InvalidMetadataError(BaseHTTPException): error_code = 'invalid_metadata' - description = "Invalid metadata." + description = "The metadata content is incorrect. Please check and verify." code = 400 diff --git a/api/controllers/console/datasets/hit_testing.py b/api/controllers/console/datasets/hit_testing.py index 16bb571df3..771d49045f 100644 --- a/api/controllers/console/datasets/hit_testing.py +++ b/api/controllers/console/datasets/hit_testing.py @@ -6,9 +6,12 @@ from werkzeug.exceptions import InternalServerError, NotFound, Forbidden import services from controllers.console import api +from controllers.console.app.error import ProviderNotInitializeError, ProviderQuotaExceededError, \ + ProviderModelCurrentlyNotSupportError from controllers.console.datasets.error import HighQualityDatasetOnlyError, DatasetNotInitializedError from controllers.console.setup import setup_required from controllers.console.wraps import account_initialization_required +from core.llm.error import ProviderTokenNotInitError, QuotaExceededError, ModelCurrentlyNotSupportError from libs.helper import TimestampField from services.dataset_service import DatasetService from services.hit_testing_service import HitTestingService @@ -92,6 +95,12 @@ class HitTestingApi(Resource): return {"query": response['query'], 'records': marshal(response['records'], hit_testing_record_fields)} except services.errors.index.IndexNotInitializedError: raise DatasetNotInitializedError() + except ProviderTokenNotInitError: + raise ProviderNotInitializeError() + except QuotaExceededError: + raise ProviderQuotaExceededError() + except ModelCurrentlyNotSupportError: + raise ProviderModelCurrentlyNotSupportError() except Exception as e: logging.exception("Hit testing failed.") raise InternalServerError(str(e)) diff --git a/api/controllers/console/error.py b/api/controllers/console/error.py index 3040423d71..e563364f27 100644 --- a/api/controllers/console/error.py +++ b/api/controllers/console/error.py 
@@ -3,13 +3,14 @@ from libs.exception import BaseHTTPException class AlreadySetupError(BaseHTTPException): error_code = 'already_setup' - description = "Application already setup." + description = "Dify has been successfully installed. Please refresh the page or return to the dashboard homepage." code = 403 class NotSetupError(BaseHTTPException): error_code = 'not_setup' - description = "Application not setup." + description = "Dify has not been initialized and installed yet. " \ + "Please proceed with the initialization and installation process first." code = 401 diff --git a/api/controllers/console/version.py b/api/controllers/console/version.py index 0e6e75c361..285cdcba5c 100644 --- a/api/controllers/console/version.py +++ b/api/controllers/console/version.py @@ -19,6 +19,14 @@ class VersionApi(Resource): args = parser.parse_args() check_update_url = current_app.config['CHECK_UPDATE_URL'] + if not check_update_url: + return { + 'version': '0.0.0', + 'release_date': '', + 'release_notes': '', + 'can_auto_update': False + } + try: response = requests.get(check_update_url, { 'current_version': args.get('current_version') diff --git a/api/controllers/console/workspace/error.py b/api/controllers/console/workspace/error.py index c5e3a3fb6a..cb744232ec 100644 --- a/api/controllers/console/workspace/error.py +++ b/api/controllers/console/workspace/error.py @@ -21,11 +21,11 @@ class InvalidInvitationCodeError(BaseHTTPException): class AccountAlreadyInitedError(BaseHTTPException): error_code = 'account_already_inited' - description = "Account already inited." + description = "The account has been initialized. Please refresh the page." code = 400 class AccountNotInitializedError(BaseHTTPException): error_code = 'account_not_initialized' - description = "Account not initialized." + description = "The account has not been initialized yet. Please proceed with the initialization process first." 
code = 400 diff --git a/api/controllers/console/workspace/providers.py b/api/controllers/console/workspace/providers.py index bc6b8320af..f2baec29c1 100644 --- a/api/controllers/console/workspace/providers.py +++ b/api/controllers/console/workspace/providers.py @@ -82,29 +82,33 @@ class ProviderTokenApi(Resource): args = parser.parse_args() - if not args['token']: - raise ValueError('Token is empty') + if args['token']: + try: + ProviderService.validate_provider_configs( + tenant=current_user.current_tenant, + provider_name=ProviderName(provider), + configs=args['token'] + ) + token_is_valid = True + except ValidateFailedError as ex: + raise ValueError(str(ex)) - try: - ProviderService.validate_provider_configs( + base64_encrypted_token = ProviderService.get_encrypted_token( tenant=current_user.current_tenant, provider_name=ProviderName(provider), configs=args['token'] ) - token_is_valid = True - except ValidateFailedError: + else: + base64_encrypted_token = None token_is_valid = False tenant = current_user.current_tenant - base64_encrypted_token = ProviderService.get_encrypted_token( - tenant=current_user.current_tenant, - provider_name=ProviderName(provider), - configs=args['token'] - ) - - provider_model = Provider.query.filter_by(tenant_id=tenant.id, provider_name=provider, - provider_type=ProviderType.CUSTOM.value).first() + provider_model = db.session.query(Provider).filter( + Provider.tenant_id == tenant.id, + Provider.provider_name == provider, + Provider.provider_type == ProviderType.CUSTOM.value + ).first() # Only allow updating token for CUSTOM provider type if provider_model: @@ -117,6 +121,16 @@ class ProviderTokenApi(Resource): is_valid=token_is_valid) db.session.add(provider_model) + if provider_model.is_valid: + other_providers = db.session.query(Provider).filter( + Provider.tenant_id == tenant.id, + Provider.provider_name != provider, + Provider.provider_type == ProviderType.CUSTOM.value + ).all() + + for other_provider in other_providers: + 
other_provider.is_valid = False + db.session.commit() if provider in [ProviderName.ANTHROPIC.value, ProviderName.AZURE_OPENAI.value, ProviderName.COHERE.value, @@ -143,7 +157,7 @@ class ProviderTokenValidateApi(Resource): args = parser.parse_args() # todo: remove this when the provider is supported - if provider in [ProviderName.ANTHROPIC.value, ProviderName.AZURE_OPENAI.value, ProviderName.COHERE.value, + if provider in [ProviderName.ANTHROPIC.value, ProviderName.COHERE.value, ProviderName.HUGGINGFACEHUB.value]: return {'result': 'success', 'warning': 'MOCK: This provider is not supported yet.'} diff --git a/api/controllers/service_api/app/error.py b/api/controllers/service_api/app/error.py index c59f570efd..b7f6e0f6fa 100644 --- a/api/controllers/service_api/app/error.py +++ b/api/controllers/service_api/app/error.py @@ -4,43 +4,45 @@ from libs.exception import BaseHTTPException class AppUnavailableError(BaseHTTPException): error_code = 'app_unavailable' - description = "App unavailable." + description = "App unavailable, please check your app configurations." code = 400 class NotCompletionAppError(BaseHTTPException): error_code = 'not_completion_app' - description = "Not Completion App" + description = "Please check if your Completion app mode matches the right API route." code = 400 class NotChatAppError(BaseHTTPException): error_code = 'not_chat_app' - description = "Not Chat App" + description = "Please check if your Chat app mode matches the right API route." code = 400 class ConversationCompletedError(BaseHTTPException): error_code = 'conversation_completed' - description = "Conversation Completed." + description = "The conversation has ended. Please start a new conversation." code = 400 class ProviderNotInitializeError(BaseHTTPException): error_code = 'provider_not_initialize' - description = "Provider Token not initialize." + description = "No valid model provider credentials found. 
" \ + "Please go to Settings -> Model Provider to complete your provider credentials." code = 400 class ProviderQuotaExceededError(BaseHTTPException): error_code = 'provider_quota_exceeded' - description = "Provider quota exceeded." + description = "Your quota for Dify Hosted OpenAI has been exhausted. " \ + "Please go to Settings -> Model Provider to complete your own provider credentials." code = 400 class ProviderModelCurrentlyNotSupportError(BaseHTTPException): error_code = 'model_currently_not_support' - description = "GPT-4 currently not support." + description = "Dify Hosted OpenAI trial currently not support the GPT-4 model." code = 400 diff --git a/api/controllers/service_api/dataset/error.py b/api/controllers/service_api/dataset/error.py index d231e0b40a..2131fe0bac 100644 --- a/api/controllers/service_api/dataset/error.py +++ b/api/controllers/service_api/dataset/error.py @@ -16,5 +16,5 @@ class DocumentIndexingError(BaseHTTPException): class DatasetNotInitedError(BaseHTTPException): error_code = 'dataset_not_inited' - description = "Dataset not inited." + description = "The dataset is still being initialized or indexing. Please wait a moment." code = 403 diff --git a/api/controllers/web/error.py b/api/controllers/web/error.py index ea72422a1b..fdfe36f6d1 100644 --- a/api/controllers/web/error.py +++ b/api/controllers/web/error.py @@ -4,43 +4,45 @@ from libs.exception import BaseHTTPException class AppUnavailableError(BaseHTTPException): error_code = 'app_unavailable' - description = "App unavailable." + description = "App unavailable, please check your app configurations." code = 400 class NotCompletionAppError(BaseHTTPException): error_code = 'not_completion_app' - description = "Not Completion App" + description = "Please check if your Completion app mode matches the right API route." 
code = 400 class NotChatAppError(BaseHTTPException): error_code = 'not_chat_app' - description = "Not Chat App" + description = "Please check if your Chat app mode matches the right API route." code = 400 class ConversationCompletedError(BaseHTTPException): error_code = 'conversation_completed' - description = "Conversation Completed." + description = "The conversation has ended. Please start a new conversation." code = 400 class ProviderNotInitializeError(BaseHTTPException): error_code = 'provider_not_initialize' - description = "Provider Token not initialize." + description = "No valid model provider credentials found. " \ + "Please go to Settings -> Model Provider to complete your provider credentials." code = 400 class ProviderQuotaExceededError(BaseHTTPException): error_code = 'provider_quota_exceeded' - description = "Provider quota exceeded." + description = "Your quota for Dify Hosted OpenAI has been exhausted. " \ + "Please go to Settings -> Model Provider to complete your own provider credentials." code = 400 class ProviderModelCurrentlyNotSupportError(BaseHTTPException): error_code = 'model_currently_not_support' - description = "GPT-4 currently not support." + description = "Dify Hosted OpenAI trial currently not support the GPT-4 model." code = 400 @@ -52,11 +54,11 @@ class CompletionRequestError(BaseHTTPException): class AppMoreLikeThisDisabledError(BaseHTTPException): error_code = 'app_more_like_this_disabled' - description = "More like this disabled." + description = "The 'More like this' feature is disabled. Please refresh your page." code = 403 class AppSuggestedQuestionsAfterAnswerDisabledError(BaseHTTPException): error_code = 'app_suggested_questions_after_answer_disabled' - description = "Function Suggested questions after answer disabled." + description = "The 'Suggested Questions After Answer' feature is disabled. Please refresh your page." 
code = 403 diff --git a/api/core/completion.py b/api/core/completion.py index f215bd0ee5..5e559ac7c7 100644 --- a/api/core/completion.py +++ b/api/core/completion.py @@ -1,4 +1,4 @@ -from typing import Optional, List, Union +from typing import Optional, List, Union, Tuple from langchain.callbacks import CallbackManager from langchain.chat_models.base import BaseChatModel @@ -39,7 +39,8 @@ class Completion: memory = cls.get_memory_from_conversation( tenant_id=app.tenant_id, app_model_config=app_model_config, - conversation=conversation + conversation=conversation, + return_messages=False ) inputs = conversation.inputs @@ -96,7 +97,7 @@ class Completion: ) # get llm prompt - prompt = cls.get_main_llm_prompt( + prompt, stop_words = cls.get_main_llm_prompt( mode=mode, llm=final_llm, pre_prompt=app_model_config.pre_prompt, @@ -114,30 +115,47 @@ class Completion: mode=mode ) - response = final_llm.generate([prompt]) + response = final_llm.generate([prompt], stop_words) return response @classmethod - def get_main_llm_prompt(cls, mode: str, llm: BaseLanguageModel, pre_prompt: str, query: str, inputs: dict, chain_output: Optional[str], + def get_main_llm_prompt(cls, mode: str, llm: BaseLanguageModel, pre_prompt: str, query: str, inputs: dict, + chain_output: Optional[str], memory: Optional[ReadOnlyConversationTokenDBBufferSharedMemory]) -> \ - Union[str | List[BaseMessage]]: + Tuple[Union[str | List[BaseMessage]], Optional[List[str]]]: + # disable template string in query + query_params = OutLinePromptTemplate.from_template(template=query).input_variables + if query_params: + for query_param in query_params: + if query_param not in inputs: + inputs[query_param] = '{' + query_param + '}' + pre_prompt = PromptBuilder.process_template(pre_prompt) if pre_prompt else pre_prompt if mode == 'completion': prompt_template = OutLinePromptTemplate.from_template( - template=("Use the following pieces of [CONTEXT] to answer the question at the end. 
" - "If you don't know the answer, " - "just say that you don't know, don't try to make up an answer. \n" - "```\n" - "[CONTEXT]\n" - "{context}\n" - "```\n" if chain_output else "") + template=("""Use the following CONTEXT as your learned knowledge: +[CONTEXT] +{context} +[END CONTEXT] + +When answer to user: +- If you don't know, just say that you don't know. +- If you don't know when you are not sure, ask for clarification. +Avoid mentioning that you obtained the information from the context. +And answer according to the language of the user's question. +""" if chain_output else "") + (pre_prompt + "\n" if pre_prompt else "") + "{query}\n" ) if chain_output: inputs['context'] = chain_output + context_params = OutLinePromptTemplate.from_template(template=chain_output).input_variables + if context_params: + for context_param in context_params: + if context_param not in inputs: + inputs[context_param] = '{' + context_param + '}' prompt_inputs = {k: inputs[k] for k in prompt_template.input_variables if k in inputs} prompt_content = prompt_template.format( @@ -147,64 +165,83 @@ class Completion: if isinstance(llm, BaseChatModel): # use chat llm as completion model - return [HumanMessage(content=prompt_content)] + return [HumanMessage(content=prompt_content)], None else: - return prompt_content + return prompt_content, None else: messages: List[BaseMessage] = [] - system_message = None - if pre_prompt: - # append pre prompt as system message - system_message = PromptBuilder.to_system_message(pre_prompt, inputs) - - if chain_output: - # append context as system message, currently only use simple stuff prompt - context_message = PromptBuilder.to_system_message( - """Use the following pieces of [CONTEXT] to answer the users question. -If you don't know the answer, just say that you don't know, don't try to make up an answer. 
-``` -[CONTEXT] -{context} -```""", - {'context': chain_output} - ) - - if not system_message: - system_message = context_message - else: - system_message.content = context_message.content + "\n\n" + system_message.content - - if system_message: - messages.append(system_message) - human_inputs = { "query": query } - # construct main prompt - human_message = PromptBuilder.to_human_message( - prompt_content="{query}", - inputs=human_inputs - ) + human_message_prompt = "" + + if pre_prompt: + pre_prompt_inputs = {k: inputs[k] for k in + OutLinePromptTemplate.from_template(template=pre_prompt).input_variables + if k in inputs} + + if pre_prompt_inputs: + human_inputs.update(pre_prompt_inputs) + + if chain_output: + human_inputs['context'] = chain_output + human_message_prompt += """Use the following CONTEXT as your learned knowledge. +[CONTEXT] +{context} +[END CONTEXT] + +When answer to user: +- If you don't know, just say that you don't know. +- If you don't know when you are not sure, ask for clarification. +Avoid mentioning that you obtained the information from the context. +And answer according to the language of the user's question. 
+""" + + if pre_prompt: + human_message_prompt += pre_prompt + + query_prompt = "\nHuman: {query}\nAI: " if memory: # append chat histories - tmp_messages = messages.copy() + [human_message] - curr_message_tokens = memory.llm.get_messages_tokens(tmp_messages) - rest_tokens = llm_constant.max_context_token_length[ - memory.llm.model_name] - memory.llm.max_tokens - curr_message_tokens + tmp_human_message = PromptBuilder.to_human_message( + prompt_content=human_message_prompt + query_prompt, + inputs=human_inputs + ) + + curr_message_tokens = memory.llm.get_messages_tokens([tmp_human_message]) + rest_tokens = llm_constant.max_context_token_length[memory.llm.model_name] \ + - memory.llm.max_tokens - curr_message_tokens rest_tokens = max(rest_tokens, 0) - history_messages = cls.get_history_messages_from_memory(memory, rest_tokens) - messages += history_messages + histories = cls.get_history_messages_from_memory(memory, rest_tokens) + + # disable template string in query + histories_params = OutLinePromptTemplate.from_template(template=histories).input_variables + if histories_params: + for histories_param in histories_params: + if histories_param not in human_inputs: + human_inputs[histories_param] = '{' + histories_param + '}' + + human_message_prompt += "\n\n" + histories + + human_message_prompt += query_prompt + + # construct main prompt + human_message = PromptBuilder.to_human_message( + prompt_content=human_message_prompt, + inputs=human_inputs + ) messages.append(human_message) - return messages + return messages, ['\nHuman:'] @classmethod def get_llm_callback_manager(cls, llm: Union[StreamableOpenAI, StreamableChatOpenAI], - streaming: bool, conversation_message_task: ConversationMessageTask) -> CallbackManager: + streaming: bool, + conversation_message_task: ConversationMessageTask) -> CallbackManager: llm_callback_handler = LLMCallbackHandler(llm, conversation_message_task) if streaming: callback_handlers = [llm_callback_handler, 
DifyStreamingStdOutCallbackHandler()] @@ -216,7 +253,7 @@ If you don't know the answer, just say that you don't know, don't try to make up @classmethod def get_history_messages_from_memory(cls, memory: ReadOnlyConversationTokenDBBufferSharedMemory, max_token_limit: int) -> \ - List[BaseMessage]: + str: """Get memory messages.""" memory.max_token_limit = max_token_limit memory_key = memory.memory_variables[0] @@ -286,7 +323,7 @@ If you don't know the answer, just say that you don't know, don't try to make up ) # get llm prompt - original_prompt = cls.get_main_llm_prompt( + original_prompt, _ = cls.get_main_llm_prompt( mode="completion", llm=llm, pre_prompt=pre_prompt, diff --git a/api/core/conversation_message_task.py b/api/core/conversation_message_task.py index 81477533e7..b23d6664bd 100644 --- a/api/core/conversation_message_task.py +++ b/api/core/conversation_message_task.py @@ -56,6 +56,9 @@ class ConversationMessageTask: ) def init(self): + provider_name = LLMBuilder.get_default_provider(self.app.tenant_id) + self.model_dict['provider'] = provider_name + override_model_configs = None if self.is_override: override_model_configs = { @@ -281,6 +284,9 @@ class PubHandler: @classmethod def generate_channel_name(cls, user: Union[Account | EndUser], task_id: str): + if not user: + raise ValueError("user is required") + user_str = 'account-' + user.id if isinstance(user, Account) else 'end-user-' + user.id return "generate_result:{}-{}".format(user_str, task_id) diff --git a/api/core/embedding/openai_embedding.py b/api/core/embedding/openai_embedding.py index 0938397423..0f7cb252e2 100644 --- a/api/core/embedding/openai_embedding.py +++ b/api/core/embedding/openai_embedding.py @@ -11,9 +11,10 @@ from core.llm.error_handle_wraps import handle_llm_exceptions, handle_llm_except @retry(reraise=True, wait=wait_random_exponential(min=1, max=20), stop=stop_after_attempt(6)) def get_embedding( - text: str, - engine: Optional[str] = None, - openai_api_key: Optional[str] = 
None, + text: str, + engine: Optional[str] = None, + api_key: Optional[str] = None, + **kwargs ) -> List[float]: """Get embedding. @@ -25,11 +26,12 @@ def get_embedding( """ text = text.replace("\n", " ") - return openai.Embedding.create(input=[text], engine=engine, api_key=openai_api_key)["data"][0]["embedding"] + return openai.Embedding.create(input=[text], engine=engine, api_key=api_key, **kwargs)["data"][0]["embedding"] @retry(reraise=True, wait=wait_random_exponential(min=1, max=20), stop=stop_after_attempt(6)) -async def aget_embedding(text: str, engine: Optional[str] = None, openai_api_key: Optional[str] = None) -> List[float]: +async def aget_embedding(text: str, engine: Optional[str] = None, api_key: Optional[str] = None, **kwargs) -> List[ + float]: """Asynchronously get embedding. NOTE: Copied from OpenAI's embedding utils: @@ -42,16 +44,17 @@ async def aget_embedding(text: str, engine: Optional[str] = None, openai_api_key # replace newlines, which can negatively affect performance. text = text.replace("\n", " ") - return (await openai.Embedding.acreate(input=[text], engine=engine, api_key=openai_api_key))["data"][0][ + return (await openai.Embedding.acreate(input=[text], engine=engine, api_key=api_key, **kwargs))["data"][0][ "embedding" ] @retry(reraise=True, wait=wait_random_exponential(min=1, max=20), stop=stop_after_attempt(6)) def get_embeddings( - list_of_text: List[str], - engine: Optional[str] = None, - openai_api_key: Optional[str] = None + list_of_text: List[str], + engine: Optional[str] = None, + api_key: Optional[str] = None, + **kwargs ) -> List[List[float]]: """Get embeddings. @@ -67,14 +70,14 @@ def get_embeddings( # replace newlines, which can negatively affect performance. 
list_of_text = [text.replace("\n", " ") for text in list_of_text] - data = openai.Embedding.create(input=list_of_text, engine=engine, api_key=openai_api_key).data + data = openai.Embedding.create(input=list_of_text, engine=engine, api_key=api_key, **kwargs).data data = sorted(data, key=lambda x: x["index"]) # maintain the same order as input. return [d["embedding"] for d in data] @retry(reraise=True, wait=wait_random_exponential(min=1, max=20), stop=stop_after_attempt(6)) async def aget_embeddings( - list_of_text: List[str], engine: Optional[str] = None, openai_api_key: Optional[str] = None + list_of_text: List[str], engine: Optional[str] = None, api_key: Optional[str] = None, **kwargs ) -> List[List[float]]: """Asynchronously get embeddings. @@ -90,7 +93,7 @@ async def aget_embeddings( # replace newlines, which can negatively affect performance. list_of_text = [text.replace("\n", " ") for text in list_of_text] - data = (await openai.Embedding.acreate(input=list_of_text, engine=engine, api_key=openai_api_key)).data + data = (await openai.Embedding.acreate(input=list_of_text, engine=engine, api_key=api_key, **kwargs)).data data = sorted(data, key=lambda x: x["index"]) # maintain the same order as input. 
return [d["embedding"] for d in data] @@ -98,19 +101,30 @@ async def aget_embeddings( class OpenAIEmbedding(BaseEmbedding): def __init__( - self, - mode: str = OpenAIEmbeddingMode.TEXT_SEARCH_MODE, - model: str = OpenAIEmbeddingModelType.TEXT_EMBED_ADA_002, - deployment_name: Optional[str] = None, - openai_api_key: Optional[str] = None, - **kwargs: Any, + self, + mode: str = OpenAIEmbeddingMode.TEXT_SEARCH_MODE, + model: str = OpenAIEmbeddingModelType.TEXT_EMBED_ADA_002, + deployment_name: Optional[str] = None, + openai_api_key: Optional[str] = None, + **kwargs: Any, ) -> None: """Init params.""" - super().__init__(**kwargs) + new_kwargs = {} + + if 'embed_batch_size' in kwargs: + new_kwargs['embed_batch_size'] = kwargs['embed_batch_size'] + + if 'tokenizer' in kwargs: + new_kwargs['tokenizer'] = kwargs['tokenizer'] + + super().__init__(**new_kwargs) self.mode = OpenAIEmbeddingMode(mode) self.model = OpenAIEmbeddingModelType(model) self.deployment_name = deployment_name self.openai_api_key = openai_api_key + self.openai_api_type = kwargs.get('openai_api_type') + self.openai_api_version = kwargs.get('openai_api_version') + self.openai_api_base = kwargs.get('openai_api_base') @handle_llm_exceptions def _get_query_embedding(self, query: str) -> List[float]: @@ -122,7 +136,9 @@ class OpenAIEmbedding(BaseEmbedding): if key not in _QUERY_MODE_MODEL_DICT: raise ValueError(f"Invalid mode, model combination: {key}") engine = _QUERY_MODE_MODEL_DICT[key] - return get_embedding(query, engine=engine, openai_api_key=self.openai_api_key) + return get_embedding(query, engine=engine, api_key=self.openai_api_key, + api_type=self.openai_api_type, api_version=self.openai_api_version, + api_base=self.openai_api_base) def _get_text_embedding(self, text: str) -> List[float]: """Get text embedding.""" @@ -133,7 +149,9 @@ class OpenAIEmbedding(BaseEmbedding): if key not in _TEXT_MODE_MODEL_DICT: raise ValueError(f"Invalid mode, model combination: {key}") engine = _TEXT_MODE_MODEL_DICT[key] 
- return get_embedding(text, engine=engine, openai_api_key=self.openai_api_key) + return get_embedding(text, engine=engine, api_key=self.openai_api_key, + api_type=self.openai_api_type, api_version=self.openai_api_version, + api_base=self.openai_api_base) async def _aget_text_embedding(self, text: str) -> List[float]: """Asynchronously get text embedding.""" @@ -144,7 +162,9 @@ class OpenAIEmbedding(BaseEmbedding): if key not in _TEXT_MODE_MODEL_DICT: raise ValueError(f"Invalid mode, model combination: {key}") engine = _TEXT_MODE_MODEL_DICT[key] - return await aget_embedding(text, engine=engine, openai_api_key=self.openai_api_key) + return await aget_embedding(text, engine=engine, api_key=self.openai_api_key, + api_type=self.openai_api_type, api_version=self.openai_api_version, + api_base=self.openai_api_base) def _get_text_embeddings(self, texts: List[str]) -> List[List[float]]: """Get text embeddings. @@ -160,7 +180,9 @@ class OpenAIEmbedding(BaseEmbedding): if key not in _TEXT_MODE_MODEL_DICT: raise ValueError(f"Invalid mode, model combination: {key}") engine = _TEXT_MODE_MODEL_DICT[key] - embeddings = get_embeddings(texts, engine=engine, openai_api_key=self.openai_api_key) + embeddings = get_embeddings(texts, engine=engine, api_key=self.openai_api_key, + api_type=self.openai_api_type, api_version=self.openai_api_version, + api_base=self.openai_api_base) return embeddings async def _aget_text_embeddings(self, texts: List[str]) -> List[List[float]]: @@ -172,5 +194,7 @@ class OpenAIEmbedding(BaseEmbedding): if key not in _TEXT_MODE_MODEL_DICT: raise ValueError(f"Invalid mode, model combination: {key}") engine = _TEXT_MODE_MODEL_DICT[key] - embeddings = await aget_embeddings(texts, engine=engine, openai_api_key=self.openai_api_key) + embeddings = await aget_embeddings(texts, engine=engine, api_key=self.openai_api_key, + api_type=self.openai_api_type, api_version=self.openai_api_version, + api_base=self.openai_api_base) return embeddings diff --git 
a/api/core/index/index_builder.py b/api/core/index/index_builder.py index baf16b0f3a..05f08075d4 100644 --- a/api/core/index/index_builder.py +++ b/api/core/index/index_builder.py @@ -33,8 +33,11 @@ class IndexBuilder: max_chunk_overlap=20 ) + provider = LLMBuilder.get_default_provider(tenant_id) + model_credentials = LLMBuilder.get_model_credentials( tenant_id=tenant_id, + model_provider=provider, model_name='text-embedding-ada-002' ) @@ -43,3 +46,15 @@ class IndexBuilder: prompt_helper=prompt_helper, embed_model=OpenAIEmbedding(**model_credentials), ) + + @classmethod + def get_fake_llm_service_context(cls, tenant_id: str) -> ServiceContext: + llm = LLMBuilder.to_llm( + tenant_id=tenant_id, + model_name='fake' + ) + + return ServiceContext.from_defaults( + llm_predictor=LLMPredictor(llm=llm), + embed_model=OpenAIEmbedding() + ) diff --git a/api/core/index/vector_index.py b/api/core/index/vector_index.py index f9d8542a8c..fa1c93cc06 100644 --- a/api/core/index/vector_index.py +++ b/api/core/index/vector_index.py @@ -83,7 +83,7 @@ class VectorIndex: if not self._dataset.index_struct_dict: return - service_context = IndexBuilder.get_default_service_context(tenant_id=self._dataset.tenant_id) + service_context = IndexBuilder.get_fake_llm_service_context(tenant_id=self._dataset.tenant_id) index = vector_store.get_index( service_context=service_context, @@ -101,7 +101,7 @@ class VectorIndex: if not self._dataset.index_struct_dict: return - service_context = IndexBuilder.get_default_service_context(tenant_id=self._dataset.tenant_id) + service_context = IndexBuilder.get_fake_llm_service_context(tenant_id=self._dataset.tenant_id) index = vector_store.get_index( service_context=service_context, diff --git a/api/core/indexing_runner.py b/api/core/indexing_runner.py index 12aee6e030..74aff357c3 100644 --- a/api/core/indexing_runner.py +++ b/api/core/indexing_runner.py @@ -400,7 +400,7 @@ class IndexingRunner: # parse document to nodes nodes = 
node_parser.get_nodes_from_documents([text_doc]) - + nodes = [node for node in nodes if node.text is not None and node.text.strip()] all_nodes.extend(nodes) return all_nodes diff --git a/api/core/llm/llm_builder.py b/api/core/llm/llm_builder.py index 4355593c5d..30b0a931b3 100644 --- a/api/core/llm/llm_builder.py +++ b/api/core/llm/llm_builder.py @@ -4,9 +4,14 @@ from langchain.callbacks import CallbackManager from langchain.llms.fake import FakeListLLM from core.constant import llm_constant +from core.llm.error import ProviderTokenNotInitError +from core.llm.provider.base import BaseProvider from core.llm.provider.llm_provider_service import LLMProviderService +from core.llm.streamable_azure_chat_open_ai import StreamableAzureChatOpenAI +from core.llm.streamable_azure_open_ai import StreamableAzureOpenAI from core.llm.streamable_chat_open_ai import StreamableChatOpenAI from core.llm.streamable_open_ai import StreamableOpenAI +from models.provider import ProviderType class LLMBuilder: @@ -31,16 +36,23 @@ class LLMBuilder: if model_name == 'fake': return FakeListLLM(responses=[]) + provider = cls.get_default_provider(tenant_id) + mode = cls.get_mode_by_model(model_name) if mode == 'chat': - # llm_cls = StreamableAzureChatOpenAI - llm_cls = StreamableChatOpenAI + if provider == 'openai': + llm_cls = StreamableChatOpenAI + else: + llm_cls = StreamableAzureChatOpenAI elif mode == 'completion': - llm_cls = StreamableOpenAI + if provider == 'openai': + llm_cls = StreamableOpenAI + else: + llm_cls = StreamableAzureOpenAI else: raise ValueError(f"model name {model_name} is not supported.") - model_credentials = cls.get_model_credentials(tenant_id, model_name) + model_credentials = cls.get_model_credentials(tenant_id, provider, model_name) return llm_cls( model_name=model_name, @@ -86,18 +98,31 @@ class LLMBuilder: raise ValueError(f"model name {model_name} is not supported.") @classmethod - def get_model_credentials(cls, tenant_id: str, model_name: str) -> dict: + def 
get_model_credentials(cls, tenant_id: str, model_provider: str, model_name: str) -> dict: """ Returns the API credentials for the given tenant_id and model_name, based on the model's provider. Raises an exception if the model_name is not found or if the provider is not found. """ if not model_name: raise Exception('model name not found') + # + # if model_name not in llm_constant.models: + # raise Exception('model {} not found'.format(model_name)) - if model_name not in llm_constant.models: - raise Exception('model {} not found'.format(model_name)) - - model_provider = llm_constant.models[model_name] + # model_provider = llm_constant.models[model_name] provider_service = LLMProviderService(tenant_id=tenant_id, provider_name=model_provider) return provider_service.get_credentials(model_name) + + @classmethod + def get_default_provider(cls, tenant_id: str) -> str: + provider = BaseProvider.get_valid_provider(tenant_id) + if not provider: + raise ProviderTokenNotInitError() + + if provider.provider_type == ProviderType.SYSTEM.value: + provider_name = 'openai' + else: + provider_name = provider.provider_name + + return provider_name diff --git a/api/core/llm/provider/azure_provider.py b/api/core/llm/provider/azure_provider.py index e0ba0d0734..649c64cf73 100644 --- a/api/core/llm/provider/azure_provider.py +++ b/api/core/llm/provider/azure_provider.py @@ -1,22 +1,24 @@ import json +import logging from typing import Optional, Union import requests from core.llm.provider.base import BaseProvider +from core.llm.provider.errors import ValidateFailedError from models.provider import ProviderName class AzureProvider(BaseProvider): - def get_models(self, model_id: Optional[str] = None) -> list[dict]: - credentials = self.get_credentials(model_id) + def get_models(self, model_id: Optional[str] = None, credentials: Optional[dict] = None) -> list[dict]: + credentials = self.get_credentials(model_id) if not credentials else credentials url = 
"{}/openai/deployments?api-version={}".format( - credentials.get('openai_api_base'), - credentials.get('openai_api_version') + str(credentials.get('openai_api_base')), + str(credentials.get('openai_api_version')) ) headers = { - "api-key": credentials.get('openai_api_key'), + "api-key": str(credentials.get('openai_api_key')), "content-type": "application/json; charset=utf-8" } @@ -29,17 +31,18 @@ class AzureProvider(BaseProvider): 'name': '{} ({})'.format(deployment['id'], deployment['model']) } for deployment in result['data'] if deployment['status'] == 'succeeded'] else: - # TODO: optimize in future - raise Exception('Failed to get deployments from Azure OpenAI. Status code: {}'.format(response.status_code)) + if response.status_code == 401: + raise AzureAuthenticationError() + else: + raise AzureRequestFailedError('Failed to request Azure OpenAI. Status code: {}'.format(response.status_code)) def get_credentials(self, model_id: Optional[str] = None) -> dict: """ Returns the API credentials for Azure OpenAI as a dictionary. 
""" - encrypted_config = self.get_provider_api_key(model_id=model_id) - config = json.loads(encrypted_config) + config = self.get_provider_api_key(model_id=model_id) config['openai_api_type'] = 'azure' - config['deployment_name'] = model_id + config['deployment_name'] = model_id.replace('.', '') if model_id else None return config def get_provider_name(self): @@ -51,12 +54,11 @@ class AzureProvider(BaseProvider): """ try: config = self.get_provider_api_key() - config = json.loads(config) except: config = { 'openai_api_type': 'azure', 'openai_api_version': '2023-03-15-preview', - 'openai_api_base': 'https://foo.microsoft.com/bar', + 'openai_api_base': '', 'openai_api_key': '' } @@ -65,7 +67,7 @@ class AzureProvider(BaseProvider): config = { 'openai_api_type': 'azure', 'openai_api_version': '2023-03-15-preview', - 'openai_api_base': 'https://foo.microsoft.com/bar', + 'openai_api_base': '', 'openai_api_key': '' } @@ -76,14 +78,47 @@ class AzureProvider(BaseProvider): def get_token_type(self): # TODO: change to dict when implemented - return lambda value: value + return dict def config_validate(self, config: Union[dict | str]): """ Validates the given config. 
""" - # TODO: implement - pass + try: + if not isinstance(config, dict): + raise ValueError('Config must be a object.') + + if 'openai_api_version' not in config: + config['openai_api_version'] = '2023-03-15-preview' + + models = self.get_models(credentials=config) + + if not models: + raise ValidateFailedError("Please add deployments for 'text-davinci-003', " + "'gpt-3.5-turbo', 'text-embedding-ada-002'.") + + fixed_model_ids = [ + 'text-davinci-003', + 'gpt-35-turbo', + 'text-embedding-ada-002' + ] + + current_model_ids = [model['id'] for model in models] + + missing_model_ids = [fixed_model_id for fixed_model_id in fixed_model_ids if + fixed_model_id not in current_model_ids] + + if missing_model_ids: + raise ValidateFailedError("Please add deployments for '{}'.".format(", ".join(missing_model_ids))) + except AzureAuthenticationError: + raise ValidateFailedError('Validation failed, please check your API Key.') + except (requests.ConnectionError, requests.RequestException): + raise ValidateFailedError('Validation failed, please check your API Base Endpoint.') + except AzureRequestFailedError as ex: + raise ValidateFailedError('Validation failed, error: {}.'.format(str(ex))) + except Exception as ex: + logging.exception('Azure OpenAI Credentials validation failed') + raise ValidateFailedError('Validation failed, error: {}.'.format(str(ex))) def get_encrypted_token(self, config: Union[dict | str]): """ @@ -103,3 +138,11 @@ class AzureProvider(BaseProvider): config = json.loads(token) config['openai_api_key'] = self.decrypt_token(config['openai_api_key']) return config + + +class AzureAuthenticationError(Exception): + pass + + +class AzureRequestFailedError(Exception): + pass diff --git a/api/core/llm/provider/base.py b/api/core/llm/provider/base.py index 89343ff62a..71bb32dca6 100644 --- a/api/core/llm/provider/base.py +++ b/api/core/llm/provider/base.py @@ -14,7 +14,7 @@ class BaseProvider(ABC): def __init__(self, tenant_id: str): self.tenant_id = tenant_id - def 
get_provider_api_key(self, model_id: Optional[str] = None, prefer_custom: bool = True) -> str: + def get_provider_api_key(self, model_id: Optional[str] = None, prefer_custom: bool = True) -> Union[str | dict]: """ Returns the decrypted API key for the given tenant_id and provider_name. If the provider is of type SYSTEM and the quota is exceeded, raises a QuotaExceededError. @@ -43,23 +43,35 @@ class BaseProvider(ABC): Returns the Provider instance for the given tenant_id and provider_name. If both CUSTOM and System providers exist, the preferred provider will be returned based on the prefer_custom flag. """ - providers = db.session.query(Provider).filter( - Provider.tenant_id == self.tenant_id, - Provider.provider_name == self.get_provider_name().value - ).order_by(Provider.provider_type.desc() if prefer_custom else Provider.provider_type).all() + return BaseProvider.get_valid_provider(self.tenant_id, self.get_provider_name().value, prefer_custom) + + @classmethod + def get_valid_provider(cls, tenant_id: str, provider_name: str = None, prefer_custom: bool = False) -> Optional[Provider]: + """ + Returns the Provider instance for the given tenant_id and provider_name. + If both CUSTOM and System providers exist, the preferred provider will be returned based on the prefer_custom flag. 
+ """ + query = db.session.query(Provider).filter( + Provider.tenant_id == tenant_id + ) + + if provider_name: + query = query.filter(Provider.provider_name == provider_name) + + providers = query.order_by(Provider.provider_type.desc() if prefer_custom else Provider.provider_type).all() custom_provider = None system_provider = None for provider in providers: - if provider.provider_type == ProviderType.CUSTOM.value: + if provider.provider_type == ProviderType.CUSTOM.value and provider.is_valid and provider.encrypted_config: custom_provider = provider - elif provider.provider_type == ProviderType.SYSTEM.value: + elif provider.provider_type == ProviderType.SYSTEM.value and provider.is_valid: system_provider = provider - if custom_provider and custom_provider.is_valid and custom_provider.encrypted_config: + if custom_provider: return custom_provider - elif system_provider and system_provider.is_valid: + elif system_provider: return system_provider else: return None @@ -80,7 +92,7 @@ class BaseProvider(ABC): try: config = self.get_provider_api_key() except: - config = 'THIS-IS-A-MOCK-TOKEN' + config = '' if obfuscated: return self.obfuscated_token(config) diff --git a/api/core/llm/streamable_azure_chat_open_ai.py b/api/core/llm/streamable_azure_chat_open_ai.py index 539ce92774..f3d514cf58 100644 --- a/api/core/llm/streamable_azure_chat_open_ai.py +++ b/api/core/llm/streamable_azure_chat_open_ai.py @@ -1,12 +1,50 @@ -import requests from langchain.schema import BaseMessage, ChatResult, LLMResult from langchain.chat_models import AzureChatOpenAI -from typing import Optional, List +from typing import Optional, List, Dict, Any + +from pydantic import root_validator from core.llm.error_handle_wraps import handle_llm_exceptions, handle_llm_exceptions_async class StreamableAzureChatOpenAI(AzureChatOpenAI): + @root_validator() + def validate_environment(cls, values: Dict) -> Dict: + """Validate that api key and python package exists in environment.""" + try: + import openai + 
except ImportError: + raise ValueError( + "Could not import openai python package. " + "Please install it with `pip install openai`." + ) + try: + values["client"] = openai.ChatCompletion + except AttributeError: + raise ValueError( + "`openai` has no `ChatCompletion` attribute, this is likely " + "due to an old version of the openai package. Try upgrading it " + "with `pip install --upgrade openai`." + ) + if values["n"] < 1: + raise ValueError("n must be at least 1.") + if values["n"] > 1 and values["streaming"]: + raise ValueError("n must be 1 when streaming.") + return values + + @property + def _default_params(self) -> Dict[str, Any]: + """Get the default parameters for calling OpenAI API.""" + return { + **super()._default_params, + "engine": self.deployment_name, + "api_type": self.openai_api_type, + "api_base": self.openai_api_base, + "api_version": self.openai_api_version, + "api_key": self.openai_api_key, + "organization": self.openai_organization if self.openai_organization else None, + } + def get_messages_tokens(self, messages: List[BaseMessage]) -> int: """Get the number of tokens in a list of messages. 
diff --git a/api/core/llm/streamable_azure_open_ai.py b/api/core/llm/streamable_azure_open_ai.py new file mode 100644 index 0000000000..e383f8cf23 --- /dev/null +++ b/api/core/llm/streamable_azure_open_ai.py @@ -0,0 +1,64 @@ +import os + +from langchain.llms import AzureOpenAI +from langchain.schema import LLMResult +from typing import Optional, List, Dict, Mapping, Any + +from pydantic import root_validator + +from core.llm.error_handle_wraps import handle_llm_exceptions, handle_llm_exceptions_async + + +class StreamableAzureOpenAI(AzureOpenAI): + openai_api_type: str = "azure" + openai_api_version: str = "" + + @root_validator() + def validate_environment(cls, values: Dict) -> Dict: + """Validate that api key and python package exists in environment.""" + try: + import openai + + values["client"] = openai.Completion + except ImportError: + raise ValueError( + "Could not import openai python package. " + "Please install it with `pip install openai`." + ) + if values["streaming"] and values["n"] > 1: + raise ValueError("Cannot stream results when n > 1.") + if values["streaming"] and values["best_of"] > 1: + raise ValueError("Cannot stream results when best_of > 1.") + return values + + @property + def _invocation_params(self) -> Dict[str, Any]: + return {**super()._invocation_params, **{ + "api_type": self.openai_api_type, + "api_base": self.openai_api_base, + "api_version": self.openai_api_version, + "api_key": self.openai_api_key, + "organization": self.openai_organization if self.openai_organization else None, + }} + + @property + def _identifying_params(self) -> Mapping[str, Any]: + return {**super()._identifying_params, **{ + "api_type": self.openai_api_type, + "api_base": self.openai_api_base, + "api_version": self.openai_api_version, + "api_key": self.openai_api_key, + "organization": self.openai_organization if self.openai_organization else None, + }} + + @handle_llm_exceptions + def generate( + self, prompts: List[str], stop: Optional[List[str]] = None + 
) -> LLMResult: + return super().generate(prompts, stop) + + @handle_llm_exceptions_async + async def agenerate( + self, prompts: List[str], stop: Optional[List[str]] = None + ) -> LLMResult: + return await super().agenerate(prompts, stop) diff --git a/api/core/llm/streamable_chat_open_ai.py b/api/core/llm/streamable_chat_open_ai.py index 59391e4ce0..582041ba09 100644 --- a/api/core/llm/streamable_chat_open_ai.py +++ b/api/core/llm/streamable_chat_open_ai.py @@ -1,12 +1,52 @@ +import os + from langchain.schema import BaseMessage, ChatResult, LLMResult from langchain.chat_models import ChatOpenAI -from typing import Optional, List +from typing import Optional, List, Dict, Any + +from pydantic import root_validator from core.llm.error_handle_wraps import handle_llm_exceptions, handle_llm_exceptions_async class StreamableChatOpenAI(ChatOpenAI): + @root_validator() + def validate_environment(cls, values: Dict) -> Dict: + """Validate that api key and python package exists in environment.""" + try: + import openai + except ImportError: + raise ValueError( + "Could not import openai python package. " + "Please install it with `pip install openai`." + ) + try: + values["client"] = openai.ChatCompletion + except AttributeError: + raise ValueError( + "`openai` has no `ChatCompletion` attribute, this is likely " + "due to an old version of the openai package. Try upgrading it " + "with `pip install --upgrade openai`." 
+ ) + if values["n"] < 1: + raise ValueError("n must be at least 1.") + if values["n"] > 1 and values["streaming"]: + raise ValueError("n must be 1 when streaming.") + return values + + @property + def _default_params(self) -> Dict[str, Any]: + """Get the default parameters for calling OpenAI API.""" + return { + **super()._default_params, + "api_type": 'openai', + "api_base": os.environ.get("OPENAI_API_BASE", "https://api.openai.com/v1"), + "api_version": None, + "api_key": self.openai_api_key, + "organization": self.openai_organization if self.openai_organization else None, + } + def get_messages_tokens(self, messages: List[BaseMessage]) -> int: """Get the number of tokens in a list of messages. diff --git a/api/core/llm/streamable_open_ai.py b/api/core/llm/streamable_open_ai.py index 94754af30e..9cf1b4c4bb 100644 --- a/api/core/llm/streamable_open_ai.py +++ b/api/core/llm/streamable_open_ai.py @@ -1,12 +1,54 @@ +import os + from langchain.schema import LLMResult -from typing import Optional, List +from typing import Optional, List, Dict, Any, Mapping from langchain import OpenAI +from pydantic import root_validator from core.llm.error_handle_wraps import handle_llm_exceptions, handle_llm_exceptions_async class StreamableOpenAI(OpenAI): + @root_validator() + def validate_environment(cls, values: Dict) -> Dict: + """Validate that api key and python package exists in environment.""" + try: + import openai + + values["client"] = openai.Completion + except ImportError: + raise ValueError( + "Could not import openai python package. " + "Please install it with `pip install openai`." 
+ ) + if values["streaming"] and values["n"] > 1: + raise ValueError("Cannot stream results when n > 1.") + if values["streaming"] and values["best_of"] > 1: + raise ValueError("Cannot stream results when best_of > 1.") + return values + + @property + def _invocation_params(self) -> Dict[str, Any]: + return {**super()._invocation_params, **{ + "api_type": 'openai', + "api_base": os.environ.get("OPENAI_API_BASE", "https://api.openai.com/v1"), + "api_version": None, + "api_key": self.openai_api_key, + "organization": self.openai_organization if self.openai_organization else None, + }} + + @property + def _identifying_params(self) -> Mapping[str, Any]: + return {**super()._identifying_params, **{ + "api_type": 'openai', + "api_base": os.environ.get("OPENAI_API_BASE", "https://api.openai.com/v1"), + "api_version": None, + "api_key": self.openai_api_key, + "organization": self.openai_organization if self.openai_organization else None, + }} + + @handle_llm_exceptions def generate( self, prompts: List[str], stop: Optional[List[str]] = None diff --git a/api/core/vector_store/weaviate_vector_store_client.py b/api/core/vector_store/weaviate_vector_store_client.py index 2310278cf9..ea020f951e 100644 --- a/api/core/vector_store/weaviate_vector_store_client.py +++ b/api/core/vector_store/weaviate_vector_store_client.py @@ -29,7 +29,7 @@ class WeaviateVectorStoreClient(BaseVectorStoreClient): return weaviate.Client( url=endpoint, auth_client_secret=auth_config, - timeout_config=(5, 15), + timeout_config=(5, 60), startup_period=None ) diff --git a/api/extensions/ext_celery.py b/api/extensions/ext_celery.py index f738b984d9..5750d77dba 100644 --- a/api/extensions/ext_celery.py +++ b/api/extensions/ext_celery.py @@ -15,9 +15,24 @@ def init_app(app: Flask) -> Celery: backend=app.config["CELERY_BACKEND"], task_ignore_result=True, ) + + # Add SSL options to the Celery configuration + ssl_options = { + "ssl_cert_reqs": None, + "ssl_ca_certs": None, + "ssl_certfile": None, + 
"ssl_keyfile": None, + } + celery_app.conf.update( result_backend=app.config["CELERY_RESULT_BACKEND"], ) + + if app.config["BROKER_USE_SSL"]: + celery_app.conf.update( + broker_use_ssl=ssl_options, # Add the SSL options to the broker configuration + ) + celery_app.set_default() app.extensions["celery"] = celery_app return celery_app diff --git a/api/extensions/ext_redis.py b/api/extensions/ext_redis.py index c3e021e798..f00b300808 100644 --- a/api/extensions/ext_redis.py +++ b/api/extensions/ext_redis.py @@ -1,18 +1,23 @@ import redis - +from redis.connection import SSLConnection, Connection redis_client = redis.Redis() def init_app(app): + connection_class = Connection + if app.config.get('REDIS_USE_SSL', False): + connection_class = SSLConnection + redis_client.connection_pool = redis.ConnectionPool(**{ 'host': app.config.get('REDIS_HOST', 'localhost'), 'port': app.config.get('REDIS_PORT', 6379), + 'username': app.config.get('REDIS_USERNAME', None), 'password': app.config.get('REDIS_PASSWORD', None), 'db': app.config.get('REDIS_DB', 0), 'encoding': 'utf-8', 'encoding_errors': 'strict', 'decode_responses': False - }) + }, connection_class=connection_class) app.extensions['redis'] = redis_client diff --git a/api/extensions/ext_session.py b/api/extensions/ext_session.py index 5b454d469e..e03a22b0c8 100644 --- a/api/extensions/ext_session.py +++ b/api/extensions/ext_session.py @@ -1,4 +1,5 @@ import redis +from redis.connection import SSLConnection, Connection from flask import request from flask_session import Session, SqlAlchemySessionInterface, RedisSessionInterface from flask_session.sessions import total_seconds @@ -23,16 +24,21 @@ def init_app(app): if session_type == 'sqlalchemy': app.session_interface = sqlalchemy_session_interface elif session_type == 'redis': + connection_class = Connection + if app.config.get('SESSION_REDIS_USE_SSL', False): + connection_class = SSLConnection + sess_redis_client = redis.Redis() sess_redis_client.connection_pool = 
redis.ConnectionPool(**{ 'host': app.config.get('SESSION_REDIS_HOST', 'localhost'), 'port': app.config.get('SESSION_REDIS_PORT', 6379), + 'username': app.config.get('SESSION_REDIS_USERNAME', None), 'password': app.config.get('SESSION_REDIS_PASSWORD', None), 'db': app.config.get('SESSION_REDIS_DB', 2), 'encoding': 'utf-8', 'encoding_errors': 'strict', 'decode_responses': False - }) + }, connection_class=connection_class) app.extensions['session_redis'] = sess_redis_client diff --git a/api/libs/helper.py b/api/libs/helper.py index bbf01cbad7..767f368d33 100644 --- a/api/libs/helper.py +++ b/api/libs/helper.py @@ -21,7 +21,7 @@ class TimestampField(fields.Raw): def email(email): # Define a regex pattern for email addresses - pattern = r"^[a-zA-Z0-9_.+-]+@[a-zA-Z0-9-]+\.[a-zA-Z0-9-.]+$" + pattern = r"^[\w\.-]+@([\w-]+\.)+[\w-]{2,}$" # Check if the email matches the pattern if re.match(pattern, email) is not None: return email diff --git a/api/services/dataset_service.py b/api/services/dataset_service.py index 1a032c9137..9007dd825b 100644 --- a/api/services/dataset_service.py +++ b/api/services/dataset_service.py @@ -18,6 +18,7 @@ from services.errors.account import NoPermissionError from services.errors.dataset import DatasetNameDuplicateError from services.errors.document import DocumentIndexingError from services.errors.file import FileNotExistsError +from tasks.deal_dataset_vector_index_task import deal_dataset_vector_index_task from tasks.document_indexing_task import document_indexing_task @@ -97,7 +98,12 @@ class DatasetService: def update_dataset(dataset_id, data, user): dataset = DatasetService.get_dataset(dataset_id) DatasetService.check_dataset_permission(dataset, user) - + if dataset.indexing_technique != data['indexing_technique']: + # if update indexing_technique + if data['indexing_technique'] == 'economy': + deal_dataset_vector_index_task.delay(dataset_id, 'remove') + elif data['indexing_technique'] == 'high_quality': + 
deal_dataset_vector_index_task.delay(dataset_id, 'add') filtered_data = {k: v for k, v in data.items() if v is not None or k == 'description'} filtered_data['updated_by'] = user.id diff --git a/api/services/provider_service.py b/api/services/provider_service.py index 7f6c7c9303..39ee8353c0 100644 --- a/api/services/provider_service.py +++ b/api/services/provider_service.py @@ -62,6 +62,8 @@ class ProviderService: @staticmethod def validate_provider_configs(tenant, provider_name: ProviderName, configs: Union[dict | str]): + if current_app.config['DISABLE_PROVIDER_CONFIG_VALIDATION']: + return llm_provider_service = LLMProviderService(tenant.id, provider_name.value) return llm_provider_service.config_validate(configs) diff --git a/api/tasks/deal_dataset_vector_index_task.py b/api/tasks/deal_dataset_vector_index_task.py new file mode 100644 index 0000000000..f5f9129558 --- /dev/null +++ b/api/tasks/deal_dataset_vector_index_task.py @@ -0,0 +1,75 @@ +import logging +import time + +import click +from celery import shared_task +from llama_index.data_structs.node_v2 import DocumentRelationship, Node +from core.index.vector_index import VectorIndex +from extensions.ext_database import db +from models.dataset import DocumentSegment, Document, Dataset + + +@shared_task +def deal_dataset_vector_index_task(dataset_id: str, action: str): + """ + Async deal dataset from index + :param dataset_id: dataset_id + :param action: action + Usage: deal_dataset_vector_index_task.delay(dataset_id, action) + """ + logging.info(click.style('Start deal dataset vector index: {}'.format(dataset_id), fg='green')) + start_at = time.perf_counter() + + try: + dataset = Dataset.query.filter_by( + id=dataset_id + ).first() + if not dataset: + raise Exception('Dataset not found') + documents = Document.query.filter_by(dataset_id=dataset_id).all() + if documents: + vector_index = VectorIndex(dataset=dataset) + for document in documents: + # delete from vector index + if action == "remove": + 
vector_index.del_doc(document.id) + elif action == "add": + segments = db.session.query(DocumentSegment).filter( + DocumentSegment.document_id == document.id, + DocumentSegment.enabled == True + ) .order_by(DocumentSegment.position.asc()).all() + + nodes = [] + previous_node = None + for segment in segments: + relationships = { + DocumentRelationship.SOURCE: document.id + } + + if previous_node: + relationships[DocumentRelationship.PREVIOUS] = previous_node.doc_id + + previous_node.relationships[DocumentRelationship.NEXT] = segment.index_node_id + + node = Node( + doc_id=segment.index_node_id, + doc_hash=segment.index_node_hash, + text=segment.content, + extra_info=None, + node_info=None, + relationships=relationships + ) + + previous_node = node + nodes.append(node) + # save vector index + vector_index.add_nodes( + nodes=nodes, + duplicate_check=True + ) + + end_at = time.perf_counter() + logging.info( + click.style('Deal dataset vector index: {} latency: {}'.format(dataset_id, end_at - start_at), fg='green')) + except Exception: + logging.exception("Deal dataset vector index failed") diff --git a/docker/docker-compose.yaml b/docker/docker-compose.yaml index a337377cfa..04728184b3 100644 --- a/docker/docker-compose.yaml +++ b/docker/docker-compose.yaml @@ -36,14 +36,18 @@ services: # It is consistent with the configuration in the 'redis' service below. REDIS_HOST: redis REDIS_PORT: 6379 + REDIS_USERNAME: '' REDIS_PASSWORD: difyai123456 + REDIS_USE_SSL: 'false' # use redis db 0 for redis cache REDIS_DB: 0 # The configurations of session, Supported values are `sqlalchemy`. `redis` SESSION_TYPE: redis SESSION_REDIS_HOST: redis SESSION_REDIS_PORT: 6379 + SESSION_REDIS_USERNAME: '' SESSION_REDIS_PASSWORD: difyai123456 + SESSION_REDIS_USE_SSL: 'false' # use redis db 2 for session store SESSION_REDIS_DB: 2 # The configurations of celery broker. @@ -129,8 +133,10 @@ services: # The configurations of redis cache connection. 
REDIS_HOST: redis REDIS_PORT: 6379 + REDIS_USERNAME: '' REDIS_PASSWORD: difyai123456 REDIS_DB: 0 + REDIS_USE_SSL: 'false' # The configurations of celery broker. CELERY_BROKER_URL: redis://:difyai123456@redis:6379/1 # The type of storage to use for storing user files. Supported values are `local` and `s3`, Default: `local` diff --git a/mock-server/.gitignore b/mock-server/.gitignore deleted file mode 100644 index 02651453d8..0000000000 --- a/mock-server/.gitignore +++ /dev/null @@ -1,117 +0,0 @@ -# Logs -logs -*.log -npm-debug.log* -yarn-debug.log* -yarn-error.log* -lerna-debug.log* - -# Diagnostic reports (https://nodejs.org/api/report.html) -report.[0-9]*.[0-9]*.[0-9]*.[0-9]*.json - -# Runtime data -pids -*.pid -*.seed -*.pid.lock - -# Directory for instrumented libs generated by jscoverage/JSCover -lib-cov - -# Coverage directory used by tools like istanbul -coverage -*.lcov - -# nyc test coverage -.nyc_output - -# Grunt intermediate storage (https://gruntjs.com/creating-plugins#storing-task-files) -.grunt - -# Bower dependency directory (https://bower.io/) -bower_components - -# node-waf configuration -.lock-wscript - -# Compiled binary addons (https://nodejs.org/api/addons.html) -build/Release - -# Dependency directories -node_modules/ -jspm_packages/ - -# TypeScript v1 declaration files -typings/ - -# TypeScript cache -*.tsbuildinfo - -# Optional npm cache directory -.npm - -# Optional eslint cache -.eslintcache - -# Microbundle cache -.rpt2_cache/ -.rts2_cache_cjs/ -.rts2_cache_es/ -.rts2_cache_umd/ - -# Optional REPL history -.node_repl_history - -# Output of 'npm pack' -*.tgz - -# Yarn Integrity file -.yarn-integrity - -# dotenv environment variables file -.env -.env.test - -# parcel-bundler cache (https://parceljs.org/) -.cache - -# Next.js build output -.next - -# Nuxt.js build / generate output -.nuxt -dist - -# Gatsby files -.cache/ -# Comment in the public line in if your project uses Gatsby and *not* Next.js -# 
https://nextjs.org/blog/next-9-1#public-directory-support -# public - -# vuepress build output -.vuepress/dist - -# Serverless directories -.serverless/ - -# FuseBox cache -.fusebox/ - -# DynamoDB Local files -.dynamodb/ - -# TernJS port file -.tern-port - -# npm -package-lock.json - -# yarn -.pnp.cjs -.pnp.loader.mjs -.yarn/ -yarn.lock -.yarnrc.yml - -# pmpm -pnpm-lock.yaml \ No newline at end of file diff --git a/mock-server/README.md b/mock-server/README.md deleted file mode 100644 index 7b0a621e84..0000000000 --- a/mock-server/README.md +++ /dev/null @@ -1 +0,0 @@ -# Mock Server diff --git a/mock-server/api/apps.js b/mock-server/api/apps.js deleted file mode 100644 index d704387376..0000000000 --- a/mock-server/api/apps.js +++ /dev/null @@ -1,551 +0,0 @@ -const chars = '0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ-_' - -function randomString (length) { - let result = '' - for (let i = length; i > 0; --i) result += chars[Math.floor(Math.random() * chars.length)] - return result -} - -// https://www.notion.so/55773516a0194781ae211792a44a3663?pvs=4 -const VirtualData = new Array(10).fill().map((_, index) => { - const date = new Date(Date.now() - index * 24 * 60 * 60 * 1000) - return { - date: `${date.getFullYear()}-${date.getMonth()}-${date.getDate()}`, - conversation_count: Math.floor(Math.random() * 10) + index, - terminal_count: Math.floor(Math.random() * 10) + index, - token_count: Math.floor(Math.random() * 10) + index, - total_price: Math.floor(Math.random() * 10) + index, - } -}) - -const registerAPI = function (app) { - const apps = [{ - id: '1', - name: 'chat app', - mode: 'chat', - description: 'description01', - enable_site: true, - enable_api: true, - api_rpm: 60, - api_rph: 3600, - is_demo: false, - model_config: { - provider: 'OPENAI', - model_id: 'gpt-3.5-turbo', - configs: { - prompt_template: '你是我的解梦小助手,请参考 {{book}} 回答我有关梦境的问题。在回答前请称呼我为 {{myName}}。', - prompt_variables: [ - { - key: 'book', - name: '书', - value: '《梦境解析》', - type: 
'string', - description: '请具体说下书名' - }, - { - key: 'myName', - name: 'your name', - value: 'Book', - type: 'string', - description: 'please tell me your name' - } - ], - completion_params: { - max_token: 16, - temperature: 1, // 0-2 - top_p: 1, - presence_penalty: 1, // -2-2 - frequency_penalty: 1, // -2-2 - } - } - }, - site: { - access_token: '1000', - title: 'site 01', - author: 'John', - default_language: 'zh-Hans-CN', - customize_domain: 'http://customize_domain', - theme: 'theme', - customize_token_strategy: 'must', - prompt_public: true - } - }, - { - id: '2', - name: 'completion app', - mode: 'completion', // genertation text - description: 'description 02', // genertation text - enable_site: false, - enable_api: false, - api_rpm: 60, - api_rph: 3600, - is_demo: false, - model_config: { - provider: 'OPENAI', - model_id: 'text-davinci-003', - configs: { - prompt_template: '你是我的翻译小助手,请把以下内容 {{langA}} 翻译成 {{langB}},以下的内容:', - prompt_variables: [ - { - key: 'langA', - name: '原始语音', - value: '中文', - type: 'string', - description: '这是中文格式的原始语音' - }, - { - key: 'langB', - name: '目标语言', - value: '英语', - type: 'string', - description: '这是英语格式的目标语言' - } - ], - completion_params: { - max_token: 16, - temperature: 1, // 0-2 - top_p: 1, - presence_penalty: 1, // -2-2 - frequency_penalty: 1, // -2-2 - } - } - }, - site: { - access_token: '2000', - title: 'site 02', - author: 'Mark', - default_language: 'en-US', - customize_domain: 'http://customize_domain', - theme: 'theme', - customize_token_strategy: 'must', - prompt_public: false - } - }, - ] - - const apikeys = [{ - id: '111121312313132', - token: 'sk-DEFGHJKMNPQRSTWXYZabcdefhijk1234', - last_used_at: '1679212138000', - created_at: '1673316000000' - }, { - id: '43441242131223123', - token: 'sk-EEFGHJKMNPQRSTWXYZabcdefhijk5678', - last_used_at: '1679212721000', - created_at: '1679212731000' - }] - - // create app - app.post('/apps', async (req, res) => { - apps.push({ - id: apps.length + 1 + '', - ...req.body, - - }) 
- res.send({ - result: 'success' - }) - }) - - // app list - app.get('/apps', async (req, res) => { - res.send({ - data: apps - }) - }) - - // app detail - app.get('/apps/:id', async (req, res) => { - const item = apps.find(item => item.id === req.params.id) || apps[0] - res.send(item) - }) - - // update app name - app.post('/apps/:id/name', async (req, res) => { - const item = apps.find(item => item.id === req.params.id) - item.name = req.body.name - res.send(item || null) - }) - - // update app site-enable status - app.post('/apps/:id/site-enable', async (req, res) => { - const item = apps.find(item => item.id === req.params.id) - console.log(item) - item.enable_site = req.body.enable_site - res.send(item || null) - }) - - // update app api-enable status - app.post('/apps/:id/api-enable', async (req, res) => { - const item = apps.find(item => item.id === req.params.id) - console.log(item) - item.enable_api = req.body.enable_api - res.send(item || null) - }) - - // update app rate-limit - app.post('/apps/:id/rate-limit', async (req, res) => { - const item = apps.find(item => item.id === req.params.id) - console.log(item) - item.api_rpm = req.body.api_rpm - item.api_rph = req.body.api_rph - res.send(item || null) - }) - - // update app url including code - app.post('/apps/:id/site/access-token-reset', async (req, res) => { - const item = apps.find(item => item.id === req.params.id) - console.log(item) - item.site.access_token = randomString(12) - res.send(item || null) - }) - - // update app config - app.post('/apps/:id/site', async (req, res) => { - const item = apps.find(item => item.id === req.params.id) - console.log(item) - item.name = req.body.title - item.description = req.body.description - item.prompt_public = req.body.prompt_public - item.default_language = req.body.default_language - res.send(item || null) - }) - - // get statistics daily-conversations - app.get('/apps/:id/statistics/daily-conversations', async (req, res) => { - const item = 
apps.find(item => item.id === req.params.id) - if (item) { - res.send({ - data: VirtualData - }) - } else { - res.send({ - data: [] - }) - } - }) - - // get statistics daily-end-users - app.get('/apps/:id/statistics/daily-end-users', async (req, res) => { - const item = apps.find(item => item.id === req.params.id) - if (item) { - res.send({ - data: VirtualData - }) - } else { - res.send({ - data: [] - }) - } - }) - - // get statistics token-costs - app.get('/apps/:id/statistics/token-costs', async (req, res) => { - const item = apps.find(item => item.id === req.params.id) - if (item) { - res.send({ - data: VirtualData - }) - } else { - res.send({ - data: [] - }) - } - }) - - // update app model config - app.post('/apps/:id/model-config', async (req, res) => { - const item = apps.find(item => item.id === req.params.id) - console.log(item) - item.model_config = req.body - res.send(item || null) - }) - - - // get api keys list - app.get('/apps/:id/api-keys', async (req, res) => { - res.send({ - data: apikeys - }) - }) - - // del api key - app.delete('/apps/:id/api-keys/:api_key_id', async (req, res) => { - res.send({ - result: 'success' - }) - }) - - // create api key - app.post('/apps/:id/api-keys', async (req, res) => { - res.send({ - id: 'e2424241313131', - token: 'sk-GEFGHJKMNPQRSTWXYZabcdefhijk0124', - created_at: '1679216688962' - }) - }) - - // get completion-conversations - app.get('/apps/:id/completion-conversations', async (req, res) => { - const data = { - data: [{ - id: 1, - from_end_user_id: 'user 1', - summary: 'summary1', - created_at: '2023-10-11', - annotated: true, - message_count: 100, - user_feedback_stats: { - like: 4, dislike: 5 - }, - admin_feedback_stats: { - like: 1, dislike: 2 - }, - message: { - message: 'message1', - query: 'question1', - answer: 'answer1' - } - }, { - id: 12, - from_end_user_id: 'user 2', - summary: 'summary2', - created_at: '2023-10-01', - annotated: false, - message_count: 10, - user_feedback_stats: { - like: 2, dislike: 
20 - }, - admin_feedback_stats: { - like: 12, dislike: 21 - }, - message: { - message: 'message2', - query: 'question2', - answer: 'answer2' - } - }, { - id: 13, - from_end_user_id: 'user 3', - summary: 'summary3', - created_at: '2023-10-11', - annotated: false, - message_count: 20, - user_feedback_stats: { - like: 2, dislike: 0 - }, - admin_feedback_stats: { - like: 0, dislike: 21 - }, - message: { - message: 'message3', - query: 'question3', - answer: 'answer3' - } - }], - total: 200 - } - res.send(data) - }) - - // get chat-conversations - app.get('/apps/:id/chat-conversations', async (req, res) => { - const data = { - data: [{ - id: 1, - from_end_user_id: 'user 1', - summary: 'summary1', - created_at: '2023-10-11', - read_at: '2023-10-12', - annotated: true, - message_count: 100, - user_feedback_stats: { - like: 4, dislike: 5 - }, - admin_feedback_stats: { - like: 1, dislike: 2 - }, - message: { - message: 'message1', - query: 'question1', - answer: 'answer1' - } - }, { - id: 12, - from_end_user_id: 'user 2', - summary: 'summary2', - created_at: '2023-10-01', - annotated: false, - message_count: 10, - user_feedback_stats: { - like: 2, dislike: 20 - }, - admin_feedback_stats: { - like: 12, dislike: 21 - }, - message: { - message: 'message2', - query: 'question2', - answer: 'answer2' - } - }, { - id: 13, - from_end_user_id: 'user 3', - summary: 'summary3', - created_at: '2023-10-11', - annotated: false, - message_count: 20, - user_feedback_stats: { - like: 2, dislike: 0 - }, - admin_feedback_stats: { - like: 0, dislike: 21 - }, - message: { - message: 'message3', - query: 'question3', - answer: 'answer3' - } - }], - total: 200 - } - res.send(data) - }) - - // get completion-conversation detail - app.get('/apps/:id/completion-conversations/:cid', async (req, res) => { - const data = - { - id: 1, - from_end_user_id: 'user 1', - summary: 'summary1', - created_at: '2023-10-11', - annotated: true, - message: { - message: 'question1', - // query: 'question1', - answer: 
'answer1', - annotation: { - content: '这是一段纠正的内容' - } - }, - model_config: { - provider: 'openai', - model_id: 'model_id', - configs: { - prompt_template: '你是我的翻译小助手,请把以下内容 {{langA}} 翻译成 {{langB}},以下的内容:{{content}}' - } - } - } - res.send(data) - }) - - // get chat-conversation detail - app.get('/apps/:id/chat-conversations/:cid', async (req, res) => { - const data = - { - id: 1, - from_end_user_id: 'user 1', - summary: 'summary1', - created_at: '2023-10-11', - annotated: true, - message: { - message: 'question1', - // query: 'question1', - answer: 'answer1', - created_at: '2023-08-09 13:00', - provider_response_latency: 130, - message_tokens: 230 - }, - model_config: { - provider: 'openai', - model_id: 'model_id', - configs: { - prompt_template: '你是我的翻译小助手,请把以下内容 {{langA}} 翻译成 {{langB}},以下的内容:{{content}}' - } - } - } - res.send(data) - }) - - // get chat-conversation message list - app.get('/apps/:id/chat-messages', async (req, res) => { - const data = { - data: [{ - id: 1, - created_at: '2023-10-11 07:09', - message: '请说说人为什么会做梦?' 
+ req.query.conversation_id, - answer: '梦境通常是个人内心深处的反映,很难确定每个人梦境的确切含义,因为它们可能会受到梦境者的文化背景、生活经验和情感状态等多种因素的影响。', - provider_response_latency: 450, - answer_tokens: 200, - annotation: { - content: 'string', - account: { - id: 'string', - name: 'string', - email: 'string' - } - }, - feedbacks: { - rating: 'like', - content: 'string', - from_source: 'log' - } - }, { - id: 2, - created_at: '2023-10-11 8:23', - message: '夜里经常做梦会影响次日的精神状态吗?', - answer: '总之,这个梦境可能与梦境者的个人经历和情感状态有关,但在一般情况下,它可能表示一种强烈的情感反应,包括愤怒、不满和对于正义和自由的渴望。', - provider_response_latency: 400, - answer_tokens: 250, - annotation: { - content: 'string', - account: { - id: 'string', - name: 'string', - email: 'string' - } - }, - // feedbacks: { - // rating: 'like', - // content: 'string', - // from_source: 'log' - // } - }, { - id: 3, - created_at: '2023-10-11 10:20', - message: '梦见在山上手撕鬼子,大师解解梦', - answer: '但是,一般来说,“手撕鬼子”这个场景可能是梦境者对于过去历史上的战争、侵略以及对于自己国家和族群的保护与维护的情感反应。在梦中,你可能会感到自己充满力量和勇气,去对抗那些看似强大的侵略者。', - provider_response_latency: 288, - answer_tokens: 100, - annotation: { - content: 'string', - account: { - id: 'string', - name: 'string', - email: 'string' - } - }, - feedbacks: { - rating: 'dislike', - content: 'string', - from_source: 'log' - } - }], - limit: 20, - has_more: true - } - res.send(data) - }) - - app.post('/apps/:id/annotations', async (req, res) => { - res.send({ result: 'success' }) - }) - - app.post('/apps/:id/feedbacks', async (req, res) => { - res.send({ result: 'success' }) - }) - -} - -module.exports = registerAPI \ No newline at end of file diff --git a/mock-server/api/common.js b/mock-server/api/common.js deleted file mode 100644 index 3e43ad524a..0000000000 --- a/mock-server/api/common.js +++ /dev/null @@ -1,38 +0,0 @@ - -const registerAPI = function (app) { - app.post('/login', async (req, res) => { - res.send({ - result: 'success' - }) - }) - - // get user info - app.get('/account/profile', async (req, res) => { - res.send({ - id: '11122222', - name: 'Joel', - email: 
'iamjoel007@gmail.com' - }) - }) - - // logout - app.get('/logout', async (req, res) => { - res.send({ - result: 'success' - }) - }) - - // Langgenius version - app.get('/version', async (req, res) => { - res.send({ - current_version: 'v1.0.0', - latest_version: 'v1.0.0', - upgradeable: true, - compatible_upgrade: true - }) - }) - -} - -module.exports = registerAPI - diff --git a/mock-server/api/datasets.js b/mock-server/api/datasets.js deleted file mode 100644 index 0821b3786b..0000000000 --- a/mock-server/api/datasets.js +++ /dev/null @@ -1,249 +0,0 @@ -const registerAPI = function (app) { - app.get("/datasets/:id/documents", async (req, res) => { - if (req.params.id === "0") res.send({ data: [] }); - else { - res.send({ - data: [ - { - id: 1, - name: "Steve Jobs' life", - words: "70k", - word_count: 100, - updated_at: 1681801029, - indexing_status: "completed", - archived: true, - enabled: false, - data_source_info: { - upload_file: { - // id: string - // name: string - // size: number - // mime_type: string - // created_at: number - // created_by: string - extension: "pdf", - }, - }, - }, - { - id: 2, - name: "Steve Jobs' life", - word_count: "10k", - hit_count: 10, - updated_at: 1681801029, - indexing_status: "waiting", - archived: true, - enabled: false, - data_source_info: { - upload_file: { - extension: "json", - }, - }, - }, - { - id: 3, - name: "Steve Jobs' life xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx", - word_count: "100k", - hit_count: 0, - updated_at: 1681801029, - indexing_status: "indexing", - archived: false, - enabled: true, - data_source_info: { - upload_file: { - extension: "txt", - }, - }, - }, - { - id: 4, - name: "Steve Jobs' life xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx", - word_count: "100k", - hit_count: 0, - updated_at: 1681801029, - indexing_status: "splitting", - archived: false, - enabled: true, - data_source_info: { - upload_file: { - extension: "md", - }, - }, - }, - { - id: 5, - name: "Steve Jobs' life", - word_count: "100k", - hit_count: 0, - 
updated_at: 1681801029, - indexing_status: "error", - archived: false, - enabled: false, - data_source_info: { - upload_file: { - extension: "html", - }, - }, - }, - ], - total: 100, - id: req.params.id, - }); - } - }); - - app.get("/datasets/:id/documents/:did/segments", async (req, res) => { - if (req.params.id === "0") res.send({ data: [] }); - else { - res.send({ - data: new Array(100).fill({ - id: 1234, - content: `他的坚持让我很为难。众所周知他非常注意保护自己的隐私,而我想他应该从来没有看过我写的书。也许将来的某个时候吧,我还是这么说。但是,到了2009年,他的妻子劳伦·鲍威尔(Laurene Powell)直言不讳地对我说:“如果你真的打算写一本关于史蒂夫的书,最好现在就开始。”他当时刚刚第二次因病休假。我向劳伦坦承,当乔布斯第一次提出这个想法时,我并不知道他病了。几乎没有人知道,她说。他是在接受癌症手术之前给我打的电话,直到今天他还将此事作为一个秘密,她这么解释道。\n - 他的坚持让我很为难。众所周知他非常注意保护自己的隐私,而我想他应该从来没有看过我写的书。也许将来的某个时候吧,我还是这么说。但是,到了2009年,他的妻子劳伦·鲍威尔(Laurene Powell)直言不讳地对我说:“如果你真的打算写一本关于史蒂夫的书,最好现在就开始。”他当时刚刚第二次因病休假。我向劳伦坦承,当乔布斯第一次提出这个想法时,我并不知道他病了。几乎没有人知道,她说。他是在接受癌症手术之前给我打的电话,直到今天他还将此事作为一个秘密,她这么解释道。`, - enabled: true, - keyWords: [ - "劳伦·鲍威尔", - "劳伦·鲍威尔", - "手术", - "秘密", - "癌症", - "乔布斯", - "史蒂夫", - "书", - "休假", - "坚持", - "隐私", - ], - word_count: 120, - hit_count: 100, - status: "ok", - index_node_hash: "index_node_hash value", - }), - limit: 100, - has_more: true, - }); - } - }); - - // get doc detail - app.get("/datasets/:id/documents/:did", async (req, res) => { - const fixedParams = { - // originInfo: { - originalFilename: "Original filename", - originalFileSize: "16mb", - uploadDate: "2023-01-01", - lastUpdateDate: "2023-01-05", - source: "Source", - // }, - // technicalParameters: { - segmentSpecification: "909090", - segmentLength: 100, - avgParagraphLength: 130, - }; - const bookData = { - doc_type: "book", - doc_metadata: { - title: "机器学习实战", - language: "zh", - author: "Peter Harrington", - publisher: "人民邮电出版社", - publicationDate: "2013-01-01", - ISBN: "9787115335500", - category: "技术", - }, - }; - const webData = { - doc_type: "webPage", - doc_metadata: { - title: "深度学习入门教程", - url: "https://www.example.com/deep-learning-tutorial", - language: "zh", - publishDate: 
"2020-05-01", - authorPublisher: "张三", - topicsKeywords: "深度学习, 人工智能, 教程", - description: - "这是一篇详细的深度学习入门教程,适用于对人工智能和深度学习感兴趣的初学者。", - }, - }; - const postData = { - doc_type: "socialMediaPost", - doc_metadata: { - platform: "Twitter", - authorUsername: "example_user", - publishDate: "2021-08-15", - postURL: "https://twitter.com/example_user/status/1234567890", - topicsTags: - "AI, DeepLearning, Tutorial, Example, Example2, Example3, AI, DeepLearning, Tutorial, Example, Example2, Example3, AI, DeepLearning, Tutorial, Example, Example2, Example3,", - }, - }; - res.send({ - id: "550e8400-e29b-41d4-a716-446655440000", - position: 1, - dataset_id: "550e8400-e29b-41d4-a716-446655440002", - data_source_type: "upload_file", - data_source_info: { - upload_file: { - extension: "html", - id: "550e8400-e29b-41d4-a716-446655440003", - }, - }, - dataset_process_rule_id: "550e8400-e29b-41d4-a716-446655440004", - batch: "20230410123456123456", - name: "example_document", - created_from: "web", - created_by: "550e8400-e29b-41d4-a716-446655440005", - created_api_request_id: "550e8400-e29b-41d4-a716-446655440006", - created_at: 1671269696, - processing_started_at: 1671269700, - word_count: 11, - parsing_completed_at: 1671269710, - cleaning_completed_at: 1671269720, - splitting_completed_at: 1671269730, - tokens: 10, - indexing_latency: 5.0, - completed_at: 1671269740, - paused_by: null, - paused_at: null, - error: null, - stopped_at: null, - indexing_status: "completed", - enabled: true, - disabled_at: null, - disabled_by: null, - archived: false, - archived_reason: null, - archived_by: null, - archived_at: null, - updated_at: 1671269740, - ...(req.params.did === "book" - ? bookData - : req.params.did === "web" - ? webData - : req.params.did === "post" - ? 
postData - : {}), - segment_count: 10, - hit_count: 9, - status: "ok", - }); - }); - - // // logout - // app.get("/logout", async (req, res) => { - // res.send({ - // result: "success", - // }); - // }); - - // // Langgenius version - // app.get("/version", async (req, res) => { - // res.send({ - // current_version: "v1.0.0", - // latest_version: "v1.0.0", - // upgradeable: true, - // compatible_upgrade: true, - // }); - // }); -}; - -module.exports = registerAPI; diff --git a/mock-server/api/debug.js b/mock-server/api/debug.js deleted file mode 100644 index 2e6f3ca0a7..0000000000 --- a/mock-server/api/debug.js +++ /dev/null @@ -1,119 +0,0 @@ -const registerAPI = function (app) { - const coversationList = [ - { - id: '1', - name: '梦的解析', - inputs: { - book: '《梦的解析》', - callMe: '大师', - }, - chats: [] - }, - { - id: '2', - name: '生命的起源', - inputs: { - book: '《x x x》', - } - }, - ] - // site info - app.get('/apps/site/info', async (req, res) => { - // const id = req.params.id - res.send({ - enable_site: true, - appId: '1', - site: { - title: 'Story Bot', - description: '这是一款解梦聊天机器人,你可以选择你喜欢的解梦人进行解梦,这句话是客户端应用说明', - }, - prompt_public: true, //id === '1', - prompt_template: '你是我的解梦小助手,请参考 {{book}} 回答我有关梦境的问题。在回答前请称呼我为 {{myName}}。', - }) - }) - - app.post('/apps/:id/chat-messages', async (req, res) => { - const conversationId = req.body.conversation_id ? 
req.body.conversation_id : Date.now() + '' - res.send({ - id: Date.now() + '', - conversation_id: Date.now() + '', - answer: 'balabababab' - }) - }) - - app.post('/apps/:id/completion-messages', async (req, res) => { - res.send({ - id: Date.now() + '', - answer: `做为一个AI助手,我可以为你提供随机生成的段落,这些段落可以用于测试、占位符、或者其他目的。以下是一个随机生成的段落: - - “随着科技的不断发展,越来越多的人开始意识到人工智能的重要性。人工智能已经成为我们生活中不可或缺的一部分,它可以帮助我们完成很多繁琐的工作,也可以为我们提供更智能、更便捷的服务。虽然人工智能带来了很多好处,但它也面临着很多挑战。例如,人工智能的算法可能会出现偏见,导致对某些人群不公平。此外,人工智能的发展也可能会导致一些工作的失业。因此,我们需要不断地研究人工智能的发展,以确保它能够为人类带来更多的好处。”` - }) - }) - - // share api - // chat list - app.get('/apps/:id/coversations', async (req, res) => { - res.send({ - data: coversationList - }) - }) - - - - app.get('/apps/:id/variables', async (req, res) => { - res.send({ - variables: [ - { - key: 'book', - name: '书', - value: '《梦境解析》', - type: 'string' - }, - { - key: 'myName', - name: '称呼', - value: '', - type: 'string' - } - ], - }) - }) - -} - -module.exports = registerAPI - -// const chatList = [ -// { -// id: 1, -// content: 'AI 开场白', -// isAnswer: true, -// }, -// { -// id: 2, -// content: '梦见在山上手撕鬼子,大师解解梦', -// more: { time: '5.6 秒' }, -// }, -// { -// id: 3, -// content: '梦境通常是个人内心深处的反映,很难确定每个人梦境的确切含义,因为它们可能会受到梦境者的文化背景、生活经验和情感状态等多种因素的影响。', -// isAnswer: true, -// more: { time: '99 秒' }, - -// }, -// { -// id: 4, -// content: '梦见在山上手撕鬼子,大师解解梦', -// more: { time: '5.6 秒' }, -// }, -// { -// id: 5, -// content: '梦见在山上手撕鬼子,大师解解梦', -// more: { time: '5.6 秒' }, -// }, -// { -// id: 6, -// content: '梦见在山上手撕鬼子,大师解解梦', -// more: { time: '5.6 秒' }, -// }, -// ] \ No newline at end of file diff --git a/mock-server/api/demo.js b/mock-server/api/demo.js deleted file mode 100644 index 8f8a35079b..0000000000 --- a/mock-server/api/demo.js +++ /dev/null @@ -1,15 +0,0 @@ -const registerAPI = function (app) { - app.get('/demo', async (req, res) => { - res.send({ - des: 'get res' - }) - }) - - app.post('/demo', async (req, res) => { - res.send({ - des: 'post res' - }) - }) -} - -module.exports = 
registerAPI \ No newline at end of file diff --git a/mock-server/app.js b/mock-server/app.js deleted file mode 100644 index 96eec0ab2a..0000000000 --- a/mock-server/app.js +++ /dev/null @@ -1,42 +0,0 @@ -const express = require('express') -const app = express() -const bodyParser = require('body-parser') -var cors = require('cors') - -const commonAPI = require('./api/common') -const demoAPI = require('./api/demo') -const appsApi = require('./api/apps') -const debugAPI = require('./api/debug') -const datasetsAPI = require('./api/datasets') - -const port = 3001 - -app.use(bodyParser.json()) // for parsing application/json -app.use(bodyParser.urlencoded({ extended: true })) // for parsing application/x-www-form-urlencoded - -const corsOptions = { - origin: true, - credentials: true, -} -app.use(cors(corsOptions)) // for cross origin -app.options('*', cors(corsOptions)) // include before other routes - - -demoAPI(app) -commonAPI(app) -appsApi(app) -debugAPI(app) -datasetsAPI(app) - - -app.get('/', (req, res) => { - res.send('rootpath') -}) - -app.listen(port, () => { - console.log(`Mock run on port ${port}`) -}) - -const sleep = (ms) => { - return new Promise(resolve => setTimeout(resolve, ms)) -} diff --git a/mock-server/package.json b/mock-server/package.json deleted file mode 100644 index 11a68d61e7..0000000000 --- a/mock-server/package.json +++ /dev/null @@ -1,26 +0,0 @@ -{ - "name": "server", - "version": "1.0.0", - "description": "", - "main": "index.js", - "scripts": { - "dev": "nodemon node app.js", - "start": "node app.js", - "tcp": "node tcp.js" - }, - "keywords": [], - "author": "", - "license": "MIT", - "engines": { - "node": ">=16.0.0" - }, - "dependencies": { - "body-parser": "^1.20.2", - "cors": "^2.8.5", - "express": "4.18.2", - "express-jwt": "8.4.1" - }, - "devDependencies": { - "nodemon": "2.0.21" - } -} diff --git a/web/.env.example b/web/.env.example new file mode 100644 index 0000000000..acd853834f --- /dev/null +++ b/web/.env.example @@ -0,0 +1,12 
@@ +# For production release, change this to PRODUCTION +NEXT_PUBLIC_DEPLOY_ENV=DEVELOPMENT +# The deployment edition, SELF_HOSTED or CLOUD +NEXT_PUBLIC_EDITION=SELF_HOSTED +# The base URL of console application, refers to the Console base URL of WEB service if console domain is +# different from api or web app domain. +# example: http://cloud.dify.ai/console/api +NEXT_PUBLIC_API_PREFIX=http://localhost:5001/console/api +# The URL for Web APP, refers to the Web App base URL of WEB service if web app domain is different from +# console or api domain. +# example: http://udify.app/api +NEXT_PUBLIC_PUBLIC_API_PREFIX=http://localhost:5001/api \ No newline at end of file diff --git a/web/app/(commonLayout)/_layout-client.tsx b/web/app/(commonLayout)/_layout-client.tsx index 8624091de2..799f8985d3 100644 --- a/web/app/(commonLayout)/_layout-client.tsx +++ b/web/app/(commonLayout)/_layout-client.tsx @@ -1,5 +1,5 @@ 'use client' -import type { FC } from 'react' +import { FC, useRef } from 'react' import React, { useEffect, useState } from 'react' import { usePathname, useRouter, useSelectedLayoutSegments } from 'next/navigation' import useSWR, { SWRConfig } from 'swr' @@ -8,7 +8,7 @@ import { fetchAppList } from '@/service/apps' import { fetchDatasets } from '@/service/datasets' import { fetchLanggeniusVersion, fetchUserProfile, logout } from '@/service/common' import Loading from '@/app/components/base/loading' -import AppContext from '@/context/app-context' +import { AppContextProvider } from '@/context/app-context' import DatasetsContext from '@/context/datasets-context' import type { LangGeniusVersionResponse, UserProfileResponse } from '@/models/common' @@ -23,6 +23,7 @@ const CommonLayout: FC = ({ children }) => { const pattern = pathname.replace(/.*\/app\//, '') const [idOrMethod] = pattern.split('/') const isNotDetailPage = idOrMethod === 'list' + const pageContainerRef = useRef(null) const appId = isNotDetailPage ? 
'' : idOrMethod @@ -71,14 +72,14 @@ const CommonLayout: FC = ({ children }) => { - - -
+ + +
{children}
- +
) } diff --git a/web/app/(commonLayout)/app/(appDetailLayout)/[appId]/layout.tsx b/web/app/(commonLayout)/app/(appDetailLayout)/[appId]/layout.tsx index 049d908dde..97b0164ea1 100644 --- a/web/app/(commonLayout)/app/(appDetailLayout)/[appId]/layout.tsx +++ b/web/app/(commonLayout)/app/(appDetailLayout)/[appId]/layout.tsx @@ -49,7 +49,7 @@ const AppDetailLayout: FC = (props) => { return null return (
- +
{children}
) diff --git a/web/app/(commonLayout)/apps/AppCard.tsx b/web/app/(commonLayout)/apps/AppCard.tsx index eb62cd8899..f08ce3c7a9 100644 --- a/web/app/(commonLayout)/apps/AppCard.tsx +++ b/web/app/(commonLayout)/apps/AppCard.tsx @@ -16,10 +16,12 @@ import AppsContext from '@/context/app-context' export type AppCardProps = { app: App + onDelete?: () => void } const AppCard = ({ app, + onDelete }: AppCardProps) => { const { t } = useTranslation() const { notify } = useContext(ToastContext) @@ -35,6 +37,8 @@ const AppCard = ({ try { await deleteApp(app.id) notify({ type: 'success', message: t('app.appDeleted') }) + if (onDelete) + onDelete() mutateApps() } catch (e: any) { @@ -47,7 +51,7 @@ const AppCard = ({ <>
- +
{app.name}
diff --git a/web/app/(commonLayout)/apps/Apps.tsx b/web/app/(commonLayout)/apps/Apps.tsx index b11b0da6a0..11da845165 100644 --- a/web/app/(commonLayout)/apps/Apps.tsx +++ b/web/app/(commonLayout)/apps/Apps.tsx @@ -1,21 +1,51 @@ 'use client' -import { useEffect } from 'react' +import { useEffect, useRef } from 'react' +import useSWRInfinite from 'swr/infinite' +import { debounce } from 'lodash-es' import AppCard from './AppCard' import NewAppCard from './NewAppCard' -import { useAppContext } from '@/context/app-context' +import { AppListResponse } from '@/models/app' +import { fetchAppList } from '@/service/apps' +import { useSelector } from '@/context/app-context' + +const getKey = (pageIndex: number, previousPageData: AppListResponse) => { + if (!pageIndex || previousPageData.has_more) + return { url: 'apps', params: { page: pageIndex + 1, limit: 30 } } + return null +} const Apps = () => { - const { apps, mutateApps } = useAppContext() + const { data, isLoading, setSize, mutate } = useSWRInfinite(getKey, fetchAppList, { revalidateFirstPage: false }) + const loadingStateRef = useRef(false) + const pageContainerRef = useSelector(state => state.pageContainerRef) + const anchorRef = useRef(null) useEffect(() => { - mutateApps() + loadingStateRef.current = isLoading + }, [isLoading]) + + useEffect(() => { + const onScroll = debounce(() => { + if (!loadingStateRef.current) { + const { scrollTop, clientHeight } = pageContainerRef.current! 
+ const anchorOffset = anchorRef.current!.offsetTop + if (anchorOffset - scrollTop - clientHeight < 100) { + setSize(size => size + 1) + } + } + }, 50) + + pageContainerRef.current?.addEventListener('scroll', onScroll) + return () => pageContainerRef.current?.removeEventListener('scroll', onScroll) }, []) return ( ) } diff --git a/web/app/(commonLayout)/apps/NewAppCard.tsx b/web/app/(commonLayout)/apps/NewAppCard.tsx index 7fee93534e..f8cfb4062c 100644 --- a/web/app/(commonLayout)/apps/NewAppCard.tsx +++ b/web/app/(commonLayout)/apps/NewAppCard.tsx @@ -1,17 +1,20 @@ 'use client' -import { useState } from 'react' +import { forwardRef, useState } from 'react' import classNames from 'classnames' import { useTranslation } from 'react-i18next' import style from '../list.module.css' import NewAppDialog from './NewAppDialog' -const CreateAppCard = () => { +export type CreateAppCardProps = { + onSuccess?: () => void +} + +const CreateAppCard = forwardRef(({ onSuccess }, ref) => { const { t } = useTranslation() const [showNewAppDialog, setShowNewAppDialog] = useState(false) - return ( - setShowNewAppDialog(true)}> + setShowNewAppDialog(true)}>
@@ -21,9 +24,9 @@ const CreateAppCard = () => {
{/*
{t('app.createFromConfigFile')}
*/} - setShowNewAppDialog(false)} /> + setShowNewAppDialog(false)} />
) -} +}) export default CreateAppCard diff --git a/web/app/(commonLayout)/apps/NewAppDialog.tsx b/web/app/(commonLayout)/apps/NewAppDialog.tsx index 3b434fa3b2..e378560dd4 100644 --- a/web/app/(commonLayout)/apps/NewAppDialog.tsx +++ b/web/app/(commonLayout)/apps/NewAppDialog.tsx @@ -17,12 +17,15 @@ import { createApp, fetchAppTemplates } from '@/service/apps' import AppIcon from '@/app/components/base/app-icon' import AppsContext from '@/context/app-context' +import EmojiPicker from '@/app/components/base/emoji-picker' + type NewAppDialogProps = { show: boolean + onSuccess?: () => void onClose?: () => void } -const NewAppDialog = ({ show, onClose }: NewAppDialogProps) => { +const NewAppDialog = ({ show, onSuccess, onClose }: NewAppDialogProps) => { const router = useRouter() const { notify } = useContext(ToastContext) const { t } = useTranslation() @@ -31,6 +34,11 @@ const NewAppDialog = ({ show, onClose }: NewAppDialogProps) => { const [newAppMode, setNewAppMode] = useState() const [isWithTemplate, setIsWithTemplate] = useState(false) const [selectedTemplateIndex, setSelectedTemplateIndex] = useState(-1) + + // Emoji Picker + const [showEmojiPicker, setShowEmojiPicker] = useState(false) + const [emoji, setEmoji] = useState({ icon: '🍌', icon_background: '#FFEAD5' }) + const mutateApps = useContextSelector(AppsContext, state => state.mutateApps) const { data: templates, mutate } = useSWR({ url: '/app-templates' }, fetchAppTemplates) @@ -67,9 +75,13 @@ const NewAppDialog = ({ show, onClose }: NewAppDialogProps) => { try { const app = await createApp({ name, + icon: emoji.icon, + icon_background: emoji.icon_background, mode: isWithTemplate ? templates.data[selectedTemplateIndex].mode : newAppMode!, config: isWithTemplate ? 
templates.data[selectedTemplateIndex].model_config : undefined, }) + if (onSuccess) + onSuccess() if (onClose) onClose() notify({ type: 'success', message: t('app.newApp.appCreated') }) @@ -80,9 +92,20 @@ const NewAppDialog = ({ show, onClose }: NewAppDialogProps) => { notify({ type: 'error', message: t('app.newApp.appCreateFailed') }) } isCreatingRef.current = false - }, [isWithTemplate, newAppMode, notify, router, templates, selectedTemplateIndex]) + }, [isWithTemplate, newAppMode, notify, router, templates, selectedTemplateIndex, emoji]) - return ( + return <> + {showEmojiPicker && { + console.log(icon, icon_background) + setEmoji({ icon, icon_background }) + setShowEmojiPicker(false) + }} + onClose={() => { + setEmoji({ icon: '🍌', icon_background: '#FFEAD5' }) + setShowEmojiPicker(false) + }} + />} {

{t('app.newApp.captionName')}

- + { setShowEmojiPicker(true) }} className='cursor-pointer' icon={emoji.icon} background={emoji.icon_background} />
@@ -187,7 +210,7 @@ const NewAppDialog = ({ show, onClose }: NewAppDialogProps) => { )}
- ) + } export default NewAppDialog diff --git a/web/app/(commonLayout)/datasets/(datasetDetailLayout)/[datasetId]/layout.tsx b/web/app/(commonLayout)/datasets/(datasetDetailLayout)/[datasetId]/layout.tsx index 1dc6578977..48b0bbc9c5 100644 --- a/web/app/(commonLayout)/datasets/(datasetDetailLayout)/[datasetId]/layout.tsx +++ b/web/app/(commonLayout)/datasets/(datasetDetailLayout)/[datasetId]/layout.tsx @@ -155,6 +155,8 @@ const DatasetDetailLayout: FC = (props) => {
{!hideSideBar && } diff --git a/web/app/(commonLayout)/datasets/DatasetCard.tsx b/web/app/(commonLayout)/datasets/DatasetCard.tsx index b6786d0519..a27ac5955c 100644 --- a/web/app/(commonLayout)/datasets/DatasetCard.tsx +++ b/web/app/(commonLayout)/datasets/DatasetCard.tsx @@ -18,16 +18,16 @@ import classNames from 'classnames' export type DatasetCardProps = { dataset: DataSet + onDelete?: () => void } const DatasetCard = ({ dataset, + onDelete }: DatasetCardProps) => { const { t } = useTranslation() const { notify } = useContext(ToastContext) - const { mutate: mutateDatasets } = useSWR({ url: '/datasets', params: { page: 1 } }, fetchDatasets) - const [showConfirmDelete, setShowConfirmDelete] = useState(false) const onDeleteClick: MouseEventHandler = useCallback((e) => { e.preventDefault() @@ -37,7 +37,8 @@ const DatasetCard = ({ try { await deleteDataset(dataset.id) notify({ type: 'success', message: t('dataset.datasetDeleted') }) - mutateDatasets() + if (onDelete) + onDelete() } catch (e: any) { notify({ type: 'error', message: `${t('dataset.datasetDeleteFailed')}${'message' in e ? 
`: ${e.message}` : ''}` }) diff --git a/web/app/(commonLayout)/datasets/Datasets.tsx b/web/app/(commonLayout)/datasets/Datasets.tsx index b044547748..649ba64000 100644 --- a/web/app/(commonLayout)/datasets/Datasets.tsx +++ b/web/app/(commonLayout)/datasets/Datasets.tsx @@ -1,24 +1,51 @@ 'use client' -import { useEffect } from 'react' -import useSWR from 'swr' -import { DataSet } from '@/models/datasets'; +import { useEffect, useRef } from 'react' +import useSWRInfinite from 'swr/infinite' +import { debounce } from 'lodash-es'; +import { DataSetListResponse } from '@/models/datasets'; import NewDatasetCard from './NewDatasetCard' import DatasetCard from './DatasetCard'; import { fetchDatasets } from '@/service/datasets'; +import { useSelector } from '@/context/app-context'; + +const getKey = (pageIndex: number, previousPageData: DataSetListResponse) => { + if (!pageIndex || previousPageData.has_more) + return { url: 'datasets', params: { page: pageIndex + 1, limit: 30 } } + return null +} const Datasets = () => { - // const { datasets, mutateDatasets } = useAppContext() - const { data: datasetList, mutate: mutateDatasets } = useSWR({ url: '/datasets', params: { page: 1 } }, fetchDatasets) + const { data, isLoading, setSize, mutate } = useSWRInfinite(getKey, fetchDatasets, { revalidateFirstPage: false }) + const loadingStateRef = useRef(false) + const pageContainerRef = useSelector(state => state.pageContainerRef) + const anchorRef = useRef(null) useEffect(() => { - mutateDatasets() + loadingStateRef.current = isLoading + }, [isLoading]) + + useEffect(() => { + const onScroll = debounce(() => { + if (!loadingStateRef.current) { + const { scrollTop, clientHeight } = pageContainerRef.current! 
+ const anchorOffset = anchorRef.current!.offsetTop + if (anchorOffset - scrollTop - clientHeight < 100) { + setSize(size => size + 1) + } + } + }, 50) + + pageContainerRef.current?.addEventListener('scroll', onScroll) + return () => pageContainerRef.current?.removeEventListener('scroll', onScroll) }, []) return ( ) } diff --git a/web/app/(commonLayout)/datasets/NewDatasetCard.tsx b/web/app/(commonLayout)/datasets/NewDatasetCard.tsx index a3f6282c97..72f6b18dcc 100644 --- a/web/app/(commonLayout)/datasets/NewDatasetCard.tsx +++ b/web/app/(commonLayout)/datasets/NewDatasetCard.tsx @@ -1,16 +1,16 @@ 'use client' -import { useState } from 'react' +import { forwardRef, useState } from 'react' import classNames from 'classnames' import { useTranslation } from 'react-i18next' import style from '../list.module.css' -const CreateAppCard = () => { +const CreateAppCard = forwardRef((_, ref) => { const { t } = useTranslation() const [showNewAppDialog, setShowNewAppDialog] = useState(false) return ( - +
@@ -23,6 +23,6 @@ const CreateAppCard = () => { {/*
{t('app.createFromConfigFile')}
*/}
) -} +}) export default CreateAppCard diff --git a/web/app/api/hello/route.ts b/web/app/api/hello/route.ts deleted file mode 100644 index d3a7036df1..0000000000 --- a/web/app/api/hello/route.ts +++ /dev/null @@ -1,3 +0,0 @@ -export async function GET(_request: Request) { - return new Response('Hello, Next.js!') -} diff --git a/web/app/components/app-sidebar/basic.tsx b/web/app/components/app-sidebar/basic.tsx index 4cefafa0c1..55094c6190 100644 --- a/web/app/components/app-sidebar/basic.tsx +++ b/web/app/components/app-sidebar/basic.tsx @@ -15,7 +15,8 @@ export function randomString(length: number) { export type IAppBasicProps = { iconType?: 'app' | 'api' | 'dataset' - iconUrl?: string + icon?: string, + icon_background?: string, name: string type: string | React.ReactNode hoverTip?: string @@ -41,15 +42,20 @@ const ICON_MAP = { 'dataset': } -export default function AppBasic({ iconUrl, name, type, hoverTip, textStyle, iconType = 'app' }: IAppBasicProps) { +export default function AppBasic({ icon, icon_background, name, type, hoverTip, textStyle, iconType = 'app' }: IAppBasicProps) { return (
- {iconUrl && ( + {icon && icon_background && iconType === 'app' && (
- {/* {name} */} - {ICON_MAP[iconType]} +
)} + {iconType !== 'app' && +
+ {ICON_MAP[iconType]} +
+ + }
{name} diff --git a/web/app/components/app-sidebar/index.tsx b/web/app/components/app-sidebar/index.tsx index cdafcce78e..eb3a444554 100644 --- a/web/app/components/app-sidebar/index.tsx +++ b/web/app/components/app-sidebar/index.tsx @@ -7,6 +7,8 @@ export type IAppDetailNavProps = { iconType?: 'app' | 'dataset' title: string desc: string + icon: string + icon_background: string navigation: Array<{ name: string href: string @@ -16,13 +18,12 @@ export type IAppDetailNavProps = { extraInfo?: React.ReactNode } -const sampleAppIconUrl = 'https://images.unsplash.com/photo-1472099645785-5658abf4ff4e?ixlib=rb-1.2.1&ixid=eyJhcHBfaWQiOjEyMDd9&auto=format&fit=facearea&facepad=2&w=256&h=256&q=80' -const AppDetailNav: FC = ({ title, desc, navigation, extraInfo, iconType = 'app' }) => { +const AppDetailNav: FC = ({ title, desc, icon, icon_background, navigation, extraInfo, iconType = 'app' }) => { return (
- +
}
-
-
-
- {item.isOpeningStatement && ( -
- -
{t('appDebug.openingStatement.title')}
-
- )} - {(isResponsing && !content) ? ( -
- -
- ) : ( - - )} - {!showEdit - ? (annotation?.content - && <> - - {annotation.content} - ) - : <> - - setInputValue(e.target.value)} - minHeight={58} - className={`${cn(s.textArea)} !py-2 resize-none block w-full !px-3 bg-gray-50 border border-gray-200 rounded-md shadow-sm focus:outline-none focus:ring-blue-500 focus:border-blue-500 sm:text-sm text-gray-700 tracking-[0.2px]`} - /> -
- - +
+
+
+
+ {item.isOpeningStatement && ( +
+ +
{t('appDebug.openingStatement.title')}
- - } -
-
- {!feedbackDisabled && !item.feedbackDisabled && renderItemOperation(displayScene !== 'console')} - {/* Admin feedback is displayed only in the background. */} - {!feedbackDisabled && renderFeedbackRating(localAdminFeedback?.rating, false, false)} - {/* User feedback must be displayed */} - {!feedbackDisabled && renderFeedbackRating(feedback?.rating, !isHideFeedbackEdit, displayScene !== 'console')} + )} + {(isResponsing && !content) ? ( +
+ +
+ ) : ( + + )} + {!showEdit + ? (annotation?.content + && <> + + {annotation.content} + ) + : <> + + setInputValue(e.target.value)} + minHeight={58} + className={`${cn(s.textArea)} !py-2 resize-none block w-full !px-3 bg-gray-50 border border-gray-200 rounded-md shadow-sm focus:outline-none focus:ring-blue-500 focus:border-blue-500 sm:text-sm text-gray-700 tracking-[0.2px]`} + /> +
+ + +
+ + } +
+
+ + {!feedbackDisabled && !item.feedbackDisabled && renderItemOperation(displayScene !== 'console')} + {/* Admin feedback is displayed only in the background. */} + {!feedbackDisabled && renderFeedbackRating(localAdminFeedback?.rating, false, false)} + {/* User feedback must be displayed */} + {!feedbackDisabled && renderFeedbackRating(feedback?.rating, !isHideFeedbackEdit, displayScene !== 'console')} +
+ {more && }
- {more && }
@@ -367,7 +374,7 @@ const Question: FC = ({ id, content, more, useCurrentUserAvatar const userName = userProfile?.name return (
-
+
= ({ key: 'max_tokens', tip: t('common.model.params.maxTokenTip'), step: 100, - max: 4000, + max: modelId === 'gpt-4' ? 8000 : 4000, }, ] @@ -114,6 +115,16 @@ const ConifgModel: FC = ({ onShowUseGPT4Confirm() return } + if(id !== 'gpt-4' && completionParams.max_tokens > 4000) { + Toast.notify({ + type: 'warning', + message: t('common.model.params.setToCurrentModelMaxTokenTip') + }) + onCompletionParamsChange({ + ...completionParams, + max_tokens: 4000 + }) + } setModelId(id) } } diff --git a/web/app/components/app/configuration/prompt-value-panel/index.tsx b/web/app/components/app/configuration/prompt-value-panel/index.tsx index b86094556c..e5884bf3fb 100644 --- a/web/app/components/app/configuration/prompt-value-panel/index.tsx +++ b/web/app/components/app/configuration/prompt-value-panel/index.tsx @@ -73,7 +73,7 @@ const PromptValuePanel: FC = ({ { (promptTemplate && promptTemplate?.trim()) ? (
+ return (
{/* Panel Header */}
@@ -207,7 +207,7 @@ function DetailPanel{detail.model_config?.pre_prompt || emptyText}
{!isChatMode - ?
+ ?
: items.length < 8 - ?
+ ?
Promise } -// todo: get image url from appInfo -const defaultUrl = 'https://images.unsplash.com/photo-1472099645785-5658abf4ff4e?ixlib=rb-1.2.1&ixid=eyJhcHBfaWQiOjEyMDd9&auto=format&fit=facearea&facepad=2&w=256&h=256&q=80' - function AppCard({ appInfo, cardType = 'app', @@ -104,7 +101,8 @@ function AppCard({
= ({
{t(`${prefixCustomize}.way1.step2`)}
{t(`${prefixCustomize}.way1.step2Tip`)}
-
+          
             export const APP_ID = '{appId}'
export const API_KEY = {`''`}
diff --git a/web/app/components/base/app-icon/index.tsx b/web/app/components/base/app-icon/index.tsx index d85bfca286..ef39f77cd1 100644 --- a/web/app/components/base/app-icon/index.tsx +++ b/web/app/components/base/app-icon/index.tsx @@ -2,6 +2,11 @@ import type { FC } from 'react' import classNames from 'classnames' import style from './style.module.css' +import data from '@emoji-mart/data' +import { init } from 'emoji-mart' + +init({ data }) + export type AppIconProps = { size?: 'tiny' | 'small' | 'medium' | 'large' rounded?: boolean @@ -9,14 +14,17 @@ export type AppIconProps = { background?: string className?: string innerIcon?: React.ReactNode + onClick?: () => void } const AppIcon: FC = ({ size = 'medium', rounded = false, + icon, background, className, innerIcon, + onClick, }) => { return ( = ({ style={{ background, }} + onClick={onClick} > - {innerIcon ? innerIcon : <>🤖} + {innerIcon ? innerIcon : icon && icon !== '' ? : } ) } diff --git a/web/app/components/base/block-input/index.tsx b/web/app/components/base/block-input/index.tsx index 6722da8ea3..1b9758593f 100644 --- a/web/app/components/base/block-input/index.tsx +++ b/web/app/components/base/block-input/index.tsx @@ -63,7 +63,7 @@ const BlockInput: FC = ({ }, [isEditing]) const style = classNames({ - 'block px-4 py-1 w-full h-full text-sm text-gray-900 outline-0 border-0': true, + 'block px-4 py-1 w-full h-full text-sm text-gray-900 outline-0 border-0 break-all': true, 'block-input--editing': isEditing, }) diff --git a/web/app/components/base/dialog/index.tsx b/web/app/components/base/dialog/index.tsx index 9e208d55ec..aaf7edea63 100644 --- a/web/app/components/base/dialog/index.tsx +++ b/web/app/components/base/dialog/index.tsx @@ -33,7 +33,7 @@ const CustomDialog = ({ const close = useCallback(() => onClose?.(), [onClose]) return ( - + , + HTMLElement + >; + } + } +} + +init({ data }) + +async function search(value: string) { + const emojis = await SearchIndex.search(value) || [] + + const results 
= emojis.map((emoji: any) => { + return emoji.skins[0].native + }) + return results +} + +const backgroundColors = [ + '#FFEAD5', + '#E4FBCC', + '#D3F8DF', + '#E0F2FE', + + '#E0EAFF', + '#EFF1F5', + '#FBE8FF', + '#FCE7F6', + + '#FEF7C3', + '#E6F4D7', + '#D5F5F6', + '#D1E9FF', + + '#D1E0FF', + '#D5D9EB', + '#ECE9FE', + '#FFE4E8', +] +interface IEmojiPickerProps { + isModal?: boolean + onSelect?: (emoji: string, background: string) => void + onClose?: () => void +} + +const EmojiPicker: FC = ({ + isModal = true, + onSelect, + onClose + +}) => { + const { t } = useTranslation() + const { categories } = data as any + const [selectedEmoji, setSelectedEmoji] = useState('') + const [selectedBackground, setSelectedBackground] = useState(backgroundColors[0]) + + const [searchedEmojis, setSearchedEmojis] = useState([]) + const [isSearching, setIsSearching] = useState(false) + + return isModal ? { }} + isShow + closable={false} + wrapperClassName='!z-40' + className={cn(s.container, '!w-[362px] !p-0')} + > +
+
+
+
+ ) => { + if (e.target.value === '') { + setIsSearching(false) + return + } else { + setIsSearching(true) + const emojis = await search(e.target.value) + setSearchedEmojis(emojis) + } + }} + /> +
+
+ + +
+ {isSearching && <> +
+

Search

+
+ {searchedEmojis.map((emoji: string, index: number) => { + return
{ + setSelectedEmoji(emoji) + }} + > +
+ +
+
+ })} +
+
+ } + + + {categories.map((category: any, index: number) => { + return
+

{category.id}

+
+ {category.emojis.map((emoji: string, index: number) => { + return
{ + setSelectedEmoji(emoji) + }} + > +
+ +
+
+ })} + +
+
+ })} +
+ + {/* Color Select */} +
+

Choose Style

+
+ {backgroundColors.map((color) => { + return
{ + setSelectedBackground(color) + }} + > +
+ {selectedEmoji !== '' && } +
+
+ })} +
+
+ +
+ + +
+
: <> + +} +export default EmojiPicker diff --git a/web/app/components/base/emoji-picker/style.module.css b/web/app/components/base/emoji-picker/style.module.css new file mode 100644 index 0000000000..5facb3560a --- /dev/null +++ b/web/app/components/base/emoji-picker/style.module.css @@ -0,0 +1,12 @@ +.container { + display: flex; + flex-direction: column; + align-items: flex-start; + width: 362px; + max-height: 552px; + + border: 0.5px solid #EAECF0; + box-shadow: 0px 12px 16px -4px rgba(16, 24, 40, 0.08), 0px 4px 6px -2px rgba(16, 24, 40, 0.03); + border-radius: 12px; + background: #fff; +} diff --git a/web/app/components/base/modal/index.tsx b/web/app/components/base/modal/index.tsx index bc33ac8e0a..7c11089769 100644 --- a/web/app/components/base/modal/index.tsx +++ b/web/app/components/base/modal/index.tsx @@ -5,6 +5,7 @@ import { XMarkIcon } from '@heroicons/react/24/outline' type IModal = { className?: string + wrapperClassName?: string isShow: boolean onClose: () => void title?: React.ReactNode @@ -15,6 +16,7 @@ type IModal = { export default function Modal({ className, + wrapperClassName, isShow, onClose, title, @@ -23,51 +25,51 @@ export default function Modal({ closable = false, }: IModal) { return ( - - - -
- + + + +
+ -
-
- - - {title && - {title} - } - {description && - {description} - } - {closable - &&
- -
} - {children} -
-
-
-
-
-
+
+
+ + + {title && + {title} + } + {description && + {description} + } + {closable + &&
+ +
} + {children} +
+
+
+
+
+
) } diff --git a/web/app/components/datasets/create/file-preview/index.module.css b/web/app/components/datasets/create/file-preview/index.module.css index 8c2e767848..f64f493640 100644 --- a/web/app/components/datasets/create/file-preview/index.module.css +++ b/web/app/components/datasets/create/file-preview/index.module.css @@ -43,4 +43,7 @@ background: #f9fafb center no-repeat url(../assets/Loading.svg); background-size: contain; } + .fileContent { + white-space: pre-line; + } \ No newline at end of file diff --git a/web/app/components/datasets/create/file-uploader/index.tsx b/web/app/components/datasets/create/file-uploader/index.tsx index cb750c678c..d0898b6de0 100644 --- a/web/app/components/datasets/create/file-uploader/index.tsx +++ b/web/app/components/datasets/create/file-uploader/index.tsx @@ -190,13 +190,15 @@ const FileUploader = ({ file, onFileUpdate }: IFileUploaderProps) => { onChange={fileChangeHandle} />
{t('datasetCreation.stepOne.uploader.title')}
- {!currentFile && !file && ( -
- {t('datasetCreation.stepOne.uploader.button')} - - {dragging &&
} -
- )} +
+ {!currentFile && !file && ( +
+ {t('datasetCreation.stepOne.uploader.button')} + + {dragging &&
} +
+ )} +
{currentFile && (
{uploading && ( diff --git a/web/app/components/datasets/create/step-two/preview-item/index.tsx b/web/app/components/datasets/create/step-two/preview-item/index.tsx index 4108ef4807..1644e9b102 100644 --- a/web/app/components/datasets/create/step-two/preview-item/index.tsx +++ b/web/app/components/datasets/create/step-two/preview-item/index.tsx @@ -41,7 +41,7 @@ const PreviewItem: FC = ({
- {content} +
{content}
) diff --git a/web/app/components/datasets/documents/detail/completed/style.module.css b/web/app/components/datasets/documents/detail/completed/style.module.css index d1eeaf6726..778b550ad4 100644 --- a/web/app/components/datasets/documents/detail/completed/style.module.css +++ b/web/app/components/datasets/documents/detail/completed/style.module.css @@ -44,7 +44,8 @@ @apply h-8 py-0 bg-gray-50 hover:bg-gray-100 rounded-lg shadow-none !important; } .segModalContent { - @apply h-96 text-gray-800 text-base overflow-y-scroll; + @apply h-96 text-gray-800 text-base break-all overflow-y-scroll; + white-space: pre-line; } .footer { @apply flex items-center justify-between box-border border-t-gray-200 border-t-[0.5px] pt-3 mt-4; diff --git a/web/app/components/datasets/documents/index.tsx b/web/app/components/datasets/documents/index.tsx index 6434dcedeb..bd7a9f56f4 100644 --- a/web/app/components/datasets/documents/index.tsx +++ b/web/app/components/datasets/documents/index.tsx @@ -69,7 +69,7 @@ type IDocumentsProps = { datasetId: string } -export const fetcher = (url: string) => get(url, {}, { isMock: true }) +export const fetcher = (url: string) => get(url, {}, {}) const Documents: FC = ({ datasetId }) => { const { t } = useTranslation() diff --git a/web/app/components/header/account-setting/index.tsx b/web/app/components/header/account-setting/index.tsx index 8b1c51fbeb..7689c24cbb 100644 --- a/web/app/components/header/account-setting/index.tsx +++ b/web/app/components/header/account-setting/index.tsx @@ -75,6 +75,7 @@ export default function AccountSetting({ isShow onClose={() => { }} className={s.modal} + wrapperClassName='pt-[60px]' >
diff --git a/web/app/components/header/account-setting/provider-page/azure-provider/index.tsx b/web/app/components/header/account-setting/provider-page/azure-provider/index.tsx index 71236120e5..1cbe7d0674 100644 --- a/web/app/components/header/account-setting/provider-page/azure-provider/index.tsx +++ b/web/app/components/header/account-setting/provider-page/azure-provider/index.tsx @@ -1,10 +1,17 @@ import type { Provider, ProviderAzureToken } from '@/models/common' +import { ProviderName } from '@/models/common' import { useTranslation } from 'react-i18next' import Link from 'next/link' import { ArrowTopRightOnSquareIcon } from '@heroicons/react/24/outline' -import ProviderInput, { ProviderValidateTokenInput} from '../provider-input' -import { useState } from 'react' -import { ValidatedStatus } from '../provider-input/useValidateToken' +import { useState, useEffect } from 'react' +import ProviderInput from '../provider-input' +import useValidateToken, { ValidatedStatus } from '../provider-input/useValidateToken' +import { + ValidatedErrorIcon, + ValidatedSuccessIcon, + ValidatingTip, + ValidatedErrorOnAzureOpenaiTip +} from '../provider-input/Validate' interface IAzureProviderProps { provider: Provider @@ -17,52 +24,72 @@ const AzureProvider = ({ onValidatedStatus }: IAzureProviderProps) => { const { t } = useTranslation() - const [token, setToken] = useState(provider.token as ProviderAzureToken || {}) - const handleFocus = () => { - if (token === provider.token) { - token.azure_api_key = '' + const [token, setToken] = useState(provider.provider_name === ProviderName.AZURE_OPENAI ? 
{...provider.token}: {}) + const [ validating, validatedStatus, setValidatedStatus, validate ] = useValidateToken(provider.provider_name) + const handleFocus = (type: keyof ProviderAzureToken) => { + if (token[type] === (provider?.token as ProviderAzureToken)[type]) { + token[type] = '' setToken({...token}) onTokenChange({...token}) + setValidatedStatus(undefined) } } - const handleChange = (type: keyof ProviderAzureToken, v: string) => { + const handleChange = (type: keyof ProviderAzureToken, v: string, validate: any) => { token[type] = v setToken({...token}) onTokenChange({...token}) + validate({...token}, { + beforeValidating: () => { + if (!token.openai_api_base || !token.openai_api_key) { + setValidatedStatus(undefined) + return false + } + return true + } + }) } + const getValidatedIcon = () => { + if (validatedStatus === ValidatedStatus.Error || validatedStatus === ValidatedStatus.Exceed) { + return + } + if (validatedStatus === ValidatedStatus.Success) { + return + } + } + const getValidatedTip = () => { + if (validating) { + return + } + if (validatedStatus === ValidatedStatus.Error) { + return + } + } + useEffect(() => { + if (typeof onValidatedStatus === 'function') { + onValidatedStatus(validatedStatus) + } + }, [validatedStatus]) return (
handleChange('azure_api_base', v)} + name={t('common.provider.azure.apiBase')} + placeholder={t('common.provider.azure.apiBasePlaceholder')} + value={token.openai_api_base} + onChange={(v) => handleChange('openai_api_base', v, validate)} + onFocus={() => handleFocus('openai_api_base')} + validatedIcon={getValidatedIcon()} /> handleChange('azure_api_type', v)} - /> - handleChange('azure_api_version', v)} - /> - handleChange('azure_api_key', v)} - onFocus={handleFocus} - onValidatedStatus={onValidatedStatus} - providerName={provider.provider_name} + value={token.openai_api_key} + onChange={(v) => handleChange('openai_api_key', v, validate)} + onFocus={() => handleFocus('openai_api_key')} + validatedIcon={getValidatedIcon()} + validatedTip={getValidatedTip()} /> {t('common.provider.azure.helpTip')} @@ -72,4 +99,4 @@ const AzureProvider = ({ ) } -export default AzureProvider \ No newline at end of file +export default AzureProvider diff --git a/web/app/components/header/account-setting/provider-page/index.tsx b/web/app/components/header/account-setting/provider-page/index.tsx index c1bce2c54c..67112b8142 100644 --- a/web/app/components/header/account-setting/provider-page/index.tsx +++ b/web/app/components/header/account-setting/provider-page/index.tsx @@ -67,7 +67,7 @@ const ProviderPage = () => { const providerHosted = data?.filter(provider => provider.provider_name === 'openai' && provider.provider_type === 'system')?.[0] return ( -
+
{ providerHosted && !IS_CE_EDITION && ( <> diff --git a/web/app/components/header/account-setting/provider-page/openai-provider/index.tsx b/web/app/components/header/account-setting/provider-page/openai-provider/index.tsx index adff6bdf30..f49b229812 100644 --- a/web/app/components/header/account-setting/provider-page/openai-provider/index.tsx +++ b/web/app/components/header/account-setting/provider-page/openai-provider/index.tsx @@ -1,222 +1,94 @@ -import { ChangeEvent, useEffect, useRef, useState } from 'react' -import { useContext } from 'use-context-selector' +import type { Provider } from '@/models/common' +import { useState, useEffect } from 'react' import { useTranslation } from 'react-i18next' -import { debounce } from 'lodash-es' +import ProviderInput from '../provider-input' import Link from 'next/link' -import useSWR from 'swr' -import { ArrowTopRightOnSquareIcon, PencilIcon } from '@heroicons/react/24/outline' -import { CheckCircleIcon, ExclamationCircleIcon } from '@heroicons/react/24/solid' -import Button from '@/app/components/base/button' -import s from './index.module.css' -import classNames from 'classnames' -import { fetchTenantInfo, validateProviderKey, updateProviderAIKey } from '@/service/common' -import { ToastContext } from '@/app/components/base/toast' -import Indicator from '../../../indicator' -import I18n from '@/context/i18n' +import { ArrowTopRightOnSquareIcon } from '@heroicons/react/24/outline' +import useValidateToken, { ValidatedStatus } from '../provider-input/useValidateToken' +import { + ValidatedErrorIcon, + ValidatedSuccessIcon, + ValidatingTip, + ValidatedExceedOnOpenaiTip, + ValidatedErrorOnOpenaiTip +} from '../provider-input/Validate' -type IStatusType = 'normal' | 'verified' | 'error' | 'error-api-key-exceed-bill' - -type TInputWithStatusProps = { - value: string - onChange: (v: string) => void - onValidating: (validating: boolean) => void - verifiedStatus: IStatusType - onVerified: (verified: IStatusType) => void -} 
-const InputWithStatus = ({ - value, - onChange, - onValidating, - verifiedStatus, - onVerified -}: TInputWithStatusProps) => { - const { t } = useTranslation() - const validateKey = useRef(debounce(async (token: string) => { - if (!token) return - onValidating(true) - try { - const res = await validateProviderKey({ url: '/workspaces/current/providers/openai/token-validate', body: { token } }) - onVerified(res.result === 'success' ? 'verified' : 'error') - } catch (e: any) { - if (e.status === 400) { - e.json().then(({ code }: any) => { - if (code === 'provider_request_failed') { - onVerified('error-api-key-exceed-bill') - } - }) - } else { - onVerified('error') - } - } finally { - onValidating(false) - } - }, 500)) - - const handleChange = (e: ChangeEvent) => { - const inputValue = e.target.value - onChange(inputValue) - if (!inputValue) { - onVerified('normal') - } - validateKey.current(inputValue) - } - return ( -
- - { - verifiedStatus === 'error' && - } - { - verifiedStatus === 'verified' && - } -
- ) +interface IOpenaiProviderProps { + provider: Provider + onValidatedStatus: (status?: ValidatedStatus) => void + onTokenChange: (token: string) => void } -const OpenaiProvider = () => { +const OpenaiProvider = ({ + provider, + onValidatedStatus, + onTokenChange +}: IOpenaiProviderProps) => { const { t } = useTranslation() - const { locale } = useContext(I18n) - const { data: userInfo, mutate } = useSWR({ url: '/info' }, fetchTenantInfo) - const [inputValue, setInputValue] = useState('') - const [validating, setValidating] = useState(false) - const [editStatus, setEditStatus] = useState('normal') - const [loading, setLoading] = useState(false) - const [editing, setEditing] = useState(false) - const [invalidStatus, setInvalidStatus] = useState(false) - const { notify } = useContext(ToastContext) - const provider = userInfo?.providers?.find(({ provider }) => provider === 'openai') - - const handleReset = () => { - setInputValue('') - setValidating(false) - setEditStatus('normal') - setLoading(false) - setEditing(false) - } - const handleSave = async () => { - if (editStatus === 'verified') { - try { - setLoading(true) - await updateProviderAIKey({ url: '/workspaces/current/providers/openai/token', body: { token: inputValue ?? 
'' } }) - notify({ type: 'success', message: t('common.actionMsg.modifiedSuccessfully') }) - } catch (e) { - notify({ type: 'error', message: t('common.provider.saveFailed') }) - } finally { - setLoading(false) - handleReset() - mutate() - } + const [token, setToken] = useState(provider.token as string || '') + const [ validating, validatedStatus, setValidatedStatus, validate ] = useValidateToken(provider.provider_name) + const handleFocus = () => { + if (token === provider.token) { + setToken('') + onTokenChange('') + setValidatedStatus(undefined) } } + const handleChange = (v: string) => { + setToken(v) + onTokenChange(v) + validate(v, { + beforeValidating: () => { + if (!v) { + setValidatedStatus(undefined) + return false + } + return true + } + }) + } useEffect(() => { - if (provider && !provider.token_is_valid && provider.token_is_set) { - setInvalidStatus(true) + if (typeof onValidatedStatus === 'function') { + onValidatedStatus(validatedStatus) } - }, [userInfo]) + }, [validatedStatus]) - const showInvalidStatus = invalidStatus && !editing - const renderErrorMessage = () => { + const getValidatedIcon = () => { + if (validatedStatus === ValidatedStatus.Error || validatedStatus === ValidatedStatus.Exceed) { + return + } + if (validatedStatus === ValidatedStatus.Success) { + return + } + } + const getValidatedTip = () => { if (validating) { - return ( -
- {t('common.provider.validating')} -
- ) + return } - if (editStatus === 'error-api-key-exceed-bill') { - return ( -
- {t('common.provider.apiKeyExceedBill')}  - - {locale === 'en' ? 'this link' : '这篇文档'} - -
- ) + if (validatedStatus === ValidatedStatus.Exceed) { + return } - if (showInvalidStatus || editStatus === 'error') { - return ( -
- {t('common.provider.invalidKey')} -
- ) + if (validatedStatus === ValidatedStatus.Error) { + return } - return null } return (
-
-
- {t('common.provider.apiKey')} -
- { - provider && !editing && ( -
setEditing(true)} - > - - {t('common.operation.edit')} -
- ) - } - { - (inputValue || editing) && ( - <> - - - - ) - } -
- { - (!provider || (provider && editing)) && ( - setInputValue(v)} - verifiedStatus={editStatus} - onVerified={v => setEditStatus(v)} - onValidating={v => setValidating(v)} - /> - ) - } - { - (provider && !editing) && ( -
- sk-0C...skuA - -
- ) - } - {renderErrorMessage()} - - {t('appOverview.welcome.getKeyTip')} -
+ + + {t('appOverview.welcome.getKeyTip')} +
) } diff --git a/web/app/components/header/account-setting/provider-page/openai-provider/provider.tsx b/web/app/components/header/account-setting/provider-page/openai-provider/provider.tsx deleted file mode 100644 index 45747cb3a8..0000000000 --- a/web/app/components/header/account-setting/provider-page/openai-provider/provider.tsx +++ /dev/null @@ -1,52 +0,0 @@ -import type { Provider } from '@/models/common' -import { useState } from 'react' -import { useTranslation } from 'react-i18next' -import { ProviderValidateTokenInput } from '../provider-input' -import Link from 'next/link' -import { ArrowTopRightOnSquareIcon } from '@heroicons/react/24/outline' -import { ValidatedStatus } from '../provider-input/useValidateToken' - -interface IOpenaiProviderProps { - provider: Provider - onValidatedStatus: (status?: ValidatedStatus) => void - onTokenChange: (token: string) => void -} - -const OpenaiProvider = ({ - provider, - onValidatedStatus, - onTokenChange -}: IOpenaiProviderProps) => { - const { t } = useTranslation() - const [token, setToken] = useState(provider.token as string || '') - const handleFocus = () => { - if (token === provider.token) { - setToken('') - onTokenChange('') - } - } - const handleChange = (v: string) => { - setToken(v) - onTokenChange(v) - } - - return ( -
- - - {t('appOverview.welcome.getKeyTip')} -
- ) -} - -export default OpenaiProvider \ No newline at end of file diff --git a/web/app/components/header/account-setting/provider-page/provider-input/Validate.tsx b/web/app/components/header/account-setting/provider-page/provider-input/Validate.tsx new file mode 100644 index 0000000000..740a149a93 --- /dev/null +++ b/web/app/components/header/account-setting/provider-page/provider-input/Validate.tsx @@ -0,0 +1,59 @@ +import Link from 'next/link' +import { CheckCircleIcon, ExclamationCircleIcon } from '@heroicons/react/24/solid' +import { useTranslation } from 'react-i18next' +import { useContext } from 'use-context-selector' +import I18n from '@/context/i18n' + +export const ValidatedErrorIcon = () => { + return +} + +export const ValidatedSuccessIcon = () => { + return +} + +export const ValidatingTip = () => { + const { t } = useTranslation() + return ( +
+ {t('common.provider.validating')} +
+ ) +} + +export const ValidatedExceedOnOpenaiTip = () => { + const { t } = useTranslation() + const { locale } = useContext(I18n) + + return ( +
+ {t('common.provider.apiKeyExceedBill')}  + + {locale === 'en' ? 'this link' : '这篇文档'} + +
+ ) +} + +export const ValidatedErrorOnOpenaiTip = () => { + const { t } = useTranslation() + + return ( +
+ {t('common.provider.invalidKey')} +
+ ) +} + +export const ValidatedErrorOnAzureOpenaiTip = () => { + const { t } = useTranslation() + + return ( +
+ {t('common.provider.invalidApiKey')} +
+ ) +} \ No newline at end of file diff --git a/web/app/components/header/account-setting/provider-page/provider-input/index.tsx b/web/app/components/header/account-setting/provider-page/provider-input/index.tsx index 5a489d17f2..84ab9901c1 100644 --- a/web/app/components/header/account-setting/provider-page/provider-input/index.tsx +++ b/web/app/components/header/account-setting/provider-page/provider-input/index.tsx @@ -1,10 +1,5 @@ -import { ChangeEvent, useEffect } from 'react' -import Link from 'next/link' -import { CheckCircleIcon, ExclamationCircleIcon } from '@heroicons/react/24/solid' -import { useTranslation } from 'react-i18next' -import { useContext } from 'use-context-selector' -import I18n from '@/context/i18n' -import useValidateToken, { ValidatedStatus } from './useValidateToken' +import { ChangeEvent } from 'react' +import { ReactElement } from 'react-markdown/lib/react-markdown' interface IProviderInputProps { value?: string @@ -13,6 +8,8 @@ interface IProviderInputProps { className?: string onChange: (v: string) => void onFocus?: () => void + validatedIcon?: ReactElement + validatedTip?: ReactElement } const ProviderInput = ({ @@ -22,6 +19,8 @@ const ProviderInput = ({ className, onChange, onFocus, + validatedIcon, + validatedTip }: IProviderInputProps) => { const handleChange = (e: ChangeEvent) => { @@ -47,95 +46,9 @@ const ProviderInput = ({ onChange={handleChange} onFocus={onFocus} /> + {validatedIcon}
-
- ) -} - -type TproviderInputProps = IProviderInputProps - & { - onValidatedStatus?: (status?: ValidatedStatus) => void - providerName: string - } -export const ProviderValidateTokenInput = ({ - value, - name, - placeholder, - className, - onChange, - onFocus, - onValidatedStatus, - providerName -}: TproviderInputProps) => { - const { t } = useTranslation() - const { locale } = useContext(I18n) - const [ validating, validatedStatus, validate ] = useValidateToken(providerName) - - useEffect(() => { - if (typeof onValidatedStatus === 'function') { - onValidatedStatus(validatedStatus) - } - }, [validatedStatus]) - - const handleChange = (e: ChangeEvent) => { - const inputValue = e.target.value - onChange(inputValue) - - validate(inputValue) - } - - return ( -
-
{name}
-
- - { - validatedStatus === ValidatedStatus.Error && - } - { - validatedStatus === ValidatedStatus.Success && - } -
- { - validating && ( -
- {t('common.provider.validating')} -
- ) - } - { - validatedStatus === ValidatedStatus.Exceed && !validating && ( -
- {t('common.provider.apiKeyExceedBill')}  - - {locale === 'en' ? 'this link' : '这篇文档'} - -
- ) - } - { - validatedStatus === ValidatedStatus.Error && !validating && ( -
- {t('common.provider.invalidKey')} -
- ) - } + {validatedTip}
) } diff --git a/web/app/components/header/account-setting/provider-page/provider-input/useValidateToken.ts b/web/app/components/header/account-setting/provider-page/provider-input/useValidateToken.ts index 5064910671..69b7529449 100644 --- a/web/app/components/header/account-setting/provider-page/provider-input/useValidateToken.ts +++ b/web/app/components/header/account-setting/provider-page/provider-input/useValidateToken.ts @@ -1,4 +1,4 @@ -import { useState, useCallback } from 'react' +import { useState, useCallback, SetStateAction, Dispatch } from 'react' import debounce from 'lodash-es/debounce' import { DebouncedFunc } from 'lodash-es' import { validateProviderKey } from '@/service/common' @@ -8,14 +8,24 @@ export enum ValidatedStatus { Error = 'error', Exceed = 'exceed' } +export type SetValidatedStatus = Dispatch> +export type ValidateFn = DebouncedFunc<(token: any, config: ValidateFnConfig) => void> +type ValidateTokenReturn = [ + boolean, + ValidatedStatus | undefined, + SetValidatedStatus, + ValidateFn +] +export type ValidateFnConfig = { + beforeValidating: (token: any) => boolean +} -const useValidateToken = (providerName: string): [boolean, ValidatedStatus | undefined, DebouncedFunc<(token: string) => Promise>] => { +const useValidateToken = (providerName: string): ValidateTokenReturn => { const [validating, setValidating] = useState(false) const [validatedStatus, setValidatedStatus] = useState() - const validate = useCallback(debounce(async (token: string) => { - if (!token) { - setValidatedStatus(undefined) - return + const validate = useCallback(debounce(async (token: string, config: ValidateFnConfig) => { + if (!config.beforeValidating(token)) { + return false } setValidating(true) try { @@ -24,8 +34,10 @@ const useValidateToken = (providerName: string): [boolean, ValidatedStatus | und } catch (e: any) { if (e.status === 400) { e.json().then(({ code }: any) => { - if (code === 'provider_request_failed') { + if (code === 'provider_request_failed' 
&& providerName === 'openai') { setValidatedStatus(ValidatedStatus.Exceed) + } else { + setValidatedStatus(ValidatedStatus.Error) } }) } else { @@ -39,7 +51,8 @@ const useValidateToken = (providerName: string): [boolean, ValidatedStatus | und return [ validating, validatedStatus, - validate, + setValidatedStatus, + validate ] } diff --git a/web/app/components/header/account-setting/provider-page/provider-item/index.tsx b/web/app/components/header/account-setting/provider-page/provider-item/index.tsx index 4e8ef532e3..14f8c3f5c3 100644 --- a/web/app/components/header/account-setting/provider-page/provider-item/index.tsx +++ b/web/app/components/header/account-setting/provider-page/provider-item/index.tsx @@ -5,7 +5,8 @@ import { useContext } from 'use-context-selector' import Indicator from '../../../indicator' import { useTranslation } from 'react-i18next' import type { Provider, ProviderAzureToken } from '@/models/common' -import OpenaiProvider from '../openai-provider/provider' +import { ProviderName } from '@/models/common' +import OpenaiProvider from '../openai-provider' import AzureProvider from '../azure-provider' import { ValidatedStatus } from '../provider-input/useValidateToken' import { updateProviderAIKey } from '@/service/common' @@ -33,18 +34,28 @@ const ProviderItem = ({ const { notify } = useContext(ToastContext) const [token, setToken] = useState( provider.provider_name === 'azure_openai' - ? { azure_api_base: '', azure_api_type: '', azure_api_version: '', azure_api_key: '' } + ? { openai_api_base: '', openai_api_key: '' } : '' ) const id = `${provider.provider_name}-${provider.provider_type}` const isOpen = id === activeId - const providerKey = provider.provider_name === 'azure_openai' ? 
(provider.token as ProviderAzureToken)?.azure_api_key : provider.token const comingSoon = false const isValid = provider.is_valid + const providerTokenHasSetted = () => { + if (provider.provider_name === ProviderName.AZURE_OPENAI) { + return provider.token && provider.token.openai_api_base && provider.token.openai_api_key ? { + openai_api_base: provider.token.openai_api_base, + openai_api_key: provider.token.openai_api_key + }: undefined + } + if (provider.provider_name === ProviderName.OPENAI) { + return provider.token + } + } const handleUpdateToken = async () => { if (loading) return - if (validatedStatus === ValidatedStatus.Success || !token) { + if (validatedStatus === ValidatedStatus.Success) { try { setLoading(true) await updateProviderAIKey({ url: `/workspaces/current/providers/${provider.provider_name}/token`, body: { token } }) @@ -65,7 +76,7 @@ const ProviderItem = ({
{name}
{ - providerKey && !comingSoon && !isOpen && ( + providerTokenHasSetted() && !comingSoon && !isOpen && (
{!isValid &&
{t('common.provider.invalidApiKey')}
} @@ -78,7 +89,7 @@ const ProviderItem = ({ px-3 h-[28px] bg-white border border-gray-200 rounded-md cursor-pointer text-xs font-medium text-gray-700 flex items-center ' onClick={() => onActive(id)}> - {providerKey ? t('common.provider.editKey') : t('common.provider.addKey')} + {providerTokenHasSetted() ? t('common.provider.editKey') : t('common.provider.addKey')}
) } @@ -114,7 +125,7 @@ const ProviderItem = ({ }
{ - provider.provider_name === 'openai' && isOpen && ( + provider.provider_name === ProviderName.OPENAI && isOpen && ( setValidatedStatus(v)} @@ -123,7 +134,7 @@ const ProviderItem = ({ ) } { - provider.provider_name === 'azure_openai' && isOpen && ( + provider.provider_name === ProviderName.AZURE_OPENAI && isOpen && ( setValidatedStatus(v)} @@ -135,4 +146,4 @@ const ProviderItem = ({ ) } -export default ProviderItem \ No newline at end of file +export default ProviderItem diff --git a/web/app/components/header/index.tsx b/web/app/components/header/index.tsx index f188eaf1ca..d968f7b49b 100644 --- a/web/app/components/header/index.tsx +++ b/web/app/components/header/index.tsx @@ -69,11 +69,13 @@ const Header: FC = ({ appItems, curApp, userProfile, onLogout, lan text={t('common.menus.apps')} activeSegment={['apps', 'app']} link='/apps' - curNav={curApp && { id: curApp.id, name: curApp.name }} + curNav={curApp && { id: curApp.id, name: curApp.name ,icon: curApp.icon, icon_background: curApp.icon_background}} navs={appItems.map(item => ({ id: item.id, name: item.name, - link: `/app/${item.id}/overview` + link: `/app/${item.id}/overview`, + icon: item.icon, + icon_background: item.icon_background }))} createText={t('common.menus.newApp')} onCreate={() => setShowNewAppDialog(true)} @@ -91,11 +93,13 @@ const Header: FC = ({ appItems, curApp, userProfile, onLogout, lan text={t('common.menus.datasets')} activeSegment='datasets' link='/datasets' - curNav={currentDataset && { id: currentDataset.id, name: currentDataset.name }} + curNav={currentDataset && { id: currentDataset.id, name: currentDataset.name, icon: currentDataset.icon, icon_background: currentDataset.icon_background }} navs={datasets.map(dataset => ({ id: dataset.id, name: dataset.name, - link: `/datasets/${dataset.id}/documents` + link: `/datasets/${dataset.id}/documents`, + icon: dataset.icon, + icon_background: dataset.icon_background }))} createText={t('common.menus.newDataset')} onCreate={() => 
router.push('/datasets/create')} diff --git a/web/app/components/header/nav/nav-selector/index.tsx b/web/app/components/header/nav/nav-selector/index.tsx index ed2bb99668..ee0b630cd5 100644 --- a/web/app/components/header/nav/nav-selector/index.tsx +++ b/web/app/components/header/nav/nav-selector/index.tsx @@ -10,6 +10,8 @@ type NavItem = { id: string name: string link: string + icon: string + icon_background: string } export interface INavSelectorProps { navs: NavItem[] @@ -66,7 +68,7 @@ const NavSelector = ({ curNav, navs, createText, onCreate }: INavSelectorProps)
router.push(nav.link)}>
- +
@@ -102,4 +104,4 @@ const NavSelector = ({ curNav, navs, createText, onCreate }: INavSelectorProps) ) } -export default NavSelector \ No newline at end of file +export default NavSelector diff --git a/web/app/components/share/chat/config-scence/index.tsx b/web/app/components/share/chat/config-scence/index.tsx index e22933ce5d..152d5e58a4 100644 --- a/web/app/components/share/chat/config-scence/index.tsx +++ b/web/app/components/share/chat/config-scence/index.tsx @@ -5,7 +5,7 @@ import Welcome from '../welcome' const ConfigSence: FC = (props) => { return ( -
+
) diff --git a/web/app/components/share/chat/index.tsx b/web/app/components/share/chat/index.tsx index 6d1c80fd84..f3ce070f38 100644 --- a/web/app/components/share/chat/index.tsx +++ b/web/app/components/share/chat/index.tsx @@ -441,6 +441,8 @@ const Main: FC = () => {
handleConversationIdChange('-1')} diff --git a/web/app/components/share/header.tsx b/web/app/components/share/header.tsx index 0ad2247d2f..60df05f70e 100644 --- a/web/app/components/share/header.tsx +++ b/web/app/components/share/header.tsx @@ -7,6 +7,8 @@ import { } from '@heroicons/react/24/solid' export type IHeaderProps = { title: string + icon: string + icon_background: string isMobile?: boolean onShowSideBar?: () => void onCreateNewChat?: () => void @@ -14,6 +16,8 @@ export type IHeaderProps = { const Header: FC = ({ title, isMobile, + icon, + icon_background, onShowSideBar, onCreateNewChat, }) => { @@ -28,7 +32,7 @@ const Header: FC = ({
) :
}
- +
{title}
{isMobile ? ( diff --git a/web/app/install/installForm.tsx b/web/app/install/installForm.tsx index 749719e9e9..605a2dced0 100644 --- a/web/app/install/installForm.tsx +++ b/web/app/install/installForm.tsx @@ -7,7 +7,7 @@ import { useRouter } from 'next/navigation' import Toast from '../components/base/toast' import { setup } from '@/service/common' -const validEmailReg = /^([a-zA-Z0-9_-])+@([a-zA-Z0-9_-])+(\.[a-zA-Z0-9_-])+/ +const validEmailReg = /^[\w\.-]+@([\w-]+\.)+[\w-]{2,}$/ const validPassword = /^(?=.*[a-zA-Z])(?=.*\d).{8,}$/ const InstallForm = () => { diff --git a/web/app/signin/normalForm.tsx b/web/app/signin/normalForm.tsx index e0626572ac..93e6b0d561 100644 --- a/web/app/signin/normalForm.tsx +++ b/web/app/signin/normalForm.tsx @@ -13,7 +13,7 @@ import Button from '@/app/components/base/button' import { login, oauth } from '@/service/common' import { apiPrefix } from '@/config' -const validEmailReg = /^([a-zA-Z0-9_-])+@([a-zA-Z0-9_-])+(\.[a-zA-Z0-9_-])+/ +const validEmailReg = /^[\w\.-]+@([\w-]+\.)+[\w-]{2,}$/ type IState = { formValid: boolean diff --git a/web/app/styles/globals.css b/web/app/styles/globals.css index f4710b0275..57145790ec 100644 --- a/web/app/styles/globals.css +++ b/web/app/styles/globals.css @@ -131,4 +131,10 @@ button:focus-within { -webkit-text-fill-color: transparent; background-clip: text; text-fill-color: transparent; +} + +/* overwrite paging active dark model style */ +[class*=style_paginatio] li .text-primary-600 { + color: rgb(28 100 242); + background-color: rgb(235 245 255); } \ No newline at end of file diff --git a/web/app/styles/markdown.scss b/web/app/styles/markdown.scss index fdfaae0cf9..ac19b9d76e 100644 --- a/web/app/styles/markdown.scss +++ b/web/app/styles/markdown.scss @@ -54,6 +54,7 @@ font-weight: 400; line-height: 1.5; word-wrap: break-word; + word-break: break-all; user-select: text; } @@ -593,6 +594,7 @@ .markdown-body table th { font-weight: var(--base-text-weight-semibold, 600); + white-space: nowrap; } 
.markdown-body table th, diff --git a/web/config/index.ts b/web/config/index.ts index f5a85b52a4..3b3453061a 100644 --- a/web/config/index.ts +++ b/web/config/index.ts @@ -31,9 +31,6 @@ if (process.env.NEXT_PUBLIC_API_PREFIX && process.env.NEXT_PUBLIC_PUBLIC_API_PRE export const API_PREFIX: string = apiPrefix; export const PUBLIC_API_PREFIX: string = publicApiPrefix; -// mock server -export const MOCK_API_PREFIX = 'http://127.0.0.1:3001' - const EDITION = process.env.NEXT_PUBLIC_EDITION || globalThis.document?.body?.getAttribute('data-public-edition') export const IS_CE_EDITION = EDITION === 'SELF_HOSTED' @@ -78,9 +75,9 @@ export const LOCALE_COOKIE_NAME = 'locale' export const DEFAULT_VALUE_MAX_LEN = 48 -export const zhRegex = /^[\u4e00-\u9fa5]$/gm -export const emojiRegex = /^[\uD800-\uDBFF][\uDC00-\uDFFF]$/gm -export const emailRegex = /^(([^<>()[\]\\.,;:\s@\"]+(\.[^<>()[\]\\.,;:\s@\"]+)*)|(\".+\"))@((\[[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}\])|(([a-zA-Z\-0-9]+\.)+[a-zA-Z]{2,}))$/ +export const zhRegex = /^[\u4e00-\u9fa5]$/m +export const emojiRegex = /^[\uD800-\uDBFF][\uDC00-\uDFFF]$/m +export const emailRegex = /^[\w\.-]+@([\w-]+\.)+[\w-]{2,}$/m const MAX_ZN_VAR_NAME_LENGHT = 8 const MAX_EN_VAR_VALUE_LENGHT = 16 export const getMaxVarNameLength = (value: string) => { diff --git a/web/context/app-context.ts b/web/context/app-context.ts deleted file mode 100644 index d31b9fedca..0000000000 --- a/web/context/app-context.ts +++ /dev/null @@ -1,27 +0,0 @@ -'use client' - -import { createContext, useContext } from 'use-context-selector' -import type { App } from '@/types/app' -import type { UserProfileResponse } from '@/models/common' - -export type AppContextValue = { - apps: App[] - mutateApps: () => void - userProfile: UserProfileResponse - mutateUserProfile: () => void -} - -const AppContext = createContext({ - apps: [], - mutateApps: () => { }, - userProfile: { - id: '', - name: '', - email: '', - }, - mutateUserProfile: () => { }, -}) - -export const 
useAppContext = () => useContext(AppContext) - -export default AppContext diff --git a/web/context/app-context.tsx b/web/context/app-context.tsx new file mode 100644 index 0000000000..90cfc5ec11 --- /dev/null +++ b/web/context/app-context.tsx @@ -0,0 +1,45 @@ +'use client' + +import { createContext, useContext, useContextSelector } from 'use-context-selector' +import type { App } from '@/types/app' +import type { UserProfileResponse } from '@/models/common' +import { createRef, FC, PropsWithChildren } from 'react' + +export const useSelector = (selector: (value: AppContextValue) => T): T => + useContextSelector(AppContext, selector); + +export type AppContextValue = { + apps: App[] + mutateApps: () => void + userProfile: UserProfileResponse + mutateUserProfile: () => void + pageContainerRef: React.RefObject, + useSelector: typeof useSelector, +} + +const AppContext = createContext({ + apps: [], + mutateApps: () => { }, + userProfile: { + id: '', + name: '', + email: '', + }, + mutateUserProfile: () => { }, + pageContainerRef: createRef(), + useSelector, +}) + +export type AppContextProviderProps = PropsWithChildren<{ + value: Omit +}> + +export const AppContextProvider: FC = ({ value, children }) => ( + + {children} + +) + +export const useAppContext = () => useContext(AppContext) + +export default AppContext diff --git a/web/i18n/lang/app.en.ts b/web/i18n/lang/app.en.ts index 73c2a5b1b6..aea93eb168 100644 --- a/web/i18n/lang/app.en.ts +++ b/web/i18n/lang/app.en.ts @@ -35,6 +35,10 @@ const translation = { appCreated: 'App created', appCreateFailed: 'Failed to create app', }, + emoji: { + ok: 'OK', + cancel: 'Cancel', + } } export default translation diff --git a/web/i18n/lang/app.zh.ts b/web/i18n/lang/app.zh.ts index 03fde52c35..467d85feeb 100644 --- a/web/i18n/lang/app.zh.ts +++ b/web/i18n/lang/app.zh.ts @@ -34,6 +34,10 @@ const translation = { appCreated: '应用已创建', appCreateFailed: '应用创建失败', }, + emoji: { + ok: '确认', + cancel: '取消', + } } export default 
translation diff --git a/web/i18n/lang/common.en.ts b/web/i18n/lang/common.en.ts index 6771cbc42c..fa73fbd8a8 100644 --- a/web/i18n/lang/common.en.ts +++ b/web/i18n/lang/common.en.ts @@ -50,6 +50,7 @@ const translation = { maxToken: 'Max token', maxTokenTip: 'Max tokens generated is 2,048 or 4,000, depending on the model. Prompt and completion share this limit. One token is roughly 1 English character.', + setToCurrentModelMaxTokenTip: 'Max token is updated to the maximum token of the current model 4,000.', }, tone: { Creative: 'Creative', @@ -147,12 +148,8 @@ const translation = { editKey: 'Edit', invalidApiKey: 'Invalid API key', azure: { - resourceName: 'Resource Name', - resourceNamePlaceholder: 'The name of your Azure OpenAI Resource.', - deploymentId: 'Deployment ID', - deploymentIdPlaceholder: 'The deployment name you chose when you deployed the model.', - apiVersion: 'API Version', - apiVersionPlaceholder: 'The API version to use for this operation.', + apiBase: 'API Base', + apiBasePlaceholder: 'The API Base URL of your Azure OpenAI Endpoint.', apiKey: 'API Key', apiKeyPlaceholder: 'Enter your API key here', helpTip: 'Learn Azure OpenAI Service', diff --git a/web/i18n/lang/common.zh.ts b/web/i18n/lang/common.zh.ts index a2f03a1bc1..496d27ad48 100644 --- a/web/i18n/lang/common.zh.ts +++ b/web/i18n/lang/common.zh.ts @@ -50,6 +50,7 @@ const translation = { maxToken: '最大 Token', maxTokenTip: '生成的最大令牌数为 2,048 或 4,000,取决于模型。提示和完成共享令牌数限制。一个令牌约等于 1 个英文或 4 个中文字符。', + setToCurrentModelMaxTokenTip: '最大令牌数更新为当前模型最大的令牌数 4,000。', }, tone: { Creative: '创意', @@ -148,14 +149,10 @@ const translation = { editKey: '编辑', invalidApiKey: '无效的 API 密钥', azure: { - resourceName: 'Resource Name', - resourceNamePlaceholder: 'The name of your Azure OpenAI Resource.', - deploymentId: 'Deployment ID', - deploymentIdPlaceholder: 'The deployment name you chose when you deployed the model.', - apiVersion: 'API Version', - apiVersionPlaceholder: 'The API version to use for this operation.', 
+ apiBase: 'API Base', + apiBasePlaceholder: '输入您的 Azure OpenAI API Base 地址', apiKey: 'API Key', - apiKeyPlaceholder: 'Enter your API key here', + apiKeyPlaceholder: '输入你的 API 密钥', helpTip: '了解 Azure OpenAI Service', }, openaiHosted: { diff --git a/web/middleware.ts b/web/middleware.ts index 5b17cbd673..39ae80ee76 100644 --- a/web/middleware.ts +++ b/web/middleware.ts @@ -23,7 +23,11 @@ export const getLocale = (request: NextRequest): Locale => { } // match locale - const matchedLocale = match(languages, locales, i18n.defaultLocale) as Locale + let matchedLocale:Locale = i18n.defaultLocale + try { + // If languages is ['*'], Error would happen in match function. + matchedLocale = match(languages, locales, i18n.defaultLocale) as Locale + } catch(e) {} return matchedLocale } diff --git a/web/models/app.ts b/web/models/app.ts index ddafdfbc72..8c5bfd0fab 100644 --- a/web/models/app.ts +++ b/web/models/app.ts @@ -61,6 +61,10 @@ export type SiteConfig = { export type AppListResponse = { data: App[] + has_more: boolean + limit: number + page: number + total: number } export type AppDetailResponse = App diff --git a/web/models/common.ts b/web/models/common.ts index 21a74447e1..df19701a8d 100644 --- a/web/models/common.ts +++ b/web/models/common.ts @@ -54,20 +54,29 @@ export type Member = Pick }> = ({ params }) => { - return get('apps', params) as Promise +export const fetchAppList: Fetcher }> = ({ url, params }) => { + return get(url, { params }) as Promise } export const fetchAppDetail: Fetcher = ({ url, id }) => { @@ -16,8 +16,8 @@ export const fetchAppTemplates: Fetcher = return get(url) as Promise } -export const createApp: Fetcher = ({ name, mode, config }) => { - return post('apps', { body: { name, mode, model_config: config } }) as Promise +export const createApp: Fetcher = ({ name, icon, icon_background, mode, config }) => { + return post('apps', { body: { name, icon, icon_background, mode, model_config: config } }) as Promise } export const deleteApp: Fetcher = 
(appID) => { diff --git a/web/service/base.ts b/web/service/base.ts index 737f8a1c6a..c6e841e5ac 100644 --- a/web/service/base.ts +++ b/web/service/base.ts @@ -1,4 +1,4 @@ -import { API_PREFIX, MOCK_API_PREFIX, PUBLIC_API_PREFIX, IS_CE_EDITION } from '@/config' +import { API_PREFIX, PUBLIC_API_PREFIX, IS_CE_EDITION } from '@/config' import Toast from '@/app/components/base/toast' const TIME_OUT = 100000 @@ -33,7 +33,6 @@ export type IOnError = (msg: string) => void type IOtherOptions = { isPublicAPI?: boolean - isMock?: boolean needAllResponseContent?: boolean onData?: IOnData // for stream onError?: IOnError @@ -79,7 +78,7 @@ const handleStream = (response: any, onData: IOnData, onCompleted?: IOnCompleted if (message.startsWith('data: ')) { // check if it starts with data: // console.log(message); bufferObj = JSON.parse(message.substring(6)) // remove data: and parse as json - if (bufferObj.status === 400) { + if (bufferObj.status === 400 || !bufferObj.event) { onData('', false, { conversationId: undefined, messageId: '', @@ -116,7 +115,14 @@ const handleStream = (response: any, onData: IOnData, onCompleted?: IOnCompleted read() } -const baseFetch = (url: string, fetchOptions: any, { isPublicAPI = false, isMock = false, needAllResponseContent }: IOtherOptions) => { +const baseFetch = ( + url: string, + fetchOptions: any, + { + isPublicAPI = false, + needAllResponseContent + }: IOtherOptions +) => { const options = Object.assign({}, baseOptions, fetchOptions) if (isPublicAPI) { const sharedToken = globalThis.location.pathname.split('/').slice(-1)[0] @@ -124,9 +130,6 @@ const baseFetch = (url: string, fetchOptions: any, { isPublicAPI = false, isMock } let urlPrefix = isPublicAPI ? PUBLIC_API_PREFIX : API_PREFIX - if (isMock) - urlPrefix = MOCK_API_PREFIX - let urlWithPrefix = `${urlPrefix}${url.startsWith('/') ? 
url : `/${url}`}` const { method, params, body } = options diff --git a/web/types/app.ts b/web/types/app.ts index 33ff1d5c67..a16bd92469 100644 --- a/web/types/app.ts +++ b/web/types/app.ts @@ -190,6 +190,12 @@ export type App = { id: string /** Name */ name: string + + /** Icon */ + icon: string + /** Icon Background */ + icon_background: string + /** Mode */ mode: AppMode /** Enable web app */