From 8340d775bd0806b34ddf9789c76998ce04b60b94 Mon Sep 17 00:00:00 2001 From: quicksand Date: Fri, 25 Jul 2025 09:00:26 +0800 Subject: [PATCH 001/415] Improve: support custom model parameters in auto-generator (#22924) --- api/controllers/console/app/generator.py | 7 ------- api/core/llm_generator/llm_generator.py | 17 ++++------------- 2 files changed, 4 insertions(+), 20 deletions(-) diff --git a/api/controllers/console/app/generator.py b/api/controllers/console/app/generator.py index 790369c052..4847a2cab8 100644 --- a/api/controllers/console/app/generator.py +++ b/api/controllers/console/app/generator.py @@ -1,5 +1,3 @@ -import os - from flask_login import current_user from flask_restful import Resource, reqparse @@ -29,15 +27,12 @@ class RuleGenerateApi(Resource): args = parser.parse_args() account = current_user - PROMPT_GENERATION_MAX_TOKENS = int(os.getenv("PROMPT_GENERATION_MAX_TOKENS", "512")) - try: rules = LLMGenerator.generate_rule_config( tenant_id=account.current_tenant_id, instruction=args["instruction"], model_config=args["model_config"], no_variable=args["no_variable"], - rule_config_max_tokens=PROMPT_GENERATION_MAX_TOKENS, ) except ProviderTokenNotInitError as ex: raise ProviderNotInitializeError(ex.description) @@ -64,14 +59,12 @@ class RuleCodeGenerateApi(Resource): args = parser.parse_args() account = current_user - CODE_GENERATION_MAX_TOKENS = int(os.getenv("CODE_GENERATION_MAX_TOKENS", "1024")) try: code_result = LLMGenerator.generate_code( tenant_id=account.current_tenant_id, instruction=args["instruction"], model_config=args["model_config"], code_language=args["code_language"], - max_tokens=CODE_GENERATION_MAX_TOKENS, ) except ProviderTokenNotInitError as ex: raise ProviderNotInitializeError(ex.description) diff --git a/api/core/llm_generator/llm_generator.py b/api/core/llm_generator/llm_generator.py index 331ac933c8..80f0457962 100644 --- a/api/core/llm_generator/llm_generator.py +++ b/api/core/llm_generator/llm_generator.py @@ -125,16 +125,13 @@ class LLMGenerator: return questions @classmethod - def generate_rule_config( - cls, tenant_id: str, instruction: str, model_config: dict, no_variable: bool, rule_config_max_tokens: int = 512 - ) -> dict: + def generate_rule_config(cls, tenant_id: str, instruction: str, model_config: dict, no_variable: bool) -> dict: output_parser = RuleConfigGeneratorOutputParser() error = "" error_step = "" rule_config = {"prompt": "", "variables": [], "opening_statement": "", "error": ""} - model_parameters = {"max_tokens": rule_config_max_tokens, "temperature": 0.01} - + model_parameters = model_config.get("completion_params", {}) if no_variable: prompt_template = PromptTemplateParser(WORKFLOW_RULE_CONFIG_PROMPT_GENERATE_TEMPLATE) @@ -276,12 +273,7 @@ class LLMGenerator: @classmethod def generate_code( - cls, - tenant_id: str, - instruction: str, - model_config: dict, - code_language: str = "javascript", - max_tokens: int = 1000, + cls, tenant_id: str, instruction: str, model_config: dict, code_language: str = "javascript" ) -> dict: if code_language == "python": prompt_template = PromptTemplateParser(PYTHON_CODE_GENERATOR_PROMPT_TEMPLATE) @@ -305,8 +297,7 @@ class LLMGenerator: ) prompt_messages = [UserPromptMessage(content=prompt)] - model_parameters = {"max_tokens": max_tokens, "temperature": 0.01} - + model_parameters = model_config.get("completion_params", {}) try: response = cast( LLMResult, From 8bbed5aeeae0ae397062fb7aad1fe2501fbfa72c Mon Sep 17 00:00:00 2001 From: "github-actions[bot]" <41898282+github-actions[bot]@users.noreply.github.com> Date: Fri, 25 Jul 2025 09:04:36 +0800 Subject: [PATCH 002/415] chore: translate i18n files (#22934) Co-authored-by: JzoNgKVO <27049666+JzoNgKVO@users.noreply.github.com> --- web/i18n/de-DE/common.ts | 1 + web/i18n/es-ES/common.ts | 1 + web/i18n/fa-IR/common.ts | 1 + web/i18n/fr-FR/common.ts | 1 + web/i18n/hi-IN/common.ts | 1 + web/i18n/it-IT/common.ts | 1 + web/i18n/ko-KR/common.ts | 1 + web/i18n/pl-PL/common.ts | 1 + web/i18n/pt-BR/common.ts | 1 + web/i18n/ro-RO/common.ts | 1 + web/i18n/ru-RU/common.ts | 1 + web/i18n/sl-SI/common.ts | 1 + web/i18n/th-TH/common.ts | 1 + web/i18n/tr-TR/common.ts | 1 + web/i18n/uk-UA/common.ts | 1 + web/i18n/vi-VN/common.ts | 1 + web/i18n/zh-Hant/common.ts | 1 + 17 files changed, 17 insertions(+) diff --git a/web/i18n/de-DE/common.ts b/web/i18n/de-DE/common.ts index 92e4916755..b8efe31ebc 100644 --- a/web/i18n/de-DE/common.ts +++ b/web/i18n/de-DE/common.ts @@ -237,6 +237,7 @@ const translation = { existingEmail: 'Ein Benutzer mit dieser E-Mail-Adresse existiert bereits.', emailLabel: 'Neue E-Mail', authTip: 'Sobald Ihre E-Mail geändert wurde, können Google- oder GitHub-Konten, die mit Ihrer alten E-Mail verknüpft sind, nicht mehr auf dieses Konto zugreifen.', + unAvailableEmail: 'Diese E-Mail ist vorübergehend nicht verfügbar.', }, }, members: { diff --git a/web/i18n/es-ES/common.ts b/web/i18n/es-ES/common.ts index eba82dd384..a904bd82b9 100644 --- a/web/i18n/es-ES/common.ts +++ b/web/i18n/es-ES/common.ts @@ -241,6 +241,7 @@ const translation = { verifyNew: 'Verifica tu nuevo correo electrónico', codeLabel: 'Código de verificación', authTip: 'Una vez que tu correo electrónico sea cambiado, las cuentas de Google o GitHub vinculadas a tu antiguo correo electrónico ya no podrán iniciar sesión en esta cuenta.', + unAvailableEmail: 'Este correo electrónico no está disponible temporalmente.', }, }, members: { diff --git a/web/i18n/fa-IR/common.ts b/web/i18n/fa-IR/common.ts index c30319b0d2..018fbefa81 100644 --- a/web/i18n/fa-IR/common.ts +++ b/web/i18n/fa-IR/common.ts @@ -241,6 +241,7 @@ const translation = { content1: 'اگر ادامه دهید، ما یک کد تأیید به {{email}} برای بازگشایی مجدد ارسال خواهیم کرد.', content3: 'یک ایمیل جدید وارد کنید و ما یک کد تأیید برای شما ارسال خواهیم کرد.', authTip: 'زمانی که ایمیل شما تغییر کند، حساب‌های گوگل یا گیت‌هاب مرتبط با ایمیل قدیمی شما دیگر قادر به ورود به این حساب نخواهند بود.', + unAvailableEmail: 'این ایمیل به طور موقت در دسترس نیست.', }, }, members: { diff --git a/web/i18n/fr-FR/common.ts b/web/i18n/fr-FR/common.ts index 136e7de2ef..138c7662d6 100644 --- a/web/i18n/fr-FR/common.ts +++ b/web/i18n/fr-FR/common.ts @@ -237,6 +237,7 @@ const translation = { content2: 'Votre adresse e-mail actuelle est {{email}}. Un code de vérification a été envoyé à cette adresse e-mail.', codeLabel: 'Code de vérification', content1: 'Si vous continuez, nous enverrons un code de vérification à {{email}} pour une nouvelle authentification.', + unAvailableEmail: 'Cet e-mail est temporairement indisponible.', }, }, members: { diff --git a/web/i18n/hi-IN/common.ts b/web/i18n/hi-IN/common.ts index 51e59449d5..6dfe10eef2 100644 --- a/web/i18n/hi-IN/common.ts +++ b/web/i18n/hi-IN/common.ts @@ -247,6 +247,7 @@ const translation = { content2: 'आपका वर्तमान ईमेल है {{email}}. सत्यापन कोड इस ईमेल पते पर भेजा गया है।', authTip: 'एक बार जब आपका ईमेल बदल दिया जाता है, तो आपके पुराने ईमेल से जुड़े Google या GitHub खाते इस खाते में लॉग इन नहीं कर सकेंगे।', content1: 'अगर आप जारी रखते हैं, तो हम सत्यापन के लिए {{email}} पर एक सत्यापन कोड भेजेंगे।', + unAvailableEmail: 'यह ईमेल अस्थायी रूप से अनुपलब्ध है।', }, }, members: { diff --git a/web/i18n/it-IT/common.ts b/web/i18n/it-IT/common.ts index 16991a94d8..1e74b299ef 100644 --- a/web/i18n/it-IT/common.ts +++ b/web/i18n/it-IT/common.ts @@ -249,6 +249,7 @@ const translation = { content4: 'Ti abbiamo appena inviato un codice di verifica temporaneo a {{email}}.', content1: 'Se continui, invieremo un codice di verifica a {{email}} per la riautenticazione.', sendVerifyCode: 'Invia codice di verifica', + unAvailableEmail: 'Questa email è temporaneamente non disponibile.', }, }, members: { diff --git a/web/i18n/ko-KR/common.ts b/web/i18n/ko-KR/common.ts index a5ae3fd733..06f8f19ab3 100644 --- a/web/i18n/ko-KR/common.ts +++ b/web/i18n/ko-KR/common.ts @@ -233,6 +233,7 @@ const translation = { content3: '새로운 이메일을 입력하시면 인증 코드를 보내드립니다.', content1: '계속 진행하면, 재인증을 위해 {{email}}로 인증 코드를 전송하겠습니다.', authTip: '이메일이 변경되면, 이전 이메일에 연결된 Google 또는 GitHub 계정은 더 이상 이 계정에 로그인할 수 없습니다.', + unAvailableEmail: '이 이메일은 일시적으로 사용할 수 없습니다.', }, }, members: { diff --git a/web/i18n/pl-PL/common.ts b/web/i18n/pl-PL/common.ts index 78c0f6e9fc..db9b8de950 100644 --- a/web/i18n/pl-PL/common.ts +++ b/web/i18n/pl-PL/common.ts @@ -243,6 +243,7 @@ const translation = { content2: 'Twój aktualny adres email to {{email}}. Kod weryfikacyjny został wysłany na ten adres email.', content4: 'Właśnie wysłaliśmy Ci tymczasowy kod weryfikacyjny na {{email}}.', authTip: 'Gdy twoje e-mail zostanie zmienione, konta Google lub GitHub powiązane z twoim starym e-mailem nie będą mogły już logować się do tego konta.', + unAvailableEmail: 'Ten email jest tymczasowo niedostępny.', }, }, members: { diff --git a/web/i18n/pt-BR/common.ts b/web/i18n/pt-BR/common.ts index 8166f9d28c..8366894a3f 100644 --- a/web/i18n/pt-BR/common.ts +++ b/web/i18n/pt-BR/common.ts @@ -237,6 +237,7 @@ const translation = { newEmail: 'Crie um novo endereço de e-mail', content2: 'Seu email atual é {{email}}. O código de verificação foi enviado para este endereço de email.', content1: 'Se você continuar, enviaremos um código de verificação para {{email}} para reautenticação.', + unAvailableEmail: 'Este e-mail está temporariamente indisponível.', }, }, members: { diff --git a/web/i18n/ro-RO/common.ts b/web/i18n/ro-RO/common.ts index dbc00bb134..2e578768f6 100644 --- a/web/i18n/ro-RO/common.ts +++ b/web/i18n/ro-RO/common.ts @@ -237,6 +237,7 @@ const translation = { content4: 'Tocmai ți-am trimis un cod de verificare temporar la {{email}}.', content2: 'Adresa ta de email curentă este {{email}}. Codul de verificare a fost trimis la această adresă de email.', emailLabel: 'Email nou', + unAvailableEmail: 'Acest email este temporar indisponibil.', }, }, members: { diff --git a/web/i18n/ru-RU/common.ts b/web/i18n/ru-RU/common.ts index 442efa3782..c761bd9c4c 100644 --- a/web/i18n/ru-RU/common.ts +++ b/web/i18n/ru-RU/common.ts @@ -241,6 +241,7 @@ const translation = { content3: 'Введите новый адрес электронной почты, и мы отправим вам код подтверждения.', content1: 'Если вы продолжите, мы отправим код подтверждения на {{email}} для повторной аутентификации.', authTip: 'После изменения вашего адреса электронной почты учетные записи Google или GitHub, связанные с вашим старым адресом, больше не смогут войти в эту учетную запись.', + unAvailableEmail: 'Этот email временно недоступен.', }, }, members: { diff --git a/web/i18n/sl-SI/common.ts b/web/i18n/sl-SI/common.ts index 43fe94b4d0..c7fbf9d7f5 100644 --- a/web/i18n/sl-SI/common.ts +++ b/web/i18n/sl-SI/common.ts @@ -241,6 +241,7 @@ const translation = { newEmail: 'Ustvarite nov e-poštni naslov', content2: 'Vaš trenutni elektronski naslov je {{email}}. Koda za preverjanje je bila poslana na ta elektronski naslov.', authTip: 'Ko bo vaš e-poštni naslov spremenjen, se računi Google ali GitHub, povezani z vašim starim e-poštnim naslovom, ne bodo mogli več prijaviti v ta račun.', + unAvailableEmail: 'Ta e-pošta trenutno ni na voljo.', }, }, members: { diff --git a/web/i18n/th-TH/common.ts b/web/i18n/th-TH/common.ts index d956c36716..dd7dd31cb1 100644 --- a/web/i18n/th-TH/common.ts +++ b/web/i18n/th-TH/common.ts @@ -236,6 +236,7 @@ const translation = { resendCount: 'ส่งอีกครั้งใน {{count}} วินาที', authTip: 'เมื่ออีเมลของคุณถูกเปลี่ยนแปลง บัญชี Google หรือบัญชี GitHub ที่เชื่อมโยงกับอีเมลเก่าของคุณจะไม่สามารถเข้าสู่ระบบบัญชีนี้ได้อีกต่อไป.', title: 'เปลี่ยนอีเมล', + unAvailableEmail: 'อีเมลนี้ไม่สามารถใช้งานได้ชั่วคราว.', }, }, members: { diff --git a/web/i18n/tr-TR/common.ts b/web/i18n/tr-TR/common.ts index d6caeba290..d907291146 100644 --- a/web/i18n/tr-TR/common.ts +++ b/web/i18n/tr-TR/common.ts @@ -241,6 +241,7 @@ const translation = { codeLabel: 'Doğrulama kodu', content2: 'Sizin mevcut e-posta adresiniz {{email}}. Doğrulama kodu bu e-posta adresine gönderilmiştir.', authTip: 'E-posta adresiniz değiştiğinde, eski e-posta adresinize bağlı Google veya GitHub hesapları bu hesaba giriş yapamayacak.', + unAvailableEmail: 'Bu e-posta geçici olarak kullanılamıyor.', }, }, members: { diff --git a/web/i18n/uk-UA/common.ts b/web/i18n/uk-UA/common.ts index 1ec367d481..fca3674e12 100644 --- a/web/i18n/uk-UA/common.ts +++ b/web/i18n/uk-UA/common.ts @@ -237,6 +237,7 @@ const translation = { content3: 'Введіть нову електронну адресу, і ми надішлемо вам код підтвердження.', authTip: 'Коли ви зміните свою електронну адресу, облікові записи Google або GitHub, пов\'язані з вашою старою електронною адресою, більше не зможуть увійти в цей обліковий запис.', content1: 'Якщо ви продовжите, ми надішлемо код підтвердження на {{email}} для повторної аутентифікації.', + unAvailableEmail: 'Цей електронний лист тимчасово недоступний.', }, }, members: { diff --git a/web/i18n/vi-VN/common.ts b/web/i18n/vi-VN/common.ts index 084c7bcb48..24058264d3 100644 --- a/web/i18n/vi-VN/common.ts +++ b/web/i18n/vi-VN/common.ts @@ -237,6 +237,7 @@ const translation = { verifyEmail: 'Xác minh email hiện tại của bạn', codePlaceholder: 'Dán mã 6 chữ số', authTip: 'Khi email của bạn được thay đổi, các tài khoản Google hoặc GitHub liên kết với email cũ của bạn sẽ không còn có thể đăng nhập vào tài khoản này.', + unAvailableEmail: 'Email này hiện không khả dụng tạm thời.', }, }, members: { diff --git a/web/i18n/zh-Hant/common.ts b/web/i18n/zh-Hant/common.ts index 9f71b13d61..6404d0e003 100644 --- a/web/i18n/zh-Hant/common.ts +++ b/web/i18n/zh-Hant/common.ts @@ -237,6 +237,7 @@ const translation = { existingEmail: '此電子郵件的用戶已經存在。', authTip: '一旦您的電子郵件更改,與您的舊電子郵件相關聯的 Google 或 GitHub 帳戶將無法再登錄此帳戶。', resendTip: '沒有收到代碼嗎?', + unAvailableEmail: '此郵件暫時無法使用。', }, }, members: { From 89415ac453ff9d2e2025ecd0f61dc95d85f741c0 Mon Sep 17 00:00:00 2001 From: J2M3L2 <10914840+J2M3L2@users.noreply.github.com> Date: Fri, 25 Jul 2025 09:05:47 +0800 Subject: [PATCH 003/415] fix: improve PostgreSQL healthcheck cmd to avoid fatal log errors (#22749) (#22917) --- docker/docker-compose.middleware.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docker/docker-compose.middleware.yaml b/docker/docker-compose.middleware.yaml index 3408fef0c2..9f7cc72586 100644 --- a/docker/docker-compose.middleware.yaml +++ b/docker/docker-compose.middleware.yaml @@ -20,7 +20,7 @@ services: ports: - "${EXPOSE_POSTGRES_PORT:-5432}:5432" healthcheck: - test: [ "CMD", "pg_isready" ] + test: [ 'CMD', 'pg_isready', '-h', 'db', '-U', '${PGUSER:-postgres}', '-d', '${POSTGRES_DB:-dify}' ] interval: 1s timeout: 3s retries: 30 From 88952cbb7a7bc629523f1b3c9b800956bed5152a Mon Sep 17 00:00:00 2001 From: Jason Young <44939412+farion1231@users.noreply.github.com> Date: Fri, 25 Jul 2025 10:30:19 +0800 Subject: [PATCH 004/415] test: add comprehensive tests for file_factory build_from_mapping (#22926) --- .../factories/test_build_from_mapping.py | 84 ++++++++++++++++++- 1 file changed, 81 insertions(+), 3 deletions(-) diff --git a/api/tests/unit_tests/factories/test_build_from_mapping.py b/api/tests/unit_tests/factories/test_build_from_mapping.py index d42c4412f5..39280c9267 100644 --- a/api/tests/unit_tests/factories/test_build_from_mapping.py +++ b/api/tests/unit_tests/factories/test_build_from_mapping.py @@ -21,7 +21,7 @@ TEST_REMOTE_URL = "http://example.com/test.jpg" # Test Config TEST_CONFIG = FileUploadConfig( - allowed_file_types=["image", "document"], + allowed_file_types=[FileType.IMAGE, FileType.DOCUMENT], allowed_file_extensions=[".jpg", ".pdf"], allowed_file_upload_methods=[FileTransferMethod.LOCAL_FILE, FileTransferMethod.TOOL_FILE], number_limits=10, @@ -171,10 +171,10 @@ def test_build_without_type_specification(mock_upload_file): mapping = { "transfer_method": "local_file", "upload_file_id": TEST_UPLOAD_FILE_ID, - # leave out the type + # type field is intentionally omitted } file = build_from_mapping(mapping=mapping, tenant_id=TEST_TENANT_ID) - # It should automatically infer the type as "image" based on the file extension + # Should automatically infer the type as "image" based on the file extension assert file.type == FileType.IMAGE @@ -194,3 +194,81 @@ def test_file_validation_with_config(mock_upload_file, file_type, should_pass, e else: with pytest.raises(ValueError, match=expected_error): build_from_mapping(mapping=mapping, tenant_id=TEST_TENANT_ID, config=TEST_CONFIG) + + +def test_invalid_transfer_method(): + """Test that invalid transfer method raises ValueError.""" + mapping = { + "transfer_method": "invalid_method", + "upload_file_id": TEST_UPLOAD_FILE_ID, + "type": "image", + } + with pytest.raises(ValueError, match="No matching enum found for value 'invalid_method'"): + build_from_mapping(mapping=mapping, tenant_id=TEST_TENANT_ID) + + +def test_invalid_uuid_format(): + """Test that invalid UUID format raises ValueError.""" + mapping = { + "transfer_method": "local_file", + "upload_file_id": "not-a-valid-uuid", + "type": "image", + } + with pytest.raises(ValueError, match="Invalid upload file id format"): + build_from_mapping(mapping=mapping, tenant_id=TEST_TENANT_ID) + + +def test_tenant_mismatch(): + """Test that tenant mismatch raises security error.""" + # Create a mock upload file with a different tenant_id + mock_file = MagicMock(spec=UploadFile) + mock_file.id = TEST_UPLOAD_FILE_ID + mock_file.tenant_id = "different_tenant_id" + mock_file.name = "test.jpg" + mock_file.extension = "jpg" + mock_file.mime_type = "image/jpeg" + mock_file.source_url = TEST_REMOTE_URL + mock_file.size = 1024 + mock_file.key = "test_key" + + # Mock the database query to return None (no file found for this tenant) + with patch("factories.file_factory.db.session.scalar", return_value=None): + mapping = local_file_mapping() + with pytest.raises(ValueError, match="Invalid upload file"): + build_from_mapping(mapping=mapping, tenant_id=TEST_TENANT_ID) + + +def test_disallowed_file_types(mock_upload_file): + """Test that disallowed file types are rejected.""" + # Config that only allows image and document types + restricted_config = FileUploadConfig( + allowed_file_types=[FileType.IMAGE, FileType.DOCUMENT], + ) + + # Try to upload a video file + mapping = local_file_mapping(file_type="video") + with pytest.raises(ValueError, match="File validation failed"): + build_from_mapping(mapping=mapping, tenant_id=TEST_TENANT_ID, config=restricted_config) + + +def test_disallowed_extensions(mock_upload_file): + """Test that disallowed file extensions are rejected for custom type.""" + # Mock a file with .exe extension + mock_upload_file.return_value.extension = "exe" + mock_upload_file.return_value.name = "malicious.exe" + mock_upload_file.return_value.mime_type = "application/x-msdownload" + + # Config that only allows specific extensions for custom files + restricted_config = FileUploadConfig( + allowed_file_extensions=[".txt", ".csv", ".json"], + ) + + # Mapping without specifying type (will be detected as custom) + mapping = { + "transfer_method": "local_file", + "upload_file_id": TEST_UPLOAD_FILE_ID, + "type": "custom", + } + + with pytest.raises(ValueError, match="File validation failed"): + build_from_mapping(mapping=mapping, tenant_id=TEST_TENANT_ID, config=restricted_config) From 570aee5fe6b6bc29d6c9463625ce5c0412d035b9 Mon Sep 17 00:00:00 2001 From: HyaCinth <88471803+HyaCiovo@users.noreply.github.com> Date: Fri, 25 Jul 2025 10:30:52 +0800 Subject: [PATCH 005/415] fix: Optimize AppInfo component styles and fix CustomizeModal step display (#22930) (#22935) --- web/app/components/app-sidebar/app-info.tsx | 2 +- web/app/components/app/overview/customize/index.tsx | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/web/app/components/app-sidebar/app-info.tsx b/web/app/components/app-sidebar/app-info.tsx index c35047bbc5..58c9f7e5ca 100644 --- a/web/app/components/app-sidebar/app-info.tsx +++ b/web/app/components/app-sidebar/app-info.tsx @@ -322,7 +322,7 @@ const AppInfo = ({ expand, onlyShowDetail = false, openState = false, onDetailEx className='flex flex-1 flex-col gap-2 overflow-auto px-2 py-1' /> -
+
) } @@ -169,6 +195,15 @@ const HeaderOptions: FC = ({ /> ) } + { + showClearConfirm && ( + setShowClearConfirm(false)} + onConfirm={handleConfirmed} + /> + ) + }
) } diff --git a/web/i18n/en-US/app-annotation.ts b/web/i18n/en-US/app-annotation.ts index 43f24a7619..c0a8008d9a 100644 --- a/web/i18n/en-US/app-annotation.ts +++ b/web/i18n/en-US/app-annotation.ts @@ -16,7 +16,8 @@ const translation = { addAnnotation: 'Add Annotation', bulkImport: 'Bulk Import', bulkExport: 'Bulk Export', - clearAll: 'Clear All Annotation', + clearAll: 'Delete All', + clearAllConfirm: 'Delete all annotations?', }, }, editModal: { diff --git a/web/i18n/ja-JP/app-annotation.ts b/web/i18n/ja-JP/app-annotation.ts index 38b891d9d8..7dbdfe018f 100644 --- a/web/i18n/ja-JP/app-annotation.ts +++ b/web/i18n/ja-JP/app-annotation.ts @@ -18,7 +18,8 @@ const translation = { addAnnotation: '注釈を追加', bulkImport: '一括インポート', bulkExport: '一括エクスポート', - clearAll: 'すべての注釈をクリア', + clearAll: 'すべて削除', + clearAllConfirm: 'すべての寸法を削除?', }, }, editModal: { diff --git a/web/i18n/zh-Hans/app-annotation.ts b/web/i18n/zh-Hans/app-annotation.ts index 3a6cacf5b5..44d075715f 100644 --- a/web/i18n/zh-Hans/app-annotation.ts +++ b/web/i18n/zh-Hans/app-annotation.ts @@ -18,7 +18,8 @@ const translation = { addAnnotation: '添加标注', bulkImport: '批量导入', bulkExport: '批量导出', - clearAll: '删除所有标注', + clearAll: '删除所有', + clearAllConfirm: '删除所有标注?', }, }, editModal: { diff --git a/web/service/annotation.ts b/web/service/annotation.ts index 5096a4f58a..9f025f8eb9 100644 --- a/web/service/annotation.ts +++ b/web/service/annotation.ts @@ -63,3 +63,7 @@ export const delAnnotation = (appId: string, annotationId: string) => { export const fetchHitHistoryList = (appId: string, annotationId: string, params: Record) => { return get(`apps/${appId}/annotations/${annotationId}/hit-histories`, { params }) } + +export const clearAllAnnotations = (appId: string): Promise => { + return del(`apps/${appId}/annotations`) +} From faaf828dff4cb0b6c2055d7872b0abb95d8fbd6b Mon Sep 17 00:00:00 2001 From: Yongtao Huang Date: Fri, 25 Jul 2025 23:38:16 +0800 Subject: [PATCH 012/415] Remove redundant condition check (#22983) Signed-off-by: Yongtao Huang --- .../knowledge_retrieval_node.py | 55 +++++++++---------- 1 file changed, 27 insertions(+), 28 deletions(-) diff --git a/api/core/workflow/nodes/knowledge_retrieval/knowledge_retrieval_node.py b/api/core/workflow/nodes/knowledge_retrieval/knowledge_retrieval_node.py index 34b0afc75d..e041e217ca 100644 --- a/api/core/workflow/nodes/knowledge_retrieval/knowledge_retrieval_node.py +++ b/api/core/workflow/nodes/knowledge_retrieval/knowledge_retrieval_node.py @@ -453,35 +453,34 @@ class KnowledgeRetrievalNode(BaseNode): elif node_data.metadata_filtering_mode == "manual": if node_data.metadata_filtering_conditions: conditions = [] - if node_data.metadata_filtering_conditions: - for sequence, condition in enumerate(node_data.metadata_filtering_conditions.conditions): # type: ignore - metadata_name = condition.name - expected_value = condition.value - if expected_value is not None and condition.comparison_operator not in ("empty", "not empty"): - if isinstance(expected_value, str): - expected_value = self.graph_runtime_state.variable_pool.convert_template( - expected_value - ).value[0] - if expected_value.value_type in {"number", "integer", "float"}: # type: ignore - expected_value = expected_value.value # type: ignore - elif expected_value.value_type == "string": # type: ignore - expected_value = re.sub(r"[\r\n\t]+", " ", expected_value.text).strip() # type: ignore - else: - raise ValueError("Invalid expected metadata value type") - conditions.append( - Condition( - name=metadata_name, - comparison_operator=condition.comparison_operator, - value=expected_value, - ) - ) - filters = self._process_metadata_filter_func( - sequence, - condition.comparison_operator, - metadata_name, - expected_value, - filters, + for sequence, condition in enumerate(node_data.metadata_filtering_conditions.conditions): # type: ignore + metadata_name = condition.name + expected_value = condition.value + if expected_value is not None and condition.comparison_operator not in ("empty", "not empty"): + if isinstance(expected_value, str): + expected_value = self.graph_runtime_state.variable_pool.convert_template( + expected_value + ).value[0] + if expected_value.value_type in {"number", "integer", "float"}: # type: ignore + expected_value = expected_value.value # type: ignore + elif expected_value.value_type == "string": # type: ignore + expected_value = re.sub(r"[\r\n\t]+", " ", expected_value.text).strip() # type: ignore + else: + raise ValueError("Invalid expected metadata value type") + conditions.append( + Condition( + name=metadata_name, + comparison_operator=condition.comparison_operator, + value=expected_value, ) + ) + filters = self._process_metadata_filter_func( + sequence, + condition.comparison_operator, + metadata_name, + expected_value, + filters, + ) metadata_condition = MetadataCondition( logical_operator=node_data.metadata_filtering_conditions.logical_operator, conditions=conditions, From 773932b1e7374e7997021271158cd3bc450955bc Mon Sep 17 00:00:00 2001 From: znn Date: Sat, 26 Jul 2025 08:07:52 +0530 Subject: [PATCH 013/415] adding mcp error in toast (#22987) --- web/app/components/tools/mcp/modal.tsx | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/web/app/components/tools/mcp/modal.tsx b/web/app/components/tools/mcp/modal.tsx index 88e831bc3a..b7202f5242 100644 --- a/web/app/components/tools/mcp/modal.tsx +++ b/web/app/components/tools/mcp/modal.tsx @@ -95,8 +95,12 @@ const MCPModal = ({ setAppIcon({ type: 'image', url: res.url, fileId: extractFileId(res.url) || '' }) } catch (e) { + let errorMessage = 'Failed to fetch remote icon' + const errorData = await (e as Response).json() + if (errorData?.code) + errorMessage = `Upload failed: ${errorData.code}` console.error('Failed to fetch remote icon:', e) - Toast.notify({ type: 'warning', message: 'Failed to fetch remote icon' }) + Toast.notify({ type: 'warning', message: errorMessage }) } finally { setIsFetchingIcon(false) From 1446f19709329ba3701a5e261fc30ba0f9ef8040 Mon Sep 17 00:00:00 2001 From: Dylan Jiang <74290639+weijunjiang123@users.noreply.github.com> Date: Sat, 26 Jul 2025 10:53:59 +0800 Subject: [PATCH 014/415] fix: Update trigger styles for disabled state in PureSelect component (#22986) --- web/app/components/base/select/pure.tsx | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/web/app/components/base/select/pure.tsx b/web/app/components/base/select/pure.tsx index 046f32676a..0905cd56ff 100644 --- a/web/app/components/base/select/pure.tsx +++ b/web/app/components/base/select/pure.tsx @@ -92,12 +92,13 @@ const PureSelect = ({ > !disabled && handleOpenChange(!mergedOpen)} - asChild - > + asChild >
From 3328addb58f522f4d72e7a97104557499130b567 Mon Sep 17 00:00:00 2001 From: lyzno1 <92089059+lyzno1@users.noreply.github.com> Date: Sat, 26 Jul 2025 18:28:28 +0800 Subject: [PATCH 015/415] fix: eliminate dark mode flicker by moving ThemeProvider to root level (#22996) --- web/__tests__/real-browser-flicker.test.tsx | 445 ++++++++++++++++++++ web/app/layout.tsx | 27 +- 2 files changed, 459 insertions(+), 13 deletions(-) create mode 100644 web/__tests__/real-browser-flicker.test.tsx diff --git a/web/__tests__/real-browser-flicker.test.tsx b/web/__tests__/real-browser-flicker.test.tsx new file mode 100644 index 0000000000..cf3abd5f80 --- /dev/null +++ b/web/__tests__/real-browser-flicker.test.tsx @@ -0,0 +1,445 @@ +/** + * Real Browser Environment Dark Mode Flicker Test + * + * This test attempts to simulate real browser refresh scenarios including: + * 1. SSR HTML generation phase + * 2. Client-side JavaScript loading + * 3. Theme system initialization + * 4. CSS styles application timing + */ + +import { render, screen, waitFor } from '@testing-library/react' +import { ThemeProvider } from 'next-themes' +import useTheme from '@/hooks/use-theme' +import { useEffect, useState } from 'react' + +// Setup browser environment for testing +const setupMockEnvironment = (storedTheme: string | null, systemPrefersDark = false) => { + // Mock localStorage + const mockStorage = { + getItem: jest.fn((key: string) => { + if (key === 'theme') return storedTheme + return null + }), + setItem: jest.fn(), + removeItem: jest.fn(), + } + + // Mock system theme preference + const mockMatchMedia = jest.fn((query: string) => ({ + matches: query.includes('dark') && systemPrefersDark, + media: query, + addListener: jest.fn(), + removeListener: jest.fn(), + })) + + if (typeof window !== 'undefined') { + Object.defineProperty(window, 'localStorage', { + value: mockStorage, + configurable: true, + }) + + Object.defineProperty(window, 'matchMedia', { + value: mockMatchMedia, + configurable: true, + }) + } + + return { mockStorage, mockMatchMedia } +} + +// Simulate real page component based on Dify's actual theme usage +const PageComponent = () => { + const [mounted, setMounted] = useState(false) + const { theme } = useTheme() + + useEffect(() => { + setMounted(true) + }, []) + + // Simulate common theme usage pattern in Dify + const isDark = mounted ? theme === 'dark' : false + + return ( +
+
+

+ Dify Application +

+
+ Current Theme: {mounted ? theme : 'unknown'} +
+
+ Appearance: {isDark ? 'dark' : 'light'} +
+
+
+ ) +} + +const TestThemeProvider = ({ children }: { children: React.ReactNode }) => ( + + {children} + +) + +describe('Real Browser Environment Dark Mode Flicker Test', () => { + beforeEach(() => { + jest.clearAllMocks() + }) + + describe('Page Refresh Scenario Simulation', () => { + test('simulates complete page loading process with dark theme', async () => { + // Setup: User previously selected dark mode + setupMockEnvironment('dark') + + render( + + + , + ) + + // Check initial client-side rendering state + const initialState = { + theme: screen.getByTestId('theme-indicator').textContent, + appearance: screen.getByTestId('visual-appearance').textContent, + } + console.log('Initial client state:', initialState) + + // Wait for theme system to fully initialize + await waitFor(() => { + expect(screen.getByTestId('theme-indicator')).toHaveTextContent('Current Theme: dark') + }) + + const finalState = { + theme: screen.getByTestId('theme-indicator').textContent, + appearance: screen.getByTestId('visual-appearance').textContent, + } + console.log('Final state:', finalState) + + // Document the state change - this is the source of flicker + console.log('State change detection: Initial -> Final') + }) + + test('handles light theme correctly', async () => { + setupMockEnvironment('light') + + render( + + + , + ) + + await waitFor(() => { + expect(screen.getByTestId('theme-indicator')).toHaveTextContent('Current Theme: light') + }) + + expect(screen.getByTestId('visual-appearance')).toHaveTextContent('Appearance: light') + }) + + test('handles system theme with dark preference', async () => { + setupMockEnvironment('system', true) // system theme, dark preference + + render( + + + , + ) + + await waitFor(() => { + expect(screen.getByTestId('theme-indicator')).toHaveTextContent('Current Theme: dark') + }) + + expect(screen.getByTestId('visual-appearance')).toHaveTextContent('Appearance: dark') + }) + + test('handles system theme with light preference', async () => { + setupMockEnvironment('system', false) // system theme, light preference + + render( + + + , + ) + + await waitFor(() => { + expect(screen.getByTestId('theme-indicator')).toHaveTextContent('Current Theme: light') + }) + + expect(screen.getByTestId('visual-appearance')).toHaveTextContent('Appearance: light') + }) + + test('handles no stored theme (defaults to system)', async () => { + setupMockEnvironment(null, false) // no stored theme, system prefers light + + render( + + + , + ) + + await waitFor(() => { + expect(screen.getByTestId('theme-indicator')).toHaveTextContent('Current Theme: light') + }) + }) + + test('measures timing window of style changes', async () => { + setupMockEnvironment('dark') + + const timingData: Array<{ phase: string; timestamp: number; styles: any }> = [] + + const TimingPageComponent = () => { + const [mounted, setMounted] = useState(false) + const { theme } = useTheme() + const isDark = mounted ? theme === 'dark' : false + + // Record timing and styles for each render phase + const currentStyles = { + backgroundColor: isDark ? '#1f2937' : '#ffffff', + color: isDark ? '#ffffff' : '#000000', + } + + timingData.push({ + phase: mounted ? 'CSR' : 'Initial', + timestamp: performance.now(), + styles: currentStyles, + }) + + useEffect(() => { + setMounted(true) + }, []) + + return ( +
+
+ Phase: {mounted ? 'CSR' : 'Initial'} | Theme: {theme} | Visual: {isDark ? 'dark' : 'light'} +
+
+ ) + } + + render( + + + , + ) + + await waitFor(() => { + expect(screen.getByTestId('timing-status')).toHaveTextContent('Phase: CSR') + }) + + // Analyze timing and style changes + console.log('\n=== Style Change Timeline ===') + timingData.forEach((data, index) => { + console.log(`${index + 1}. ${data.phase}: bg=${data.styles.backgroundColor}, color=${data.styles.color}`) + }) + + // Check if there are style changes (this is visible flicker) + const hasStyleChange = timingData.length > 1 + && timingData[0].styles.backgroundColor !== timingData[timingData.length - 1].styles.backgroundColor + + if (hasStyleChange) + console.log('⚠️ Style changes detected - this causes visible flicker') + else + console.log('✅ No style changes detected') + + expect(timingData.length).toBeGreaterThan(1) + }) + }) + + describe('CSS Application Timing Tests', () => { + test('checks CSS class changes causing flicker', async () => { + setupMockEnvironment('dark') + + const cssStates: Array<{ className: string; timestamp: number }> = [] + + const CSSTestComponent = () => { + const [mounted, setMounted] = useState(false) + const { theme } = useTheme() + const isDark = mounted ? theme === 'dark' : false + + // Simulate Tailwind CSS class application + const className = `min-h-screen ${isDark ? 'bg-gray-900 text-white' : 'bg-white text-black'}` + + cssStates.push({ + className, + timestamp: performance.now(), + }) + + useEffect(() => { + setMounted(true) + }, []) + + return ( +
+
Classes: {className}
+
+ ) + } + + render( + + + , + ) + + await waitFor(() => { + expect(screen.getByTestId('css-classes')).toHaveTextContent('bg-gray-900 text-white') + }) + + console.log('\n=== CSS Class Change Detection ===') + cssStates.forEach((state, index) => { + console.log(`${index + 1}. ${state.className}`) + }) + + // Check if CSS classes have changed + const hasCSSChange = cssStates.length > 1 + && cssStates[0].className !== cssStates[cssStates.length - 1].className + + if (hasCSSChange) { + console.log('⚠️ CSS class changes detected - may cause style flicker') + console.log(`From: "${cssStates[0].className}"`) + console.log(`To: "${cssStates[cssStates.length - 1].className}"`) + } + + expect(hasCSSChange).toBe(true) // We expect to see this change + }) + }) + + describe('Edge Cases and Error Handling', () => { + test('handles localStorage access errors gracefully', async () => { + // Mock localStorage to throw an error + const mockStorage = { + getItem: jest.fn(() => { + throw new Error('LocalStorage access denied') + }), + setItem: jest.fn(), + removeItem: jest.fn(), + } + + if (typeof window !== 'undefined') { + Object.defineProperty(window, 'localStorage', { + value: mockStorage, + configurable: true, + }) + } + + render( + + + , + ) + + // Should fallback gracefully without crashing + await waitFor(() => { + expect(screen.getByTestId('theme-indicator')).toBeInTheDocument() + }) + + // Should default to light theme when localStorage fails + expect(screen.getByTestId('visual-appearance')).toHaveTextContent('Appearance: light') + }) + + test('handles invalid theme values in localStorage', async () => { + setupMockEnvironment('invalid-theme-value') + + render( + + + , + ) + + await waitFor(() => { + expect(screen.getByTestId('theme-indicator')).toBeInTheDocument() + }) + + // Should handle invalid values gracefully + const themeIndicator = screen.getByTestId('theme-indicator') + expect(themeIndicator).toBeInTheDocument() + }) + }) + + describe('Performance and Regression Tests', () => { + test('verifies ThemeProvider position fix reduces initialization delay', async () => { + const performanceMarks: Array<{ event: string; timestamp: number }> = [] + + const PerformanceTestComponent = () => { + const [mounted, setMounted] = useState(false) + const { theme } = useTheme() + + performanceMarks.push({ event: 'component-render', timestamp: performance.now() }) + + useEffect(() => { + performanceMarks.push({ event: 'mount-start', timestamp: performance.now() }) + setMounted(true) + performanceMarks.push({ event: 'mount-complete', timestamp: performance.now() }) + }, []) + + useEffect(() => { + if (theme) + performanceMarks.push({ event: 'theme-available', timestamp: performance.now() }) + }, [theme]) + + return ( +
+ Mounted: {mounted.toString()} | Theme: {theme || 'loading'} +
+ ) + } + + setupMockEnvironment('dark') + + render( + + + , + ) + + await waitFor(() => { + expect(screen.getByTestId('performance-test')).toHaveTextContent('Theme: dark') + }) + + // Analyze performance timeline + console.log('\n=== Performance Timeline ===') + performanceMarks.forEach((mark) => { + console.log(`${mark.event}: ${mark.timestamp.toFixed(2)}ms`) + }) + + expect(performanceMarks.length).toBeGreaterThan(3) + }) + }) + + describe('Solution Requirements Definition', () => { + test('defines technical requirements to eliminate flicker', () => { + const technicalRequirements = { + ssrConsistency: 'SSR and CSR must render identical initial styles', + synchronousDetection: 'Theme detection must complete synchronously before first render', + noStyleChanges: 'No visible style changes should occur after hydration', + performanceImpact: 'Solution should not significantly impact page load performance', + browserCompatibility: 'Must work consistently across all major browsers', + } + + console.log('\n=== Technical Requirements ===') + Object.entries(technicalRequirements).forEach(([key, requirement]) => { + console.log(`${key}: ${requirement}`) + expect(requirement).toBeDefined() + }) + + // A successful solution should pass all these requirements + }) + }) +}) diff --git a/web/app/layout.tsx b/web/app/layout.tsx index 0f0ea0f705..46afd95b97 100644 --- a/web/app/layout.tsx +++ b/web/app/layout.tsx @@ -62,24 +62,25 @@ const LocaleLayout = async ({ className="color-scheme h-full select-auto" {...datasetMap} > - - - - + + + + {children} - - - - + + + + From 5411fd3757e39a6ab61769b1322512b48702492c Mon Sep 17 00:00:00 2001 From: Yongtao Huang Date: Sat, 26 Jul 2025 18:29:03 +0800 Subject: [PATCH 016/415] Fix: correct misplaced `ensure_ascii=False` (#22997) Signed-off-by: Yongtao Huang --- .../nodes/question_classifier/question_classifier_node.py | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/api/core/workflow/nodes/question_classifier/question_classifier_node.py b/api/core/workflow/nodes/question_classifier/question_classifier_node.py index 15012fa48d..3e4984ecd5 100644 --- a/api/core/workflow/nodes/question_classifier/question_classifier_node.py +++ b/api/core/workflow/nodes/question_classifier/question_classifier_node.py @@ -385,9 +385,8 @@ class QuestionClassifierNode(BaseNode): text=QUESTION_CLASSIFIER_COMPLETION_PROMPT.format( histories=memory_str, input_text=input_text, - categories=json.dumps(categories), + categories=json.dumps(categories, ensure_ascii=False), classification_instructions=instruction, - ensure_ascii=False, ) ) From e0fe158f0bd3c4599a4a5874b72f5f1a7879d388 Mon Sep 17 00:00:00 2001 From: znn Date: Sun, 27 Jul 2025 06:40:04 +0530 Subject: [PATCH 017/415] node title number on copied iteration node (#23004) --- .../workflow/nodes/iteration/use-interactions.ts | 10 +++++++++- web/app/components/workflow/types.ts | 4 ++++ 2 files changed, 13 insertions(+), 1 deletion(-) diff --git a/web/app/components/workflow/nodes/iteration/use-interactions.ts b/web/app/components/workflow/nodes/iteration/use-interactions.ts index c294cfd6aa..e0c0b222aa 100644 --- a/web/app/components/workflow/nodes/iteration/use-interactions.ts +++ b/web/app/components/workflow/nodes/iteration/use-interactions.ts @@ -4,6 +4,7 @@ import { useTranslation } from 'react-i18next' import { useStoreApi } from 'reactflow' import type { BlockEnum, + ChildNodeTypeCount, Node, } from '../../types' import { @@ -113,10 +114,17 @@ export const useNodeIterationInteractions = () => { const nodes = getNodes() const childrenNodes = nodes.filter(n => n.parentId === nodeId && n.type !== CUSTOM_ITERATION_START_NODE) const newIdMapping = { ...idMapping } + const childNodeTypeCount: ChildNodeTypeCount = {} const copyChildren = childrenNodes.map((child, index) => { const childNodeType = child.data.type as BlockEnum const nodesWithSameType = nodes.filter(node => node.data.type === childNodeType) + + if(!childNodeTypeCount[childNodeType]) + childNodeTypeCount[childNodeType] = nodesWithSameType.length + 1 + else + childNodeTypeCount[childNodeType] = childNodeTypeCount[childNodeType] + 1 + const { newNode } = generateNewNode({ type: getNodeCustomTypeByNodeDataType(childNodeType), data: { @@ -126,7 +134,7 @@ export const useNodeIterationInteractions = () => { _isBundled: false, _connectedSourceHandleIds: [], _connectedTargetHandleIds: [], - title: nodesWithSameType.length > 0 ? `${t(`workflow.blocks.${childNodeType}`)} ${nodesWithSameType.length + 1}` : t(`workflow.blocks.${childNodeType}`), + title: nodesWithSameType.length > 0 ? `${t(`workflow.blocks.${childNodeType}`)} ${childNodeTypeCount[childNodeType]}` : t(`workflow.blocks.${childNodeType}`), iteration_id: newNodeId, }, position: child.position, diff --git a/web/app/components/workflow/types.ts b/web/app/components/workflow/types.ts index 5f36956798..d8153cf08f 100644 --- a/web/app/components/workflow/types.ts +++ b/web/app/components/workflow/types.ts @@ -446,3 +446,7 @@ export enum VersionHistoryContextMenuOptions { edit = 'edit', delete = 'delete', } + +export interface ChildNodeTypeCount { + [key: string]: number; +} From d776a7cde79750fd5fea8134257abb2a4286feed Mon Sep 17 00:00:00 2001 From: znn Date: Sun, 27 Jul 2025 06:49:13 +0530 Subject: [PATCH 018/415] adding LANG LC_ALL PYTHONIOENCODING UTF-8 (#22928) Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com> --- api/.env.example | 5 +++++ api/Dockerfile | 5 +++++ api/docker/entrypoint.sh | 5 +++++ docker/.env.example | 5 +++++ docker/docker-compose.yaml | 3 +++ 5 files changed, 23 insertions(+) diff --git a/api/.env.example b/api/.env.example index 80b1c12cd8..18f2dbf647 100644 --- a/api/.env.example +++ b/api/.env.example @@ -4,6 +4,11 @@ # Alternatively you can set it with `SECRET_KEY` environment variable. SECRET_KEY= +# Ensure UTF-8 encoding +LANG=en_US.UTF-8 +LC_ALL=en_US.UTF-8 +PYTHONIOENCODING=utf-8 + # Console API base URL CONSOLE_API_URL=http://localhost:5001 CONSOLE_WEB_URL=http://localhost:3000 diff --git a/api/Dockerfile b/api/Dockerfile index 8c7a1717b9..e097b5811e 100644 --- a/api/Dockerfile +++ b/api/Dockerfile @@ -37,6 +37,11 @@ EXPOSE 5001 # set timezone ENV TZ=UTC +# Set UTF-8 locale +ENV LANG=en_US.UTF-8 +ENV LC_ALL=en_US.UTF-8 +ENV PYTHONIOENCODING=utf-8 + WORKDIR /app/api RUN \ diff --git a/api/docker/entrypoint.sh b/api/docker/entrypoint.sh index 4de9a25c2f..a850ea9a50 100755 --- a/api/docker/entrypoint.sh +++ b/api/docker/entrypoint.sh @@ -2,6 +2,11 @@ set -e +# Set UTF-8 encoding to address potential encoding issues in containerized environments +export LANG=${LANG:-en_US.UTF-8} +export LC_ALL=${LC_ALL:-en_US.UTF-8} +export PYTHONIOENCODING=${PYTHONIOENCODING:-utf-8} + if [[ "${MIGRATION_ENABLED}" == "true" ]]; then echo "Running migrations" flask upgrade-db diff --git a/docker/.env.example b/docker/.env.example index 88cc544730..9d15ba53d3 100644 --- a/docker/.env.example +++ b/docker/.env.example @@ -52,6 +52,11 @@ FILES_URL= # Example: INTERNAL_FILES_URL=http://api:5001 INTERNAL_FILES_URL= +# Ensure UTF-8 encoding +LANG=en_US.UTF-8 +LC_ALL=en_US.UTF-8 +PYTHONIOENCODING=utf-8 + # ------------------------------ # Server Configuration # ------------------------------ diff --git a/docker/docker-compose.yaml b/docker/docker-compose.yaml index c2ef2ff723..2c1429b5da 100644 --- a/docker/docker-compose.yaml +++ b/docker/docker-compose.yaml @@ -12,6 +12,9 @@ x-shared-env: &shared-api-worker-env APP_WEB_URL: ${APP_WEB_URL:-} FILES_URL: ${FILES_URL:-} INTERNAL_FILES_URL: ${INTERNAL_FILES_URL:-} + LANG: ${LANG:-en_US.UTF-8} + LC_ALL: ${LC_ALL:-en_US.UTF-8} + PYTHONIOENCODING: ${PYTHONIOENCODING:-utf-8} LOG_LEVEL: ${LOG_LEVEL:-INFO} LOG_FILE: ${LOG_FILE:-/app/logs/server.log} LOG_FILE_MAX_SIZE: ${LOG_FILE_MAX_SIZE:-20} From 665fcad6551dfa0f95709692b05db927b98071bf Mon Sep 17 00:00:00 2001 From: Guangdong Liu <804167098@qq.com> Date: Sun, 27 Jul 2025 09:22:36 +0800 Subject: [PATCH 019/415] fix: resolve cross-page document selection issue in metadata batch edit (#23000) Co-authored-by: crazywoola <427733928@qq.com> --- web/app/components/datasets/documents/index.tsx | 9 +++++++-- web/app/components/datasets/documents/list.tsx | 3 ++- .../hooks/use-batch-edit-document-metadata.ts | 13 +++++++++---- 3 files changed, 18 insertions(+), 7 deletions(-) diff --git a/web/app/components/datasets/documents/index.tsx b/web/app/components/datasets/documents/index.tsx index 676581a50f..1f9f36e7b1 100644 --- a/web/app/components/datasets/documents/index.tsx +++ b/web/app/components/datasets/documents/index.tsx @@ -164,7 +164,6 @@ const Documents: FC = ({ datasetId }) => { if (totalPages < currPage + 1) setCurrPage(totalPages === 0 ? 0 : totalPages - 1) } - // eslint-disable-next-line react-hooks/exhaustive-deps }, [documentsRes]) const invalidDocumentDetail = useInvalidDocumentDetailKey() @@ -178,7 +177,6 @@ const Documents: FC = ({ datasetId }) => { invalidChunkList() invalidChildChunkList() }, 5000) - // eslint-disable-next-line react-hooks/exhaustive-deps }, []) const documentsWithProgress = useMemo(() => { @@ -273,6 +271,13 @@ const Documents: FC = ({ datasetId }) => { const documentsList = isDataSourceNotion ? documentsWithProgress?.data : documentsRes?.data const [selectedIds, setSelectedIds] = useState([]) + + // Clear selection when search changes to avoid confusion + useEffect(() => { + if (searchValue !== query.keyword) + setSelectedIds([]) + }, [searchValue, query.keyword]) + const { run: handleSearch } = useDebounceFn(() => { setSearchValue(inputValue) }, { wait: 500 }) diff --git a/web/app/components/datasets/documents/list.tsx b/web/app/components/datasets/documents/list.tsx index 2eb6a3ac1e..2697580f4e 100644 --- a/web/app/components/datasets/documents/list.tsx +++ b/web/app/components/datasets/documents/list.tsx @@ -458,7 +458,8 @@ const DocumentList: FC = ({ handleSave, } = useBatchEditDocumentMetadata({ datasetId, - docList: documents.filter(item => selectedIds.includes(item.id)), + docList: documents.filter(doc => selectedIds.includes(doc.id)), + selectedDocumentIds: selectedIds, // Pass all selected IDs separately onUpdate, }) diff --git a/web/app/components/datasets/metadata/hooks/use-batch-edit-document-metadata.ts b/web/app/components/datasets/metadata/hooks/use-batch-edit-document-metadata.ts index 3bb6e1d6ed..f350fd7b8b 100644 --- a/web/app/components/datasets/metadata/hooks/use-batch-edit-document-metadata.ts +++ b/web/app/components/datasets/metadata/hooks/use-batch-edit-document-metadata.ts @@ -9,12 +9,14 @@ import { t } from 'i18next' type Props = { datasetId: string docList: SimpleDocumentDetail[] + selectedDocumentIds?: string[] onUpdate: () => void } const useBatchEditDocumentMetadata = ({ datasetId, docList, + selectedDocumentIds, onUpdate, }: Props) => { const [isShowEditModal, { @@ -79,9 +81,12 @@ const useBatchEditDocumentMetadata = ({ return false }) - const res: MetadataBatchEditToServer = docList.map((item, i) => { - // the new metadata will override the old one - const oldMetadataList = metaDataList[i] + // Use selectedDocumentIds if available, otherwise fall back to docList + const documentIds = selectedDocumentIds || docList.map(doc => doc.id) + const res: MetadataBatchEditToServer = documentIds.map((documentId) => { + // Find the document in docList to get its metadata + const docIndex = docList.findIndex(doc => doc.id === documentId) + const oldMetadataList = docIndex >= 0 ? metaDataList[docIndex] : [] let newMetadataList: MetadataItemWithValue[] = [...oldMetadataList, ...addedList] .filter((item) => { return !removedList.find(removedItem => removedItem.id === item.id) @@ -108,7 +113,7 @@ const useBatchEditDocumentMetadata = ({ }) return { - document_id: item.id, + document_id: documentId, metadata_list: newMetadataList, } }) From 67a0751cf36adcabc9a4478e4d2e20a4bca80c67 Mon Sep 17 00:00:00 2001 From: Will Date: Sun, 27 Jul 2025 11:06:37 +0800 Subject: [PATCH 020/415] fix: Improve create_agent_thought and save_agent_thought Logic (#21263) --- api/core/agent/base_agent_runner.py | 48 +++++++++++++---------------- api/core/agent/cot_agent_runner.py | 16 +++++----- api/core/agent/fc_agent_runner.py | 14 ++++----- 3 files changed, 37 insertions(+), 41 deletions(-) diff --git a/api/core/agent/base_agent_runner.py b/api/core/agent/base_agent_runner.py index 1f3c218d59..ad9b625350 100644 --- a/api/core/agent/base_agent_runner.py +++ b/api/core/agent/base_agent_runner.py @@ -280,7 +280,7 @@ class BaseAgentRunner(AppRunner): def create_agent_thought( self, message_id: str, message: str, tool_name: str, tool_input: str, messages_ids: list[str] - ) -> MessageAgentThought: + ) -> str: """ Create agent thought """ @@ -313,16 +313,15 @@ class BaseAgentRunner(AppRunner): db.session.add(thought) db.session.commit() - db.session.refresh(thought) + agent_thought_id = str(thought.id) + self.agent_thought_count += 1 db.session.close() - self.agent_thought_count += 1 - - return thought + return agent_thought_id def save_agent_thought( self, - agent_thought: MessageAgentThought, + agent_thought_id: str, tool_name: str | None, tool_input: Union[str, dict, None], thought: str | None, @@ -335,12 +334,9 @@ class BaseAgentRunner(AppRunner): """ Save agent thought """ - updated_agent_thought = ( - db.session.query(MessageAgentThought).where(MessageAgentThought.id == agent_thought.id).first() - ) - if not updated_agent_thought: + agent_thought = db.session.query(MessageAgentThought).where(MessageAgentThought.id == agent_thought_id).first() + if not agent_thought: raise ValueError("agent thought not found") - agent_thought = updated_agent_thought if thought: agent_thought.thought += thought @@ -355,7 +351,7 @@ class BaseAgentRunner(AppRunner): except Exception: tool_input = json.dumps(tool_input) - updated_agent_thought.tool_input = tool_input + agent_thought.tool_input = tool_input if observation: if isinstance(observation, dict): @@ -364,27 +360,27 @@ class BaseAgentRunner(AppRunner): except Exception: observation = json.dumps(observation) - updated_agent_thought.observation = observation + agent_thought.observation = observation if answer: agent_thought.answer = answer if messages_ids is not None and len(messages_ids) > 0: - updated_agent_thought.message_files = json.dumps(messages_ids) + agent_thought.message_files = json.dumps(messages_ids) if llm_usage: - updated_agent_thought.message_token = llm_usage.prompt_tokens - updated_agent_thought.message_price_unit = llm_usage.prompt_price_unit - updated_agent_thought.message_unit_price = llm_usage.prompt_unit_price - updated_agent_thought.answer_token = llm_usage.completion_tokens - updated_agent_thought.answer_price_unit = llm_usage.completion_price_unit - updated_agent_thought.answer_unit_price = llm_usage.completion_unit_price - updated_agent_thought.tokens = llm_usage.total_tokens - updated_agent_thought.total_price = llm_usage.total_price + agent_thought.message_token = llm_usage.prompt_tokens + agent_thought.message_price_unit = llm_usage.prompt_price_unit + agent_thought.message_unit_price = llm_usage.prompt_unit_price + agent_thought.answer_token = llm_usage.completion_tokens + agent_thought.answer_price_unit = llm_usage.completion_price_unit + agent_thought.answer_unit_price = llm_usage.completion_unit_price + agent_thought.tokens = llm_usage.total_tokens + agent_thought.total_price = llm_usage.total_price # check if tool labels is not empty - labels = updated_agent_thought.tool_labels or {} - tools = updated_agent_thought.tool.split(";") if updated_agent_thought.tool else [] + labels = agent_thought.tool_labels or {} + tools = agent_thought.tool.split(";") if agent_thought.tool else [] for tool in tools: if not tool: continue @@ -395,7 +391,7 @@ class BaseAgentRunner(AppRunner): else: labels[tool] = {"en_US": tool, "zh_Hans": tool} - updated_agent_thought.tool_labels_str = json.dumps(labels) + agent_thought.tool_labels_str = json.dumps(labels) if tool_invoke_meta is not None: if isinstance(tool_invoke_meta, dict): @@ -404,7 +400,7 @@ class BaseAgentRunner(AppRunner): except Exception: tool_invoke_meta = json.dumps(tool_invoke_meta) - updated_agent_thought.tool_meta_str = tool_invoke_meta + agent_thought.tool_meta_str = tool_invoke_meta db.session.commit() db.session.close() diff --git a/api/core/agent/cot_agent_runner.py b/api/core/agent/cot_agent_runner.py index 4979f63432..565fb42478 100644 --- a/api/core/agent/cot_agent_runner.py +++ b/api/core/agent/cot_agent_runner.py @@ -97,13 +97,13 @@ class CotAgentRunner(BaseAgentRunner, ABC): message_file_ids: list[str] = [] - agent_thought = self.create_agent_thought( + agent_thought_id = self.create_agent_thought( message_id=message.id, message="", tool_name="", tool_input="", messages_ids=message_file_ids ) if iteration_step > 1: self.queue_manager.publish( - QueueAgentThoughtEvent(agent_thought_id=agent_thought.id), PublishFrom.APPLICATION_MANAGER + QueueAgentThoughtEvent(agent_thought_id=agent_thought_id), PublishFrom.APPLICATION_MANAGER ) # recalc llm max tokens @@ -133,7 +133,7 @@ class CotAgentRunner(BaseAgentRunner, ABC): # publish agent thought if it's first iteration if iteration_step == 1: self.queue_manager.publish( - QueueAgentThoughtEvent(agent_thought_id=agent_thought.id), PublishFrom.APPLICATION_MANAGER + QueueAgentThoughtEvent(agent_thought_id=agent_thought_id), PublishFrom.APPLICATION_MANAGER ) for chunk in react_chunks: @@ -168,7 +168,7 @@ class CotAgentRunner(BaseAgentRunner, ABC): usage_dict["usage"] = LLMUsage.empty_usage() self.save_agent_thought( - agent_thought=agent_thought, + agent_thought_id=agent_thought_id, tool_name=(scratchpad.action.action_name if scratchpad.action and not scratchpad.is_final() else ""), tool_input={scratchpad.action.action_name: scratchpad.action.action_input} if scratchpad.action else {}, tool_invoke_meta={}, @@ -181,7 +181,7 @@ class CotAgentRunner(BaseAgentRunner, ABC): if not scratchpad.is_final(): self.queue_manager.publish( - QueueAgentThoughtEvent(agent_thought_id=agent_thought.id), PublishFrom.APPLICATION_MANAGER + QueueAgentThoughtEvent(agent_thought_id=agent_thought_id), PublishFrom.APPLICATION_MANAGER ) if not scratchpad.action: @@ -212,7 +212,7 @@ class CotAgentRunner(BaseAgentRunner, ABC): scratchpad.agent_response = tool_invoke_response self.save_agent_thought( - agent_thought=agent_thought, + agent_thought_id=agent_thought_id, tool_name=scratchpad.action.action_name, tool_input={scratchpad.action.action_name: scratchpad.action.action_input}, thought=scratchpad.thought or "", @@ -224,7 +224,7 @@ class CotAgentRunner(BaseAgentRunner, ABC): ) self.queue_manager.publish( - QueueAgentThoughtEvent(agent_thought_id=agent_thought.id), PublishFrom.APPLICATION_MANAGER + QueueAgentThoughtEvent(agent_thought_id=agent_thought_id), PublishFrom.APPLICATION_MANAGER ) # update prompt tool message @@ -244,7 +244,7 @@ class CotAgentRunner(BaseAgentRunner, ABC): # save agent thought self.save_agent_thought( - agent_thought=agent_thought, + agent_thought_id=agent_thought_id, tool_name="", tool_input={}, tool_invoke_meta={}, diff --git a/api/core/agent/fc_agent_runner.py b/api/core/agent/fc_agent_runner.py index 5491689ece..4df71ce9de 100644 --- a/api/core/agent/fc_agent_runner.py +++ b/api/core/agent/fc_agent_runner.py @@ -80,7 +80,7 @@ class FunctionCallAgentRunner(BaseAgentRunner): prompt_messages_tools = [] message_file_ids: list[str] = [] - agent_thought = self.create_agent_thought( + agent_thought_id = self.create_agent_thought( message_id=message.id, message="", tool_name="", tool_input="", messages_ids=message_file_ids ) @@ -114,7 +114,7 @@ class FunctionCallAgentRunner(BaseAgentRunner): for chunk in chunks: if is_first_chunk: self.queue_manager.publish( - QueueAgentThoughtEvent(agent_thought_id=agent_thought.id), PublishFrom.APPLICATION_MANAGER + QueueAgentThoughtEvent(agent_thought_id=agent_thought_id), PublishFrom.APPLICATION_MANAGER ) is_first_chunk = False # check if there is any tool call @@ -172,7 +172,7 @@ class FunctionCallAgentRunner(BaseAgentRunner): result.message.content = "" self.queue_manager.publish( - QueueAgentThoughtEvent(agent_thought_id=agent_thought.id), PublishFrom.APPLICATION_MANAGER + QueueAgentThoughtEvent(agent_thought_id=agent_thought_id), PublishFrom.APPLICATION_MANAGER ) yield LLMResultChunk( @@ -205,7 +205,7 @@ class FunctionCallAgentRunner(BaseAgentRunner): # save thought self.save_agent_thought( - agent_thought=agent_thought, + agent_thought_id=agent_thought_id, tool_name=tool_call_names, tool_input=tool_call_inputs, thought=response, @@ -216,7 +216,7 @@ class FunctionCallAgentRunner(BaseAgentRunner): llm_usage=current_llm_usage, ) self.queue_manager.publish( - QueueAgentThoughtEvent(agent_thought_id=agent_thought.id), PublishFrom.APPLICATION_MANAGER + QueueAgentThoughtEvent(agent_thought_id=agent_thought_id), PublishFrom.APPLICATION_MANAGER ) final_answer += response + "\n" @@ -276,7 +276,7 @@ class FunctionCallAgentRunner(BaseAgentRunner): if len(tool_responses) > 0: # save agent thought self.save_agent_thought( - agent_thought=agent_thought, + agent_thought_id=agent_thought_id, tool_name="", tool_input="", thought="", @@ -291,7 +291,7 @@ class FunctionCallAgentRunner(BaseAgentRunner): messages_ids=message_file_ids, ) self.queue_manager.publish( - QueueAgentThoughtEvent(agent_thought_id=agent_thought.id), PublishFrom.APPLICATION_MANAGER + QueueAgentThoughtEvent(agent_thought_id=agent_thought_id), PublishFrom.APPLICATION_MANAGER ) # update prompt tool From 177b0fb5e805b7fce223b711eaafcddb5a9a8846 Mon Sep 17 00:00:00 2001 From: znn Date: Mon, 28 Jul 2025 07:34:31 +0530 Subject: [PATCH 021/415] =?UTF-8?q?ability=20to=20select=20same=20type=20s?= =?UTF-8?q?ub=20item=20by=20preserving=20children=20of=20both=20f=E2=80=A6?= =?UTF-8?q?=20(#23002)?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- .../nodes/_base/components/variable/utils.ts | 65 ++++++++++++++++--- 1 file changed, 55 insertions(+), 10 deletions(-) diff --git a/web/app/components/workflow/nodes/_base/components/variable/utils.ts b/web/app/components/workflow/nodes/_base/components/variable/utils.ts index 3737db8abf..8c3ffb8810 100644 --- a/web/app/components/workflow/nodes/_base/components/variable/utils.ts +++ b/web/app/components/workflow/nodes/_base/components/variable/utils.ts @@ -49,6 +49,13 @@ export const isConversationVar = (valueSelector: ValueSelector) => { return valueSelector[0] === 'conversation' } +export const hasValidChildren = (children: any): boolean => { + return children && ( + (Array.isArray(children) && children.length > 0) + || (!Array.isArray(children) && Object.keys((children as StructuredOutput)?.schema?.properties || {}).length > 0) + ) +} + const inputVarTypeToVarType = (type: InputVarType): VarType => { return ({ [InputVarType.number]: VarType.number, @@ -139,19 +146,57 @@ const findExceptVarInObject = (obj: any, filterVar: (payload: Var, selector: Val if (isStructuredOutput) { childrenResult = findExceptVarInStructuredOutput(children, filterVar) } - else if (Array.isArray(children)) { - childrenResult = children.filter((item: Var) => { - const { children: itemChildren } = item - const currSelector = [...value_selector, item.variable] + else if (Array.isArray(children)) { + childrenResult = children + .map((item: Var) => { + const { children: itemChildren } = item + const currSelector = [...value_selector, item.variable] - if (!itemChildren) - return filterVar(item, currSelector) + if (!itemChildren) { + return { + item, + filteredObj: null, + passesFilter: filterVar(item, currSelector), + } + } - const filteredObj = findExceptVarInObject(item, filterVar, currSelector, false) // File doesn't contain file children - return filteredObj.children && (filteredObj.children as Var[])?.length > 0 - }) + const filteredObj = findExceptVarInObject(item, filterVar, currSelector, false) + const itemHasValidChildren = hasValidChildren(filteredObj.children) + + let passesFilter + if ((item.type === VarType.object || item.type === VarType.file) && itemChildren) + passesFilter = itemHasValidChildren || filterVar(item, currSelector) + else + passesFilter = itemHasValidChildren + + return { + item, + filteredObj, + passesFilter, + } + }) + .filter(({ passesFilter }) => passesFilter) + .map(({ item, filteredObj }) => { + const { children: itemChildren } = item + if (!itemChildren || !filteredObj) + return item + + return { + ...item, + children: filteredObj.children, + } + }) + + if (isFile && Array.isArray(childrenResult)) { + if (childrenResult.length === 0) { + childrenResult = OUTPUT_FILE_SUB_VARIABLES.map(key => ({ + variable: key, + type: key === 'size' ? VarType.number : VarType.string, + })) + } + } } - else { + else { childrenResult = [] } From 1c05491f1cfbf47575949695417a6e90d9d5cebd Mon Sep 17 00:00:00 2001 From: Yongtao Huang Date: Mon, 28 Jul 2025 10:04:45 +0800 Subject: [PATCH 022/415] Chore: remove duplicate TYPE_CHECKING import (#23013) Signed-off-by: Yongtao Huang --- api/core/rag/docstore/dataset_docstore.py | 6 +++--- api/models/model.py | 3 --- api/models/workflow.py | 3 --- 3 files changed, 3 insertions(+), 9 deletions(-) diff --git a/api/core/rag/docstore/dataset_docstore.py b/api/core/rag/docstore/dataset_docstore.py index f844770a20..f8da3657fc 100644 --- a/api/core/rag/docstore/dataset_docstore.py +++ b/api/core/rag/docstore/dataset_docstore.py @@ -32,7 +32,7 @@ class DatasetDocumentStore: } @property - def dateset_id(self) -> Any: + def dataset_id(self) -> Any: return self._dataset.id @property @@ -123,13 +123,13 @@ class DatasetDocumentStore: db.session.flush() if save_child: if doc.children: - for postion, child in enumerate(doc.children, start=1): + for position, child in enumerate(doc.children, start=1): child_segment = ChildChunk( tenant_id=self._dataset.tenant_id, dataset_id=self._dataset.id, document_id=self._document_id, segment_id=segment_document.id, - position=postion, + position=position, index_node_id=child.metadata.get("doc_id"), index_node_hash=child.metadata.get("doc_hash"), content=child.page_content, diff --git a/api/models/model.py b/api/models/model.py index a78a91ebd5..9f6d51b315 100644 --- a/api/models/model.py +++ b/api/models/model.py @@ -32,9 +32,6 @@ from .engine import db from .enums import CreatorUserRole from .types import StringUUID -if TYPE_CHECKING: - from .workflow import Workflow - class DifySetup(Base): __tablename__ = "dify_setups" diff --git a/api/models/workflow.py b/api/models/workflow.py index 79d96e42dd..d89db6c7da 100644 --- a/api/models/workflow.py +++ b/api/models/workflow.py @@ -42,9 +42,6 @@ from .types import EnumText, StringUUID _logger = logging.getLogger(__name__) -if TYPE_CHECKING: - from models.model import AppMode - class WorkflowType(Enum): """ From 0546351d3e27cf7eca6021783af1826b4b59d1f5 Mon Sep 17 00:00:00 2001 From: HyaCinth <88471803+HyaCiovo@users.noreply.github.com> Date: Mon, 28 Jul 2025 10:34:11 +0800 Subject: [PATCH 023/415] refactor(web): Optimize the interaction effect of ToolTip component in menu items (#23020) (#23023) --- web/app/components/explore/create-app-modal/index.tsx | 2 +- web/app/components/workflow/block-selector/blocks.tsx | 1 + 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/web/app/components/explore/create-app-modal/index.tsx b/web/app/components/explore/create-app-modal/index.tsx index 7e1e59b51b..e94999db04 100644 --- a/web/app/components/explore/create-app-modal/index.tsx +++ b/web/app/components/explore/create-app-modal/index.tsx @@ -27,7 +27,7 @@ export type CreateAppModalProps = { appIconUrl?: string | null appMode?: string appUseIconAsAnswerIcon?: boolean - max_active_requests: number | null + max_active_requests?: number | null onConfirm: (info: { name: string icon_type: AppIconType diff --git a/web/app/components/workflow/block-selector/blocks.tsx b/web/app/components/workflow/block-selector/blocks.tsx index 4182530a91..27f8847655 100644 --- a/web/app/components/workflow/block-selector/blocks.tsx +++ b/web/app/components/workflow/block-selector/blocks.tsx @@ -70,6 +70,7 @@ const Blocks = ({ key={block.type} position='right' popupClassName='w-[200px]' + needsDelay={false} popupContent={(
Date: Mon, 28 Jul 2025 11:01:38 +0800 Subject: [PATCH 024/415] Rollback Aliyun Trace Icon File (#23027) --- api/core/ops/aliyun_trace/aliyun_trace.py | 20 +- .../icons/src/public/tracing/AliyunIcon.json | 245 +++++++++--------- .../src/public/tracing/AliyunIconBig.json | 184 +++++-------- 3 files changed, 195 insertions(+), 254 deletions(-) diff --git a/api/core/ops/aliyun_trace/aliyun_trace.py b/api/core/ops/aliyun_trace/aliyun_trace.py index 9dd830a023..af0e38f7ef 100644 --- a/api/core/ops/aliyun_trace/aliyun_trace.py +++ b/api/core/ops/aliyun_trace/aliyun_trace.py @@ -139,7 +139,7 @@ class AliyunDataTrace(BaseTraceInstance): start_time=convert_datetime_to_nanoseconds(trace_info.start_time), end_time=convert_datetime_to_nanoseconds(trace_info.end_time), attributes={ - GEN_AI_SESSION_ID: trace_info.metadata.get("conversation_id", ""), + GEN_AI_SESSION_ID: trace_info.metadata.get("conversation_id") or "", GEN_AI_USER_ID: str(user_id), GEN_AI_SPAN_KIND: GenAISpanKind.CHAIN.value, GEN_AI_FRAMEWORK: "dify", @@ -161,12 +161,12 @@ class AliyunDataTrace(BaseTraceInstance): start_time=convert_datetime_to_nanoseconds(trace_info.start_time), end_time=convert_datetime_to_nanoseconds(trace_info.end_time), attributes={ - GEN_AI_SESSION_ID: trace_info.metadata.get("conversation_id", ""), + GEN_AI_SESSION_ID: trace_info.metadata.get("conversation_id") or "", GEN_AI_USER_ID: str(user_id), GEN_AI_SPAN_KIND: GenAISpanKind.LLM.value, GEN_AI_FRAMEWORK: "dify", - GEN_AI_MODEL_NAME: trace_info.metadata.get("ls_model_name", ""), - GEN_AI_SYSTEM: trace_info.metadata.get("ls_provider", ""), + GEN_AI_MODEL_NAME: trace_info.metadata.get("ls_model_name") or "", + GEN_AI_SYSTEM: trace_info.metadata.get("ls_provider") or "", GEN_AI_USAGE_INPUT_TOKENS: str(trace_info.message_tokens), GEN_AI_USAGE_OUTPUT_TOKENS: str(trace_info.answer_tokens), GEN_AI_USAGE_TOTAL_TOKENS: str(trace_info.total_tokens), @@ -386,14 +386,14 @@ class AliyunDataTrace(BaseTraceInstance): GEN_AI_SESSION_ID: trace_info.metadata.get("conversation_id") or "", GEN_AI_SPAN_KIND: GenAISpanKind.LLM.value, GEN_AI_FRAMEWORK: "dify", - GEN_AI_MODEL_NAME: process_data.get("model_name", ""), - GEN_AI_SYSTEM: process_data.get("model_provider", ""), + GEN_AI_MODEL_NAME: process_data.get("model_name") or "", + GEN_AI_SYSTEM: process_data.get("model_provider") or "", GEN_AI_USAGE_INPUT_TOKENS: str(usage_data.get("prompt_tokens", 0)), GEN_AI_USAGE_OUTPUT_TOKENS: str(usage_data.get("completion_tokens", 0)), GEN_AI_USAGE_TOTAL_TOKENS: str(usage_data.get("total_tokens", 0)), GEN_AI_PROMPT: json.dumps(process_data.get("prompts", []), ensure_ascii=False), GEN_AI_COMPLETION: str(outputs.get("text", "")), - GEN_AI_RESPONSE_FINISH_REASON: outputs.get("finish_reason", ""), + GEN_AI_RESPONSE_FINISH_REASON: outputs.get("finish_reason") or "", INPUT_VALUE: json.dumps(process_data.get("prompts", []), ensure_ascii=False), OUTPUT_VALUE: str(outputs.get("text", "")), }, @@ -421,7 +421,7 @@ class AliyunDataTrace(BaseTraceInstance): GEN_AI_USER_ID: str(user_id), GEN_AI_SPAN_KIND: GenAISpanKind.CHAIN.value, GEN_AI_FRAMEWORK: "dify", - INPUT_VALUE: trace_info.workflow_run_inputs.get("sys.query", ""), + INPUT_VALUE: trace_info.workflow_run_inputs.get("sys.query") or "", OUTPUT_VALUE: json.dumps(trace_info.workflow_run_outputs, ensure_ascii=False), }, status=status, @@ -461,8 +461,8 @@ class AliyunDataTrace(BaseTraceInstance): attributes={ GEN_AI_SPAN_KIND: GenAISpanKind.LLM.value, GEN_AI_FRAMEWORK: "dify", - GEN_AI_MODEL_NAME: trace_info.metadata.get("ls_model_name", ""), - GEN_AI_SYSTEM: trace_info.metadata.get("ls_provider", ""), + GEN_AI_MODEL_NAME: trace_info.metadata.get("ls_model_name") or "", + GEN_AI_SYSTEM: trace_info.metadata.get("ls_provider") or "", GEN_AI_PROMPT: json.dumps(trace_info.inputs, ensure_ascii=False), GEN_AI_COMPLETION: json.dumps(trace_info.suggested_question, ensure_ascii=False), INPUT_VALUE: json.dumps(trace_info.inputs, ensure_ascii=False), diff --git a/web/app/components/base/icons/src/public/tracing/AliyunIcon.json b/web/app/components/base/icons/src/public/tracing/AliyunIcon.json index 9a0b89f20a..5cbb52c237 100644 --- a/web/app/components/base/icons/src/public/tracing/AliyunIcon.json +++ b/web/app/components/base/icons/src/public/tracing/AliyunIcon.json @@ -1,131 +1,118 @@ { - "icon": { - "type": "element", - "isRootNode": true, - "name": "svg", - "attributes": { - "xmlns": "http://www.w3.org/2000/svg", - "xmlns:xlink": "http://www.w3.org/1999/xlink", - "fill": "none", - "version": "1.1", - "width": "106", - "height": "16", - "viewBox": "0 0 106 16" - }, - "children": [ - { - "type": "element", - "name": "defs", - "attributes": {}, - "children": [ - { - "type": "element", - "name": "clipPath", - "attributes": { - "id": "master_svg0_36_00924" - }, - "children": [ - { - "type": "element", - "name": "rect", - "attributes": { - "x": "0", - "y": "0", - "width": "19", - "height": "16", - "rx": "0" - }, - "children": [] - } - ] - } - ] - }, - { - "type": "element", - "name": "g", - "attributes": {}, - "children": [ - { - "type": "element", - "name": "g", - "attributes": { - "clip-path": "url(#master_svg0_36_00924)" - }, - "children": [ - { - "type": "element", - "name": "g", - "attributes": {}, - "children": [ - { - "type": "element", - "name": "g", - "attributes": {}, - "children": [ - { - "type": "element", - "name": "path", - "attributes": { - "d": "M4.06862,14.6667C3.79213,14.6667,3.45463,14.5688,3.05614,14.373C2.97908,14.3351,2.92692,14.3105,2.89968,14.2992C2.33193,14.0628,1.82911,13.7294,1.39123,13.2989C0.463742,12.3871,0,11.2874,0,10C0,8.71258,0.463742,7.61293,1.39123,6.70107C2.16172,5.94358,3.06404,5.50073,4.09819,5.37252C4.23172,3.98276,4.81755,2.77756,5.85569,1.75693C7.04708,0.585642,8.4857,0,10.1716,0C11.5256,0,12.743,0.396982,13.8239,1.19095C14.8847,1.97019,15.61,2.97855,16,4.21604L14.7045,4.61063C14.4016,3.64918,13.8374,2.86532,13.0121,2.25905C12.1719,1.64191,11.2251,1.33333,10.1716,1.33333C8.8602,1.33333,7.74124,1.7888,6.81467,2.69974C5.88811,3.61067,5.42483,4.71076,5.42483,6L5.42483,6.66667L4.74673,6.66667C3.81172,6.66667,3.01288,6.99242,2.35021,7.64393C1.68754,8.2954,1.35621,9.08076,1.35621,10C1.35621,10.9192,1.68754,11.7046,2.35021,12.3561C2.66354,12.6641,3.02298,12.9026,3.42852,13.0714C3.48193,13.0937,3.55988,13.13,3.66237,13.1803C3.87004,13.2823,4.00545,13.3333,4.06862,13.3333L4.06862,14.6667Z", - "fill-rule": "evenodd", - "fill": "#000000", - "fill-opacity": "1", - "style": "mix-blend-mode:passthrough" - }, - "children": [] - } - ] - }, - { - "type": "element", - "name": "g", - "attributes": {}, - "children": [ - { - "type": "element", - "name": "path", - "attributes": { - "d": "M13.458613505859375,7.779393492279053C12.975613505859375,7.717463492279053,12.484813505859375,7.686503492279053,11.993983505859376,7.686503492279053C11.152583505859376,7.686503492279053,10.303403505859375,7.779393492279053,9.493183505859374,7.941943492279053C8.682953505859375,8.104503492279052,7.903893505859375,8.359943492279053,7.155983505859375,8.654083492279053C6.657383505859375,8.870823492279053,6.158783505859375,9.128843492279053,5.660181505859375,9.428153492279053C5.332974751859375,9.621673492279053,5.239486705859375,10.070633492279054,5.434253505859375,10.395743492279053L7.413073505859375,13.298533492279052C7.639003505859375,13.623603492279052,8.090863505859375,13.716463492279052,8.418073505859375,13.523003492279052C8.547913505859375,13.435263492279052,8.763453505859374,13.326893492279053,9.064693505859374,13.197863492279053C9.516553505859374,13.004333492279052,9.976203505859374,12.872733492279053,10.459223505859375,12.779863492279052C10.942243505859375,12.679263492279052,11.433053505859375,12.617333492279052,11.955023505859375,12.617333492279052L13.380683505859375,7.810353492279052L13.458613505859375,7.779393492279053ZM15.273813505859374,8.135463492279053L15.016753505859375,5.333333492279053L13.458613505859375,7.787133492279053C13.817013505859375,7.818093492279052,14.144213505859375,7.880023492279053,14.494743505859375,7.949683492279053C14.494743505859375,7.944523492279053,14.754433505859375,8.006453492279054,15.273813505859374,8.135463492279053ZM12.064083505859376,12.648273492279053L11.378523505859375,14.970463492279054L12.515943505859376,16.00003349227905L14.074083505859376,15.643933492279054L14.525943505859376,13.027603492279052C14.198743505859374,12.934663492279054,13.879283505859375,12.834063492279054,13.552083505859375,12.772133492279053C13.069083505859375,12.717933492279052,12.578283505859375,12.648273492279053,12.064083505859376,12.648273492279053ZM18.327743505859374,9.428153492279053C17.829143505859374,9.128843492279053,17.330543505859374,8.870823492279053,16.831943505859375,8.654083492279053C16.348943505859374,8.460573492279053,15.826943505859376,8.267053492279054,15.305013505859375,8.135463492279053L15.305013505859375,8.267053492279054L14.463613505859374,13.043063492279053C14.596083505859376,13.105003492279053,14.759683505859375,13.135933492279053,14.884283505859376,13.205603492279053C15.185523505859376,13.334623492279052,15.401043505859375,13.443003492279052,15.530943505859375,13.530733492279053C15.858143505859376,13.724263492279054,16.341143505859375,13.623603492279052,16.535943505859375,13.306263492279053L18.514743505859375,10.403483492279053C18.779643505859376,10.039673492279054,18.686143505859377,9.621673492279053,18.327743505859374,9.428153492279053Z", - "fill": "#000000", - "fill-opacity": "1", - "style": "mix-blend-mode:passthrough" - }, - "children": [] - } - ] - } - ] - } - ] - }, - { - "type": "element", - "name": "g", - "attributes": {}, - "children": [ - { - "type": "element", - "name": "g", - "attributes": {}, - "children": [ - { - "type": "element", - "name": "path", - "attributes": { - "d": "M36.174,12.958L35.278,14.358Q31.344,11.964,29.958,8.884Q29.258,10.48,27.9,11.81Q26.542,13.14,24.624,14.372L23.644,12.986Q28.642,10.186,29.034,6.378L24.12,6.378L24.12,4.95L29.076,4.95L29.076,1.743999L30.616,1.7999990000000001L30.616,4.95L35.614000000000004,4.95L35.614000000000004,6.378L30.588,6.378L30.573999999999998,6.56Q31.078,8.646,32.408,10.144Q33.738,11.642,36.174,12.958ZM44.658,4.922000000000001L43.454,4.922000000000001L43.454,4.152L41.745999999999995,4.152L41.745999999999995,2.948L43.454,2.948L43.454,1.716L44.658,1.771999L44.658,2.948L46.492000000000004,2.948L46.492000000000004,1.716L47.682,1.771999L47.682,2.948L49.586,2.948L49.586,4.152L47.682,4.152L47.682,4.922000000000001L46.492000000000004,4.922000000000001L46.492000000000004,4.152L44.658,4.152L44.658,4.922000000000001ZM46.519999999999996,11.474Q47.010000000000005,12.146,47.870999999999995,12.615Q48.732,13.084,50.104,13.364L49.726,14.624Q46.884,13.924,45.61,12.286Q45.106,13.042,44.111999999999995,13.616Q43.117999999999995,14.19,41.507999999999996,14.638L41.004000000000005,13.42Q42.488,13.098,43.349000000000004,12.615Q44.21,12.132,44.574,11.474L41.522,11.474L41.522,10.368L44.896,10.368Q44.91,10.312,44.91,10.214Q44.924,10.032,44.924,9.542L42.152,9.542L42.152,8.492L41.284,9.108Q40.989999999999995,8.464,40.5,7.708L40.5,14.358L39.282,14.358L39.282,8.268Q38.61,9.99,37.952,11.082L36.944,10.074Q37.532,9.122,38.106,7.939Q38.68,6.756,39.058,5.664L37.658,5.664L37.658,4.390000000000001L39.282,4.390000000000001L39.282,1.7579989999999999L40.5,1.814L40.5,4.390000000000001L41.816,4.390000000000001L41.816,5.664L40.5,5.664L40.5,7.134L41.116,6.658Q41.704,7.386,42.152,8.24L42.152,5.244L48.97,5.244L48.97,9.542L46.226,9.542Q46.198,10.018,46.198,10.214L46.198,10.368L49.641999999999996,10.368L49.641999999999996,11.474L46.519999999999996,11.474ZM47.85,6.952L47.85,6.28L43.314,6.28L43.314,6.952L47.85,6.952ZM47.85,7.862L43.314,7.862L43.314,8.506L47.85,8.506L47.85,7.862ZM59.904,9.388L59.512,8.114L60.548,8.030000000000001Q61.066,7.988,61.234,7.855Q61.402,7.722,61.402,7.274L61.402,2.01L62.704,2.066L62.704,7.624Q62.704,8.268,62.55,8.604Q62.396,8.940000000000001,62.025,9.094Q61.654,9.248,60.94,9.304L59.904,9.388ZM51.546,9.276Q52.274,8.52,52.596000000000004,7.988Q52.918,7.456,53.016,6.784L51.518,6.784L51.518,5.566L53.1,5.566L53.1,5.188L53.1,3.718L51.867999999999995,3.718L51.867999999999995,2.458L58.448,2.458L58.448,3.718L57.244,3.718L57.244,5.566L58.728,5.566L58.728,6.784L57.244,6.784L57.244,9.206L55.928,9.206L55.928,6.784L54.332,6.784Q54.22,7.792,53.842,8.52Q53.464,9.248,52.61,10.102L51.546,9.276ZM59.092,2.724L60.366,2.7800000000000002L60.366,7.61L59.092,7.61L59.092,2.724ZM54.402,3.718L54.402,5.202L54.402,5.566L55.928,5.566L55.928,3.718L54.402,3.718ZM58.126,11.348L58.126,12.86L63.53,12.86L63.53,14.106L51.322,14.106L51.322,12.86L56.74,12.86L56.74,11.348L52.75,11.348L52.75,10.13L56.74,10.13L56.74,9.332L58.126,9.388L58.126,10.13L62.13,10.13L62.13,11.348L58.126,11.348ZM77.39,2.528L77.39,3.9L75.64,3.9L75.64,12.272Q75.64,13.098,75.465,13.49Q75.28999999999999,13.882,74.84899999999999,14.05Q74.408,14.218,73.47,14.302L72.56,14.386L72.126,13L73.19,12.916Q73.68,12.874,73.89699999999999,12.79Q74.114,12.706,74.184,12.51Q74.25399999999999,12.314,74.25399999999999,11.88L74.25399999999999,3.9L65.042,3.9L65.042,2.528L77.39,2.528ZM66.512,5.524L72.26599999999999,5.524L72.26599999999999,11.712L66.512,11.712L66.512,5.524ZM67.842,10.354L70.95,10.354L70.95,6.896L67.842,6.896L67.842,10.354ZM88.772,3.648L85.118,3.648L85.118,10.298L83.80199999999999,10.298L83.80199999999999,2.332L90.088,2.332L90.088,10.27L88.772,10.27L88.772,3.648ZM82.668,12.65Q82.23400000000001,11.712,81.632,10.522Q80.862,12.146,79.518,14.092L78.45400000000001,13.182Q80.036,11.068,80.89,9.024Q79.7,6.728,79,5.552L80.02199999999999,4.894Q80.48400000000001,5.622,81.47800000000001,7.386Q81.87,6.042,82.122,4.2780000000000005L79.02799999999999,4.2780000000000005L79.02799999999999,2.934L83.47999999999999,2.934L83.47999999999999,4.2780000000000005Q83.144,6.784,82.318,8.940000000000001Q83.158,10.508,83.774,11.782L82.668,12.65ZM91.166,11.264Q91.124,12.104,91.04,12.636Q90.956,13.28,90.802,13.602Q90.648,13.924,90.326,14.064Q90.004,14.204,89.374,14.204L88.142,14.204Q87.344,14.204,87.029,13.868Q86.714,13.532,86.714,12.636L86.714,11.11Q86.21000000000001,12.104,85.356,12.972Q84.50200000000001,13.84,83.2,14.708L82.332,13.56Q83.886,12.608,84.691,11.705Q85.49600000000001,10.802,85.804,9.745Q86.112,8.687999999999999,86.168,7.05L86.21000000000001,4.306L87.61,4.362L87.568,7.218Q87.526,8.366,87.344,9.276L88.016,9.304L88.016,12.16Q88.016,12.608,88.128,12.734Q88.24,12.86,88.632,12.86L89.108,12.86Q89.486,12.86,89.619,12.741Q89.752,12.622,89.808,12.174Q89.892,11.362,89.892,10.788L91.166,11.264ZM93.56,1.884Q94.036,2.206,94.68,2.759Q95.324,3.312,95.702,3.704L94.904,4.795999999999999Q94.596,4.418,93.938,3.809Q93.28,3.2,92.832,2.85L93.56,1.884ZM102.1,12.93Q102.478,12.888,102.653,12.832Q102.828,12.776,102.898,12.636Q102.968,12.496,102.968,12.188L102.968,1.981999L104.06,2.0380000000000003L104.06,12.608Q104.06,13.238,103.948,13.546Q103.836,13.854,103.521,13.994Q103.206,14.134,102.534,14.19L101.75,14.246L101.372,12.986L102.1,12.93ZM95.702,10.774L95.702,2.5140000000000002L100.168,2.5140000000000002L100.168,10.732L99.006,10.732L99.006,3.732L96.836,3.732L96.836,10.774L95.702,10.774ZM101.008,11.152L101.008,3.2L102.1,3.256L102.1,11.152L101.008,11.152ZM94.652,13.364Q95.856,12.482,96.43,11.789Q97.004,11.096,97.2,10.277Q97.396,9.458,97.396,8.058L97.396,4.362L98.488,4.418L98.488,8.058Q98.488,9.738,98.201,10.809Q97.914,11.88,97.277,12.664Q96.64,13.448,95.45,14.344L94.652,13.364ZM93.07,5.034Q93.546,5.37,94.197,5.937Q94.848,6.504,95.282,6.952L94.484,8.072Q94.078,7.61,93.427,7.015Q92.776,6.42,92.258,6.028L93.07,5.034ZM92.524,13.742Q92.748,13.126,93.266,11.278Q93.784,9.43,93.896,8.814L94.498,9.01L95.072,9.206Q94.89,10.032,94.421,11.733Q93.952,13.434,93.714,14.162L92.524,13.742ZM98.74,10.858Q99.888,11.908,100.714,12.958L99.888,13.868Q99.356,13.154,98.943,12.671Q98.53,12.188,97.984,11.684L98.74,10.858Z", - "fill": "#000000", - "fill-opacity": "1" - }, - "children": [] - } - ] - } - ] - } - ] - } - ] - }, - "name": "AliyunIcon" + "icon": { + "type": "element", + "isRootNode": true, + "name": "svg", + "attributes": { + "xmlns": "http://www.w3.org/2000/svg", + "xmlns:xlink": "http://www.w3.org/1999/xlink", + "fill": "none", + "version": "1.1", + "width": "65", + "height": "16", + "viewBox": "0 0 65 16" + }, + "children": [ + { + "type": "element", + "name": "defs", + "children": [ + { + "type": "element", + "name": "clipPath", + "attributes": { + "id": "master_svg0_42_34281" + }, + "children": [ + { + "type": "element", + "name": "rect", + "attributes": { + "x": "0", + "y": "0", + "width": "19", + "height": "16", + "rx": "0" + } + } + ] + } + ] + }, + { + "type": "element", + "name": "g", + "children": [ + { + "type": "element", + "name": "g", + "attributes": { + "clip-path": "url(#master_svg0_42_34281)" + }, + "children": [ + { + "type": "element", + "name": "g", + "children": [ + { + "type": "element", + "name": "g", + "children": [ + { + "type": "element", + "name": "path", + "attributes": { + "d": "M4.06862,14.6667C3.79213,14.6667,3.45463,14.5688,3.05614,14.373C2.97908,14.3351,2.92692,14.3105,2.89968,14.2992C2.33193,14.0628,1.82911,13.7294,1.39123,13.2989C0.463742,12.3871,0,11.2874,0,10C0,8.71258,0.463742,7.61293,1.39123,6.70107C2.16172,5.94358,3.06404,5.50073,4.09819,5.37252C4.23172,3.98276,4.81755,2.77756,5.85569,1.75693C7.04708,0.585642,8.4857,0,10.1716,0C11.5256,0,12.743,0.396982,13.8239,1.19095C14.8847,1.97019,15.61,2.97855,16,4.21604L14.7045,4.61063C14.4016,3.64918,13.8374,2.86532,13.0121,2.25905C12.1719,1.64191,11.2251,1.33333,10.1716,1.33333C8.8602,1.33333,7.74124,1.7888,6.81467,2.69974C5.88811,3.61067,5.42483,4.71076,5.42483,6L5.42483,6.66667L4.74673,6.66667C3.81172,6.66667,3.01288,6.99242,2.35021,7.64393C1.68754,8.2954,1.35621,9.08076,1.35621,10C1.35621,10.9192,1.68754,11.7046,2.35021,12.3561C2.66354,12.6641,3.02298,12.9026,3.42852,13.0714C3.48193,13.0937,3.55988,13.13,3.66237,13.1803C3.87004,13.2823,4.00545,13.3333,4.06862,13.3333L4.06862,14.6667Z", + "fill-rule": "evenodd", + "fill": "#FF6A00", + "fill-opacity": "1" + } + } + ] + }, + { + "type": "element", + "name": "g", + "children": [ + { + "type": "element", + "name": "path", + "attributes": { + "d": "M13.458613505859375,7.779393492279053C12.975613505859375,7.717463492279053,12.484813505859375,7.686503492279053,11.993983505859376,7.686503492279053C11.152583505859376,7.686503492279053,10.303403505859375,7.779393492279053,9.493183505859374,7.941943492279053C8.682953505859375,8.104503492279052,7.903893505859375,8.359943492279053,7.155983505859375,8.654083492279053C6.657383505859375,8.870823492279053,6.158783505859375,9.128843492279053,5.660181505859375,9.428153492279053C5.332974751859375,9.621673492279053,5.239486705859375,10.070633492279054,5.434253505859375,10.395743492279053L7.413073505859375,13.298533492279052C7.639003505859375,13.623603492279052,8.090863505859375,13.716463492279052,8.418073505859375,13.523003492279052C8.547913505859375,13.435263492279052,8.763453505859374,13.326893492279053,9.064693505859374,13.197863492279053C9.516553505859374,13.004333492279052,9.976203505859374,12.872733492279053,10.459223505859375,12.779863492279052C10.942243505859375,12.679263492279052,11.433053505859375,12.617333492279052,11.955023505859375,12.617333492279052L13.380683505859375,7.810353492279052L13.458613505859375,7.779393492279053ZM15.273813505859374,8.135463492279053L15.016753505859375,5.333333492279053L13.458613505859375,7.787133492279053C13.817013505859375,7.818093492279052,14.144213505859375,7.880023492279053,14.494743505859375,7.949683492279053C14.494743505859375,7.944523492279053,14.754433505859375,8.006453492279054,15.273813505859374,8.135463492279053ZM12.064083505859376,12.648273492279053L11.378523505859375,14.970463492279054L12.515943505859376,16.00003349227905L14.074083505859376,15.643933492279054L14.525943505859376,13.027603492279052C14.198743505859374,12.934663492279054,13.879283505859375,12.834063492279054,13.552083505859375,12.772133492279053C13.069083505859375,12.717933492279052,12.578283505859375,12.648273492279053,12.064083505859376,12.648273492279053ZM18.327743505859374,9.428153492279053C17.829143505859374,9.128843492279053,17.330543505859374,8.870823492279053,16.831943505859375,8.654083492279053C16.348943505859374,8.460573492279053,15.826943505859376,8.267053492279054,15.305013505859375,8.135463492279053L15.305013505859375,8.267053492279054L14.463613505859374,13.043063492279053C14.596083505859376,13.105003492279053,14.759683505859375,13.135933492279053,14.884283505859376,13.205603492279053C15.185523505859376,13.334623492279052,15.401043505859375,13.443003492279052,15.530943505859375,13.530733492279053C15.858143505859376,13.724263492279054,16.341143505859375,13.623603492279052,16.535943505859375,13.306263492279053L18.514743505859375,10.403483492279053C18.779643505859376,10.039673492279054,18.686143505859377,9.621673492279053,18.327743505859374,9.428153492279053Z", + "fill": "#FF6A00", + "fill-opacity": "1" + } + } + ] + } + ] + } + ] + }, + { + "type": "element", + "name": "g", + "children": [ + { + "type": "element", + "name": "g", + "children": [ + { + "type": "element", + "name": "path", + "attributes": { + "d": "M25.044,2.668L34.676,2.668L34.676,4.04L25.044,4.04L25.044,2.668ZM29.958,7.82Q29.258,9.066,28.355,10.41Q27.451999999999998,11.754,26.92,12.3L32.506,11.782Q31.442,10.158,30.84,9.346L32.058,8.562000000000001Q32.786,9.5,33.843,11.012Q34.9,12.524,35.516,13.546L34.214,14.526Q33.891999999999996,13.966,33.346000000000004,13.098Q32.016,13.182,29.734,13.378Q27.451999999999998,13.574,25.87,13.742L25.31,13.812L24.834,13.882L24.414,12.468Q24.708,12.37,24.862000000000002,12.265Q25.016,12.16,25.121,12.069Q25.226,11.978,25.268,11.936Q25.912,11.32,26.724,10.165Q27.536,9.01,28.208,7.82L23.854,7.82L23.854,6.434L35.866,6.434L35.866,7.82L29.958,7.82ZM42.656,7.414L42.656,8.576L41.354,8.576L41.354,1.814L42.656,1.87L42.656,7.036Q43.314,5.846,43.888000000000005,4.369Q44.462,2.892,44.714,1.6600000000000001L46.086,1.981999Q45.96,2.612,45.722,3.41L49.6,3.41L49.6,4.74L45.274,4.74Q44.616,6.56,43.706,8.128L42.656,7.414ZM38.596000000000004,2.346L39.884,2.402L39.884,8.212L38.596000000000004,8.212L38.596000000000004,2.346ZM46.184,4.964Q46.688,5.356,47.5,6.175Q48.312,6.994,48.788,7.582L47.751999999999995,8.59Q47.346000000000004,8.072,46.576,7.274Q45.806,6.476,45.204,5.902L46.184,4.964ZM48.41,9.01L48.41,12.706L49.894,12.706L49.894,13.966L37.391999999999996,13.966L37.391999999999996,12.706L38.848,12.706L38.848,9.01L48.41,9.01ZM41.676,10.256L40.164,10.256L40.164,12.706L41.676,12.706L41.676,10.256ZM42.908,12.706L44.364000000000004,12.706L44.364000000000004,10.256L42.908,10.256L42.908,12.706ZM45.582,12.706L47.108000000000004,12.706L47.108000000000004,10.256L45.582,10.256L45.582,12.706ZM54.906,7.456L55.116,8.394L54.178,8.814L54.178,12.818Q54.178,13.434,54.031,13.735Q53.884,14.036,53.534,14.162Q53.184,14.288,52.456,14.358L51.867999999999995,14.414L51.476,13.084L52.162,13.028Q52.512,13,52.652,12.958Q52.792,12.916,52.841,12.797Q52.89,12.678,52.89,12.384L52.89,9.36Q51.980000000000004,9.724,51.322,9.948L51.013999999999996,8.576Q51.798,8.324,52.89,7.876L52.89,5.524L51.42,5.524L51.42,4.166L52.89,4.166L52.89,1.7579989999999999L54.178,1.814L54.178,4.166L55.214,4.166L55.214,5.524L54.178,5.524L54.178,7.316L54.808,7.022L54.906,7.456ZM56.894,4.5440000000000005L56.894,6.098L55.564,6.098L55.564,3.256L58.686,3.256Q58.42,2.346,58.266,1.9260000000000002L59.624,1.7579989999999999Q59.848,2.276,60.142,3.256L63.25,3.256L63.25,6.098L61.962,6.098L61.962,4.5440000000000005L56.894,4.5440000000000005ZM59.008,6.322Q58.392,6.938,57.685,7.512Q56.978,8.086,55.956,8.841999999999999L55.242,7.764Q56.824,6.728,58.126,5.37L59.008,6.322ZM60.422,5.37Q61.024,5.776,62.095,6.581Q63.166,7.386,63.656,7.806L62.942,8.982Q62.368,8.45,61.332,7.652Q60.296,6.854,59.666,6.434L60.422,5.37ZM62.592,10.256L60.044,10.256L60.044,12.566L63.572,12.566L63.572,13.826L55.144,13.826L55.144,12.566L58.63,12.566L58.63,10.256L56.054,10.256L56.054,8.982L62.592,8.982L62.592,10.256Z", + "fill": "#FF6A00", + "fill-opacity": "1" + } + } + ] + } + ] + } + ] + } + ] + }, + "name": "AliyunIcon" } diff --git a/web/app/components/base/icons/src/public/tracing/AliyunIconBig.json b/web/app/components/base/icons/src/public/tracing/AliyunIconBig.json index c8093ba660..ea60744daf 100644 --- a/web/app/components/base/icons/src/public/tracing/AliyunIconBig.json +++ b/web/app/components/base/icons/src/public/tracing/AliyunIconBig.json @@ -1,117 +1,71 @@ { - "icon": { - "type": "element", - "isRootNode": true, - "name": "svg", - "attributes": { - "xmlns": "http://www.w3.org/2000/svg", - "xmlns:xlink": "http://www.w3.org/1999/xlink", - "fill": "none", - "version": "1.1", - "width": "159", - "height": "24", - "viewBox": "0 0 159 24" - }, - "children": [ - { - "type": "element", - "name": "defs", - "attributes": {}, - "children": [ - { - "type": "element", - "name": "clipPath", - "attributes": { - "id": "master_svg0_42_18775" - }, - "children": [ - { - "type": "element", - "name": "rect", - "attributes": { - "x": "0", - "y": "0", - "width": "28.5", - "height": "24", - "rx": "0" - }, - "children": [] - } - ] - } - ] - }, - { - "type": "element", - "name": "g", - "attributes": {}, - "children": [ - { - "type": "element", - "name": "g", - "attributes": { - "clip-path": "url(#master_svg0_42_18775)" - }, - "children": [ - { - "type": "element", - "name": "g", - "attributes": {}, - "children": [ - { - "type": "element", - "name": "path", - "attributes": { - "d": "M6.10294,22C5.68819,22,5.18195,21.8532,4.58421,21.5595C4.46861,21.5027,4.39038,21.4658,4.34951,21.4488C3.49789,21.0943,2.74367,20.5941,2.08684,19.9484C0.695613,18.5806,0,16.9311,0,15C0,13.0689,0.695612,11.4194,2.08684,10.0516C3.24259,8.91537,4.59607,8.2511,6.14728,8.05878C6.34758,5.97414,7.22633,4.16634,8.78354,2.63539C10.5706,0.878463,12.7286,0,15.2573,0C17.2884,0,19.1146,0.595472,20.7358,1.78642C22.327,2.95528,23.4151,4.46783,24,6.32406L22.0568,6.91594C21.6024,5.47377,20.7561,4.29798,19.5181,3.38858C18.2579,2.46286,16.8377,2,15.2573,2C13.2903,2,11.6119,2.6832,10.222,4.04961C8.83217,5.41601,8.13725,7.06614,8.13725,9L8.13725,10L7.12009,10C5.71758,10,4.51932,10.4886,3.52532,11.4659C2.53132,12.4431,2.03431,13.6211,2.03431,15C2.03431,16.3789,2.53132,17.5569,3.52532,18.5341C3.99531,18.9962,4.53447,19.3538,5.14278,19.6071C5.2229,19.6405,5.33983,19.695,5.49356,19.7705C5.80505,19.9235,6.00818,20,6.10294,20L6.10294,22Z", - "fill-rule": "evenodd", - "fill": "#000000", - "fill-opacity": "1", - "style": "mix-blend-mode:passthrough" - }, - "children": [] - } - ] - }, - { - "type": "element", - "name": "g", - "attributes": {}, - "children": [ - { - "type": "element", - "name": "path", - "attributes": { - "d": "M20.18796103515625,11.66909C19.46346103515625,11.5762,18.72726103515625,11.52975,17.991011035156248,11.52975C16.728921035156247,11.52975,15.45515103515625,11.66909,14.23981103515625,11.91292C13.02447103515625,12.156749999999999,11.85588103515625,12.539909999999999,10.73402103515625,12.98113C9.98612103515625,13.306239999999999,9.23822103515625,13.69327,8.49031803515625,14.14223C7.99950790415625,14.43251,7.85927603515625,15.10595,8.15142503515625,15.59361L11.11966103515625,19.9478C11.45855103515625,20.4354,12.13634103515625,20.5747,12.627151035156249,20.2845C12.821921035156251,20.152900000000002,13.14523103515625,19.990299999999998,13.59708103515625,19.796799999999998C14.27487103515625,19.506500000000003,14.964341035156249,19.3091,15.68887103515625,19.169800000000002C16.413401035156248,19.018900000000002,17.14962103515625,18.926000000000002,17.93258103515625,18.926000000000002L20.071061035156248,11.715530000000001L20.18796103515625,11.66909ZM22.91076103515625,12.20319L22.525161035156252,8L20.18796103515625,11.6807C20.72556103515625,11.72714,21.21636103515625,11.82003,21.74216103515625,11.92453C21.74216103515625,11.91679,22.13166103515625,12.00968,22.91076103515625,12.20319ZM18.09616103515625,18.9724L17.06782103515625,22.4557L18.773961035156248,24L21.11116103515625,23.465899999999998L21.788961035156248,19.5414C21.298161035156248,19.402,20.81896103515625,19.2511,20.32816103515625,19.1582C19.60366103515625,19.076900000000002,18.86746103515625,18.9724,18.09616103515625,18.9724ZM27.49166103515625,14.14223C26.74376103515625,13.69327,25.99586103515625,13.306239999999999,25.24796103515625,12.98113C24.52346103515625,12.69086,23.74046103515625,12.40058,22.95756103515625,12.20319L22.95756103515625,12.40058L21.69546103515625,19.5646C21.89416103515625,19.6575,22.139561035156248,19.7039,22.32646103515625,19.8084C22.77836103515625,20.0019,23.101661035156248,20.1645,23.29646103515625,20.2961C23.78726103515625,20.586399999999998,24.51176103515625,20.4354,24.80396103515625,19.959400000000002L27.77216103515625,15.605229999999999C28.16946103515625,15.05951,28.02926103515625,14.43251,27.49166103515625,14.14223Z", - "fill": "#000000", - "fill-opacity": "1", - "style": "mix-blend-mode:passthrough" - }, - "children": [] - } - ] - } - ] - }, - { - "type": "element", - "name": "g", - "attributes": {}, - "children": [ - { - "type": "element", - "name": "path", - "attributes": { - "d": "M53.295,19.1189814453125L51.951,21.2189814453125Q46.05,17.6279814453125,43.971000000000004,13.0079814453125Q42.921,15.4019814453125,40.884,17.3969814453125Q38.847,19.3919814453125,35.97,21.2399814453125L34.5,19.1609814453125Q41.997,14.9609814453125,42.585,9.2489814453125L35.214,9.2489814453125L35.214,7.1069814453125L42.647999999999996,7.1069814453125L42.647999999999996,2.2979812453125L44.958,2.3819804453125L44.958,7.1069814453125L52.455,7.1069814453125L52.455,9.2489814453125L44.916,9.2489814453125L44.894999999999996,9.5219814453125Q45.650999999999996,12.6509814453125,47.646,14.8979814453125Q49.641,17.1449814453125,53.295,19.1189814453125ZM66.021,7.0649814453125L64.215,7.0649814453125L64.215,5.9099814453125L61.653,5.9099814453125L61.653,4.1039814453125L64.215,4.1039814453125L64.215,2.2559814453125L66.021,2.3399810453125L66.021,4.1039814453125L68.77199999999999,4.1039814453125L68.77199999999999,2.2559814453125L70.557,2.3399810453125L70.557,4.1039814453125L73.413,4.1039814453125L73.413,5.9099814453125L70.557,5.9099814453125L70.557,7.0649814453125L68.77199999999999,7.0649814453125L68.77199999999999,5.9099814453125L66.021,5.9099814453125L66.021,7.0649814453125ZM68.814,16.8929814453125Q69.549,17.9009814453125,70.84049999999999,18.6044814453125Q72.132,19.3079814453125,74.19,19.7279814453125L73.62299999999999,21.6179814453125Q69.36,20.5679814453125,67.449,18.1109814453125Q66.693,19.2449814453125,65.202,20.1059814453125Q63.711,20.9669814453125,61.296,21.6389814453125L60.54,19.8119814453125Q62.766,19.3289814453125,64.0575,18.6044814453125Q65.349,17.879981445312502,65.895,16.8929814453125L61.317,16.8929814453125L61.317,15.2339814453125L66.378,15.2339814453125Q66.399,15.1499814453125,66.399,15.0029814453125Q66.42,14.7299814453125,66.42,13.9949814453125L62.262,13.9949814453125L62.262,12.4199814453125L60.96,13.3439814453125Q60.519,12.3779814453125,59.784,11.2439814453125L59.784,21.2189814453125L57.957,21.2189814453125L57.957,12.0839814453125Q56.949,14.6669814453125,55.962,16.3049814453125L54.45,14.7929814453125Q55.332,13.3649814453125,56.193,11.5904814453125Q57.054,9.815981445312499,57.620999999999995,8.1779814453125L55.521,8.1779814453125L55.521,6.2669814453125L57.957,6.2669814453125L57.957,2.3189811453125L59.784,2.4029824453125L59.784,6.2669814453125L61.757999999999996,6.2669814453125L61.757999999999996,8.1779814453125L59.784,8.1779814453125L59.784,10.3829814453125L60.708,9.6689814453125Q61.59,10.7609814453125,62.262,12.0419814453125L62.262,7.5479814453125L72.489,7.5479814453125L72.489,13.9949814453125L68.37299999999999,13.9949814453125Q68.331,14.7089814453125,68.331,15.0029814453125L68.331,15.2339814453125L73.497,15.2339814453125L73.497,16.8929814453125L68.814,16.8929814453125ZM70.809,10.1099814453125L70.809,9.1019814453125L64.005,9.1019814453125L64.005,10.1099814453125L70.809,10.1099814453125ZM70.809,11.4749814453125L64.005,11.4749814453125L64.005,12.4409814453125L70.809,12.4409814453125L70.809,11.4749814453125ZM88.89,13.7639814453125L88.30199999999999,11.8529814453125L89.856,11.7269814453125Q90.63300000000001,11.6639814453125,90.88499999999999,11.4644814453125Q91.137,11.2649814453125,91.137,10.5929814453125L91.137,2.6969814453125L93.09,2.7809824453125L93.09,11.1179814453125Q93.09,12.0839814453125,92.85900000000001,12.5879814453125Q92.628,13.0919814453125,92.0715,13.3229814453125Q91.515,13.5539814453125,90.444,13.6379814453125L88.89,13.7639814453125ZM76.35300000000001,13.5959814453125Q77.445,12.4619814453125,77.928,11.6639814453125Q78.411,10.8659814453125,78.55799999999999,9.8579814453125L76.311,9.8579814453125L76.311,8.0309814453125L78.684,8.0309814453125L78.684,7.4639814453125L78.684,5.2589814453125L76.836,5.2589814453125L76.836,3.3689814453125L86.706,3.3689814453125L86.706,5.2589814453125L84.9,5.2589814453125L84.9,8.0309814453125L87.126,8.0309814453125L87.126,9.8579814453125L84.9,9.8579814453125L84.9,13.4909814453125L82.926,13.4909814453125L82.926,9.8579814453125L80.532,9.8579814453125Q80.364,11.3699814453125,79.797,12.4619814453125Q79.22999999999999,13.5539814453125,77.949,14.8349814453125L76.35300000000001,13.5959814453125ZM87.672,3.7679814453125L89.583,3.8519814453125L89.583,11.0969814453125L87.672,11.0969814453125L87.672,3.7679814453125ZM80.637,5.2589814453125L80.637,7.4849814453125L80.637,8.0309814453125L82.926,8.0309814453125L82.926,5.2589814453125L80.637,5.2589814453125ZM86.223,16.7039814453125L86.223,18.9719814453125L94.32900000000001,18.9719814453125L94.32900000000001,20.8409814453125L76.017,20.8409814453125L76.017,18.9719814453125L84.144,18.9719814453125L84.144,16.7039814453125L78.15899999999999,16.7039814453125L78.15899999999999,14.8769814453125L84.144,14.8769814453125L84.144,13.6799814453125L86.223,13.7639814453125L86.223,14.8769814453125L92.229,14.8769814453125L92.229,16.7039814453125L86.223,16.7039814453125ZM115.119,3.4739814453125L115.119,5.5319814453125L112.494,5.5319814453125L112.494,18.0899814453125Q112.494,19.3289814453125,112.2315,19.9169814453125Q111.969,20.5049814453125,111.3075,20.7569814453125Q110.646,21.0089814453125,109.239,21.1349814453125L107.874,21.2609814453125L107.223,19.1819814453125L108.819,19.0559814453125Q109.554,18.9929814453125,109.8795,18.8669814453125Q110.205,18.7409814453125,110.31,18.4469814453125Q110.415,18.1529814453125,110.415,17.501981445312502L110.415,5.5319814453125L96.59700000000001,5.5319814453125L96.59700000000001,3.4739814453125L115.119,3.4739814453125ZM98.802,7.9679814453125L107.433,7.9679814453125L107.433,17.2499814453125L98.802,17.2499814453125L98.802,7.9679814453125ZM100.797,15.2129814453125L105.459,15.2129814453125L105.459,10.0259814453125L100.797,10.0259814453125L100.797,15.2129814453125ZM132.192,5.1539814453125L126.711,5.1539814453125L126.711,15.1289814453125L124.737,15.1289814453125L124.737,3.1799814453125L134.166,3.1799814453125L134.166,15.0869814453125L132.192,15.0869814453125L132.192,5.1539814453125ZM123.036,18.6569814453125Q122.385,17.2499814453125,121.482,15.4649814453125Q120.327,17.9009814453125,118.311,20.8199814453125L116.715,19.4549814453125Q119.088,16.2839814453125,120.369,13.2179814453125Q118.584,9.7739814453125,117.534,8.0099814453125L119.067,7.0229814453125Q119.76,8.1149814453125,121.251,10.7609814453125Q121.839,8.7449814453125,122.217,6.0989814453125L117.576,6.0989814453125L117.576,4.0829814453125L124.254,4.0829814453125L124.254,6.0989814453125Q123.75,9.8579814453125,122.511,13.0919814453125Q123.771,15.4439814453125,124.695,17.3549814453125L123.036,18.6569814453125ZM135.78300000000002,16.5779814453125Q135.72,17.8379814453125,135.594,18.6359814453125Q135.46800000000002,19.6019814453125,135.237,20.0849814453125Q135.006,20.5679814453125,134.523,20.7779814453125Q134.04000000000002,20.9879814453125,133.095,20.9879814453125L131.247,20.9879814453125Q130.05,20.9879814453125,129.5775,20.4839814453125Q129.10500000000002,19.9799814453125,129.10500000000002,18.6359814453125L129.10500000000002,16.3469814453125Q128.349,17.8379814453125,127.068,19.1399814453125Q125.787,20.4419814453125,123.834,21.7439814453125L122.532,20.0219814453125Q124.863,18.5939814453125,126.0705,17.2394814453125Q127.278,15.8849814453125,127.74,14.2994814453125Q128.202,12.7139814453125,128.286,10.2569814453125L128.349,6.1409814453125L130.449,6.224981445312499L130.386,10.5089814453125Q130.32299999999998,12.2309814453125,130.05,13.5959814453125L131.058,13.6379814453125L131.058,17.9219814453125Q131.058,18.5939814453125,131.226,18.7829814453125Q131.394,18.9719814453125,131.982,18.9719814453125L132.696,18.9719814453125Q133.263,18.9719814453125,133.4625,18.7934814453125Q133.662,18.6149814453125,133.74599999999998,17.942981445312498Q133.872,16.7249814453125,133.872,15.8639814453125L135.78300000000002,16.5779814453125ZM139.374,2.5079814453125Q140.088,2.9909814453125,141.054,3.8204814453125Q142.01999999999998,4.6499814453125,142.587,5.2379814453125L141.39,6.8759814453125Q140.928,6.3089814453125,139.941,5.3954814453125Q138.954,4.4819814453125,138.28199999999998,3.9569814453125L139.374,2.5079814453125ZM152.184,19.0769814453125Q152.751,19.0139814453125,153.014,18.9299814453125Q153.276,18.8459814453125,153.381,18.6359814453125Q153.486,18.4259814453125,153.486,17.9639814453125L153.486,2.6549814453125L155.124,2.7389824453125L155.124,18.5939814453125Q155.124,19.5389814453125,154.95600000000002,20.0009814453125Q154.788,20.4629814453125,154.315,20.6729814453125Q153.84300000000002,20.8829814453125,152.83499999999998,20.9669814453125L151.659,21.0509814453125L151.09199999999998,19.1609814453125L152.184,19.0769814453125ZM142.587,15.8429814453125L142.587,3.4529814453125L149.286,3.4529814453125L149.286,15.7799814453125L147.543,15.7799814453125L147.543,5.2799814453125L144.288,5.2799814453125L144.288,15.8429814453125L142.587,15.8429814453125ZM150.546,16.4099814453125L150.546,4.4819814453125L152.184,4.5659814453125005L152.184,16.4099814453125L150.546,16.4099814453125ZM141.012,19.7279814453125Q142.81799999999998,18.4049814453125,143.679,17.3654814453125Q144.54000000000002,16.3259814453125,144.834,15.0974814453125Q145.128,13.8689814453125,145.128,11.7689814453125L145.128,6.224981445312499L146.76600000000002,6.3089814453125L146.76600000000002,11.7689814453125Q146.76600000000002,14.2889814453125,146.33499999999998,15.8954814453125Q145.905,17.501981445312502,144.95,18.6779814453125Q143.994,19.8539814453125,142.209,21.1979814453125L141.012,19.7279814453125ZM138.639,7.2329814453125Q139.353,7.7369814453125,140.329,8.5874814453125Q141.30599999999998,9.4379814453125,141.957,10.1099814453125L140.76,11.7899814453125Q140.151,11.0969814453125,139.174,10.2044814453125Q138.19799999999998,9.311981445312501,137.421,8.7239814453125L138.639,7.2329814453125ZM137.82,20.2949814453125Q138.156,19.3709814453125,138.933,16.5989814453125Q139.70999999999998,13.8269814453125,139.878,12.9029814453125L140.781,13.1969814453125L141.642,13.4909814453125Q141.369,14.7299814453125,140.66500000000002,17.2814814453125Q139.962,19.8329814453125,139.60500000000002,20.9249814453125L137.82,20.2949814453125ZM147.144,15.9689814453125Q148.86599999999999,17.5439814453125,150.10500000000002,19.1189814453125L148.86599999999999,20.4839814453125Q148.06799999999998,19.4129814453125,147.449,18.6884814453125Q146.829,17.9639814453125,146.01,17.207981445312498L147.144,15.9689814453125Z", - "fill": "#000000", - "fill-opacity": "1" - }, - "children": [] - } - ] - } - ] - } - ] - }, - "name": "AliyunIconBig" + "icon": { + "type": "element", + "isRootNode": true, + "name": "svg", + "attributes": { + "xmlns": "http://www.w3.org/2000/svg", + "xmlns:xlink": "http://www.w3.org/1999/xlink", + "fill": "none", + "version": "1.1", + "width": "96", + "height": "24", + "viewBox": "0 0 96 24" + }, + "children": [ + { + "type": "element", + "name": "g", + "children": [ + { + "type": "element", + "name": "g", + "children": [ + { + "type": "element", + "name": "path", + "attributes": { + "d": "M6.10294,22C5.68819,22,5.18195,21.8532,4.58421,21.5595C4.46861,21.5027,4.39038,21.4658,4.34951,21.4488C3.49789,21.0943,2.74367,20.5941,2.08684,19.9484C0.695613,18.5806,0,16.9311,0,15C0,13.0689,0.695612,11.4194,2.08684,10.0516C3.24259,8.91537,4.59607,8.2511,6.14728,8.05878C6.34758,5.97414,7.22633,4.16634,8.78354,2.63539C10.5706,0.878463,12.7286,0,15.2573,0C17.2884,0,19.1146,0.595472,20.7358,1.78642C22.327,2.95528,23.4151,4.46783,24,6.32406L22.0568,6.91594C21.6024,5.47377,20.7561,4.29798,19.5181,3.38858C18.2579,2.46286,16.8377,2,15.2573,2C13.2903,2,11.6119,2.6832,10.222,4.04961C8.83217,5.41601,8.13725,7.06614,8.13725,9L8.13725,10L7.12009,10C5.71758,10,4.51932,10.4886,3.52532,11.4659C2.53132,12.4431,2.03431,13.6211,2.03431,15C2.03431,16.3789,2.53132,17.5569,3.52532,18.5341C3.99531,18.9962,4.53447,19.3538,5.14278,19.6071C5.2229,19.6405,5.33983,19.695,5.49356,19.7705C5.80505,19.9235,6.00818,20,6.10294,20L6.10294,22Z", + "fill-rule": "evenodd", + "fill": "#FF6A00", + "fill-opacity": "1" + } + } + ] + }, + { + "type": "element", + "name": "g", + "children": [ + { + "type": "element", + "name": "path", + "attributes": { + "d": "M20.18796103515625,11.66909C19.46346103515625,11.5762,18.72726103515625,11.52975,17.991011035156248,11.52975C16.728921035156247,11.52975,15.45515103515625,11.66909,14.23981103515625,11.91292C13.02447103515625,12.156749999999999,11.85588103515625,12.539909999999999,10.73402103515625,12.98113C9.98612103515625,13.306239999999999,9.23822103515625,13.69327,8.49031803515625,14.14223C7.99950790415625,14.43251,7.85927603515625,15.10595,8.15142503515625,15.59361L11.11966103515625,19.9478C11.45855103515625,20.4354,12.13634103515625,20.5747,12.627151035156249,20.2845C12.821921035156251,20.152900000000002,13.14523103515625,19.990299999999998,13.59708103515625,19.796799999999998C14.27487103515625,19.506500000000003,14.964341035156249,19.3091,15.68887103515625,19.169800000000002C16.413401035156248,19.018900000000002,17.14962103515625,18.926000000000002,17.93258103515625,18.926000000000002L20.071061035156248,11.715530000000001L20.18796103515625,11.66909ZM22.91076103515625,12.20319L22.525161035156252,8L20.18796103515625,11.6807C20.72556103515625,11.72714,21.21636103515625,11.82003,21.74216103515625,11.92453C21.74216103515625,11.91679,22.13166103515625,12.00968,22.91076103515625,12.20319ZM18.09616103515625,18.9724L17.06782103515625,22.4557L18.773961035156248,24L21.11116103515625,23.465899999999998L21.788961035156248,19.5414C21.298161035156248,19.402,20.81896103515625,19.2511,20.32816103515625,19.1582C19.60366103515625,19.076900000000002,18.86746103515625,18.9724,18.09616103515625,18.9724ZM27.49166103515625,14.14223C26.74376103515625,13.69327,25.99586103515625,13.306239999999999,25.24796103515625,12.98113C24.52346103515625,12.69086,23.74046103515625,12.40058,22.95756103515625,12.20319L22.95756103515625,12.40058L21.69546103515625,19.5646C21.89416103515625,19.6575,22.139561035156248,19.7039,22.32646103515625,19.8084C22.77836103515625,20.0019,23.101661035156248,20.1645,23.29646103515625,20.2961C23.78726103515625,20.586399999999998,24.51176103515625,20.4354,24.80396103515625,19.959400000000002L27.77216103515625,15.605229999999999C28.16946103515625,15.05951,28.02926103515625,14.43251,27.49166103515625,14.14223Z", + "fill": "#FF6A00", + "fill-opacity": "1" + } + } + ] + }, + { + "type": "element", + "name": "g", + "children": [ + { + "type": "element", + "name": "path", + "attributes": { + "d": "M35.785,3.8624638671875L50.233000000000004,3.8624638671875L50.233000000000004,5.9204638671875L35.785,5.9204638671875L35.785,3.8624638671875ZM43.156,11.5904638671875Q42.106,13.4594638671875,40.7515,15.4754638671875Q39.397,17.4914638671875,38.599000000000004,18.3104638671875L46.978,17.5334638671875Q45.382,15.0974638671875,44.479,13.8794638671875L46.306,12.7034638671875Q47.397999999999996,14.1104638671875,48.9835,16.3784638671875Q50.569,18.6464638671875,51.492999999999995,20.1794638671875L49.54,21.6494638671875Q49.057,20.8094638671875,48.238,19.5074638671875Q46.243,19.6334638671875,42.82,19.9274638671875Q39.397,20.2214638671875,37.024,20.4734638671875L36.184,20.5784638671875L35.47,20.6834638671875L34.84,18.5624638671875Q35.281,18.4154638671875,35.512,18.2579638671875Q35.743,18.1004638671875,35.9005,17.963963867187502Q36.058,17.8274638671875,36.121,17.7644638671875Q37.087,16.840463867187502,38.305,15.1079638671875Q39.522999999999996,13.3754638671875,40.531,11.5904638671875L34,11.5904638671875L34,9.5114638671875L52.018,9.5114638671875L52.018,11.5904638671875L43.156,11.5904638671875ZM62.203,10.9814638671875L62.203,12.7244638671875L60.25,12.7244638671875L60.25,2.5814638671875L62.203,2.6654638671875L62.203,10.4144638671875Q63.19,8.6294638671875,64.051,6.4139638671875Q64.912,4.1984638671875,65.28999999999999,2.3504638671875L67.348,2.8334628671875Q67.15899999999999,3.7784638671875,66.80199999999999,4.9754638671875L72.619,4.9754638671875L72.619,6.9704638671875L66.13,6.9704638671875Q65.143,9.7004638671875,63.778,12.0524638671875L62.203,10.9814638671875ZM56.113,3.3794638671875L58.045,3.4634638671875L58.045,12.1784638671875L56.113,12.1784638671875L56.113,3.3794638671875ZM67.495,7.3064638671875Q68.251,7.8944638671875,69.469,9.1229638671875Q70.687,10.3514638671875,71.40100000000001,11.2334638671875L69.84700000000001,12.7454638671875Q69.238,11.9684638671875,68.083,10.7714638671875Q66.928,9.5744638671875,66.025,8.7134638671875L67.495,7.3064638671875ZM70.834,13.3754638671875L70.834,18.9194638671875L73.06,18.9194638671875L73.06,20.8094638671875L54.307,20.8094638671875L54.307,18.9194638671875L56.491,18.9194638671875L56.491,13.3754638671875L70.834,13.3754638671875ZM60.733000000000004,15.2444638671875L58.465,15.2444638671875L58.465,18.9194638671875L60.733000000000004,18.9194638671875L60.733000000000004,15.2444638671875ZM62.581,18.9194638671875L64.765,18.9194638671875L64.765,15.2444638671875L62.581,15.2444638671875L62.581,18.9194638671875ZM66.592,18.9194638671875L68.881,18.9194638671875L68.881,15.2444638671875L66.592,15.2444638671875L66.592,18.9194638671875ZM80.578,11.0444638671875L80.893,12.4514638671875L79.48599999999999,13.0814638671875L79.48599999999999,19.0874638671875Q79.48599999999999,20.0114638671875,79.2655,20.4629638671875Q79.045,20.9144638671875,78.52000000000001,21.1034638671875Q77.995,21.2924638671875,76.90299999999999,21.3974638671875L76.021,21.4814638671875L75.43299999999999,19.4864638671875L76.462,19.4024638671875Q76.987,19.3604638671875,77.197,19.2974638671875Q77.407,19.2344638671875,77.4805,19.0559638671875Q77.554,18.8774638671875,77.554,18.4364638671875L77.554,13.9004638671875Q76.189,14.4464638671875,75.202,14.7824638671875L74.74000000000001,12.7244638671875Q75.916,12.3464638671875,77.554,11.6744638671875L77.554,8.1464638671875L75.34899999999999,8.1464638671875L75.34899999999999,6.1094638671875L77.554,6.1094638671875L77.554,2.4974628671875L79.48599999999999,2.5814638671875L79.48599999999999,6.1094638671875L81.03999999999999,6.1094638671875L81.03999999999999,8.1464638671875L79.48599999999999,8.1464638671875L79.48599999999999,10.8344638671875L80.431,10.3934638671875L80.578,11.0444638671875ZM83.56,6.6764638671875L83.56,9.0074638671875L81.565,9.0074638671875L81.565,4.7444638671875L86.24799999999999,4.7444638671875Q85.84899999999999,3.3794638671875,85.618,2.7494638671875L87.655,2.4974628671875Q87.991,3.2744638671875,88.432,4.7444638671875L93.094,4.7444638671875L93.094,9.0074638671875L91.162,9.0074638671875L91.162,6.6764638671875L83.56,6.6764638671875ZM86.731,9.3434638671875Q85.807,10.2674638671875,84.7465,11.1284638671875Q83.686,11.9894638671875,82.15299999999999,13.1234638671875L81.082,11.5064638671875Q83.455,9.9524638671875,85.408,7.9154638671875L86.731,9.3434638671875ZM88.852,7.9154638671875Q89.755,8.5244638671875,91.3615,9.731963867187499Q92.968,10.9394638671875,93.703,11.5694638671875L92.632,13.3334638671875Q91.771,12.5354638671875,90.217,11.3384638671875Q88.663,10.1414638671875,87.718,9.5114638671875L88.852,7.9154638671875ZM92.107,15.2444638671875L88.285,15.2444638671875L88.285,18.7094638671875L93.577,18.7094638671875L93.577,20.5994638671875L80.935,20.5994638671875L80.935,18.7094638671875L86.164,18.7094638671875L86.164,15.2444638671875L82.3,15.2444638671875L82.3,13.3334638671875L92.107,13.3334638671875L92.107,15.2444638671875Z", + "fill": "#FF6A00", + "fill-opacity": "1" + } + } + ] + } + ] + } + ] + }, + "name": "AliyunBigIcon" } From bd5b9385719e642cad37c32e725ca8846b8d192d Mon Sep 17 00:00:00 2001 From: Mike Zixuan HE Date: Mon, 28 Jul 2025 11:03:19 +0800 Subject: [PATCH 025/415] feat: Support allOf in OpenAPI properties inside schema #22946 (#22975) --- api/core/tools/utils/parser.py | 23 ++++++++ .../core/tools/utils/test_parser.py | 55 +++++++++++++++++++ 2 files changed, 78 insertions(+) diff --git a/api/core/tools/utils/parser.py b/api/core/tools/utils/parser.py index a3c84615ca..3857a2a16b 100644 --- a/api/core/tools/utils/parser.py +++ b/api/core/tools/utils/parser.py @@ -105,6 +105,29 @@ class ApiBasedToolSchemaParser: # overwrite the content interface["operation"]["requestBody"]["content"][content_type]["schema"] = root + # handle allOf reference in schema properties + for prop_dict in root.get("properties", {}).values(): + for item in prop_dict.get("allOf", []): + if "$ref" in item: + ref_schema = openapi + reference = item["$ref"].split("/")[1:] + for ref in reference: + ref_schema = ref_schema[ref] + else: + ref_schema = item + for key, value in ref_schema.items(): + if isinstance(value, list): + if key not in prop_dict: + prop_dict[key] = [] + # extends list field + if isinstance(prop_dict[key], list): + prop_dict[key].extend(value) + elif key not in prop_dict: + # add new field + prop_dict[key] = value + if "allOf" in prop_dict: + del prop_dict["allOf"] + # parse body parameters if "schema" in interface["operation"]["requestBody"]["content"][content_type]: body_schema = interface["operation"]["requestBody"]["content"][content_type]["schema"] diff --git a/api/tests/unit_tests/core/tools/utils/test_parser.py b/api/tests/unit_tests/core/tools/utils/test_parser.py index 8e07293ce0..e1eab21ca4 100644 --- a/api/tests/unit_tests/core/tools/utils/test_parser.py +++ b/api/tests/unit_tests/core/tools/utils/test_parser.py @@ -54,3 +54,58 @@ def test_parse_openapi_to_tool_bundle_operation_id(app): assert tool_bundles[0].operation_id == "_get" assert tool_bundles[1].operation_id == "apiresources_get" assert tool_bundles[2].operation_id == "createResource" + + +def test_parse_openapi_to_tool_bundle_properties_all_of(app): + openapi = { + "openapi": "3.0.0", + "info": {"title": "Simple API", "version": "1.0.0"}, + "servers": [{"url": "http://localhost:3000"}], + "paths": { + "/api/resource": { + "get": { + "requestBody": { + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Request", + }, + }, + }, + "required": True, + }, + }, + }, + }, + "components": { + "schemas": { + "Request": { + "type": "object", + "properties": { + "prop1": { + "enum": ["option1"], + "description": "desc prop1", + "allOf": [ + {"$ref": "#/components/schemas/AllOfItem"}, + { + "enum": ["option2"], + }, + ], + }, + }, + }, + "AllOfItem": { + "type": "string", + "enum": ["option3"], + "description": "desc allOf item", + }, + } + }, + } + with app.test_request_context(): + tool_bundles = ApiBasedToolSchemaParser.parse_openapi_to_tool_bundle(openapi) + + assert tool_bundles[0].parameters[0].type == "string" + assert tool_bundles[0].parameters[0].llm_description == "desc prop1" + # TODO: support enum in OpenAPI + # assert set(tool_bundles[0].parameters[0].options) == {"option1", "option2", "option3"} From ee731c7810be81cc6b935a78299755ae95a0b718 Mon Sep 17 00:00:00 2001 From: Wu Tianwei <30284043+WTW0313@users.noreply.github.com> Date: Mon, 28 Jul 2025 13:58:21 +0800 Subject: [PATCH 026/415] chore: Updata eslint config dependencies (#23040) --- web/package.json | 10 +- web/pnpm-lock.yaml | 328 +++++++++++++++++++++++++++++---------------- 2 files changed, 220 insertions(+), 118 deletions(-) diff --git a/web/package.json b/web/package.json index 2470a70dec..6915620312 100644 --- a/web/package.json +++ b/web/package.json @@ -152,7 +152,7 @@ "zustand": "^4.5.2" }, "devDependencies": { - "@antfu/eslint-config": "^4.1.1", + "@antfu/eslint-config": "^5.0.0", "@chromatic-com/storybook": "^3.1.0", "@eslint-react/eslint-plugin": "^1.15.0", "@eslint/eslintrc": "^3.1.0", @@ -160,7 +160,7 @@ "@faker-js/faker": "^9.0.3", "@happy-dom/jest-environment": "^17.4.4", "@next/bundle-analyzer": "^15.4.1", - "@next/eslint-plugin-next": "~15.3.5", + "@next/eslint-plugin-next": "~15.4.4", "@rgrove/parse-xml": "^4.1.0", "@storybook/addon-essentials": "8.5.0", "@storybook/addon-interactions": "8.5.0", @@ -197,7 +197,7 @@ "code-inspector-plugin": "^0.18.1", "cross-env": "^7.0.3", "eslint": "^9.20.1", - "eslint-config-next": "~15.3.5", + "eslint-config-next": "~15.4.4", "eslint-plugin-oxlint": "^1.6.0", "eslint-plugin-react-hooks": "^5.1.0", "eslint-plugin-react-refresh": "^0.4.19", @@ -216,7 +216,7 @@ "tailwindcss": "^3.4.14", "ts-node": "^10.9.2", "typescript": "^5.8.3", - "typescript-eslint": "^8.36.0", + "typescript-eslint": "^8.38.0", "uglify-js": "^3.19.3" }, "resolutions": { @@ -270,4 +270,4 @@ "which-typed-array": "npm:@nolyfill/which-typed-array@^1" } } -} +} \ No newline at end of file diff --git a/web/pnpm-lock.yaml b/web/pnpm-lock.yaml index eaff8c8504..58153b9fc1 100644 --- a/web/pnpm-lock.yaml +++ b/web/pnpm-lock.yaml @@ -379,8 +379,8 @@ importers: version: 4.5.7(@types/react@19.1.8)(immer@9.0.21)(react@19.1.0) devDependencies: '@antfu/eslint-config': - specifier: ^4.1.1 - version: 4.17.0(@eslint-react/eslint-plugin@1.52.3(eslint@9.31.0(jiti@1.21.7))(ts-api-utils@2.1.0(typescript@5.8.3))(typescript@5.8.3))(@vue/compiler-sfc@3.5.17)(eslint-plugin-react-hooks@5.2.0(eslint@9.31.0(jiti@1.21.7)))(eslint-plugin-react-refresh@0.4.20(eslint@9.31.0(jiti@1.21.7)))(eslint@9.31.0(jiti@1.21.7))(typescript@5.8.3) + specifier: ^5.0.0 + version: 5.0.0(@eslint-react/eslint-plugin@1.52.3(eslint@9.31.0(jiti@1.21.7))(ts-api-utils@2.1.0(typescript@5.8.3))(typescript@5.8.3))(@next/eslint-plugin-next@15.4.4)(@vue/compiler-sfc@3.5.17)(eslint-plugin-react-hooks@5.2.0(eslint@9.31.0(jiti@1.21.7)))(eslint-plugin-react-refresh@0.4.20(eslint@9.31.0(jiti@1.21.7)))(eslint@9.31.0(jiti@1.21.7))(typescript@5.8.3) '@chromatic-com/storybook': specifier: ^3.1.0 version: 3.2.7(react@19.1.0)(storybook@8.5.0) @@ -403,8 +403,8 @@ importers: specifier: ^15.4.1 version: 15.4.1 '@next/eslint-plugin-next': - specifier: ~15.3.5 - version: 15.3.5 + specifier: ~15.4.4 + version: 15.4.4 '@rgrove/parse-xml': specifier: ^4.1.0 version: 4.2.0 @@ -514,8 +514,8 @@ importers: specifier: ^9.20.1 version: 9.31.0(jiti@1.21.7) eslint-config-next: - specifier: ~15.3.5 - version: 15.3.5(eslint@9.31.0(jiti@1.21.7))(typescript@5.8.3) + specifier: ~15.4.4 + version: 15.4.4(eslint@9.31.0(jiti@1.21.7))(typescript@5.8.3) eslint-plugin-oxlint: specifier: ^1.6.0 version: 1.6.0 @@ -571,8 +571,8 @@ importers: specifier: ^5.8.3 version: 5.8.3 typescript-eslint: - specifier: ^8.36.0 - version: 8.37.0(eslint@9.31.0(jiti@1.21.7))(typescript@5.8.3) + specifier: ^8.38.0 + version: 8.38.0(eslint@9.31.0(jiti@1.21.7))(typescript@5.8.3) uglify-js: specifier: ^3.19.3 version: 3.19.3 @@ -590,11 +590,12 @@ packages: resolution: {integrity: sha512-30iZtAPgz+LTIYoeivqYo853f02jBYSd5uGnGpkFV0M3xOt9aN73erkgYAmZU43x4VfqcnLxW9Kpg3R5LC4YYw==} engines: {node: '>=6.0.0'} - '@antfu/eslint-config@4.17.0': - resolution: {integrity: sha512-S1y0A1+0DcpV6GmjwB9gQCQc7ni9zlKa3MQRqRCEZ0E1WW+nRL1BUwnbk3DpMJAMsb3UIAt1lsAiIBnvIw2NDw==} + '@antfu/eslint-config@5.0.0': + resolution: {integrity: sha512-uAMv8PiW9BOAGmIyTDtWXGnNfv6PFV4DmpqmlUpST5k4bue38VRdIfnM4jvgPuny1xnjYX3flN3kB9++6LknMw==} hasBin: true peerDependencies: '@eslint-react/eslint-plugin': ^1.38.4 + '@next/eslint-plugin-next': ^15.4.0-canary.115 '@prettier/plugin-xml': ^3.4.1 '@unocss/eslint-plugin': '>=0.50.0' astro-eslint-parser: ^1.0.2 @@ -612,6 +613,8 @@ packages: peerDependenciesMeta: '@eslint-react/eslint-plugin': optional: true + '@next/eslint-plugin-next': + optional: true '@prettier/plugin-xml': optional: true '@unocss/eslint-plugin': @@ -1563,14 +1566,6 @@ packages: resolution: {integrity: sha512-ViuymvFmcJi04qdZeDc2whTHryouGcDlaxPqarTD0ZE10ISpxGUVZGZDx4w01upyIynL3iu6IXH2bS1NhclQMw==} engines: {node: ^18.18.0 || ^20.9.0 || >=21.1.0} - '@eslint/core@0.13.0': - resolution: {integrity: sha512-yfkgDw1KR66rkT5A8ci4irzDysN7FRpq3ttJolR88OqQikAWqwA8j5VZyas+vjyBNFIJ7MfybJ9plMILI2UrCw==} - engines: {node: ^18.18.0 || ^20.9.0 || >=21.1.0} - - '@eslint/core@0.14.0': - resolution: {integrity: sha512-qIbV0/JZr7iSDjqAc60IqbLdsj9GDt16xQtWD+B78d/HAlvysGdZZ6rpJHGAc2T0FQx1X6thsSPdnoiGKdNtdg==} - engines: {node: ^18.18.0 || ^20.9.0 || >=21.1.0} - '@eslint/core@0.15.1': resolution: {integrity: sha512-bkOp+iumZCCbt1K1CmWf0R9pM5yKpDv+ZXtvSyQpudrI9kuFLp+bM2WOPXImuD/ceQuaa8f5pj93Y7zyECIGNA==} engines: {node: ^18.18.0 || ^20.9.0 || >=21.1.0} @@ -1583,22 +1578,22 @@ packages: resolution: {integrity: sha512-LOm5OVt7D4qiKCqoiPbA7LWmI+tbw1VbTUowBcUMgQSuM6poJufkFkYDcQpo5KfgD39TnNySV26QjOh7VFpSyw==} engines: {node: ^18.18.0 || ^20.9.0 || >=21.1.0} - '@eslint/markdown@7.0.0': - resolution: {integrity: sha512-0WNH6pSFHNlWSlNaIFQP0sLHpMUJw1FaJtyqapvGqOt0ISRgTUkTLVT0hT/zekDA1QlP2TT8pwjPkqYTu2s8yg==} + '@eslint/markdown@7.1.0': + resolution: {integrity: sha512-Y+X1B1j+/zupKDVJfkKc8uYMjQkGzfnd8lt7vK3y8x9Br6H5dBuhAfFrQ6ff7HAMm/1BwgecyEiRFkYCWPRxmA==} engines: {node: ^18.18.0 || ^20.9.0 || >=21.1.0} '@eslint/object-schema@2.1.6': resolution: {integrity: sha512-RBMg5FRL0I0gs51M/guSAj5/e14VQ4tpZnQNWwuDT66P14I43ItmPfIZRhO9fUVIPOAQXU47atlywZ/czoqFPA==} engines: {node: ^18.18.0 || ^20.9.0 || >=21.1.0} - '@eslint/plugin-kit@0.2.8': - resolution: {integrity: sha512-ZAoA40rNMPwSm+AeHpCq8STiNAwzWLJuP8Xv4CHIc9wv/PSuExjMrmjfYNj682vW0OOiZ1HKxzvjQr9XZIisQA==} - engines: {node: ^18.18.0 || ^20.9.0 || >=21.1.0} - '@eslint/plugin-kit@0.3.3': resolution: {integrity: sha512-1+WqvgNMhmlAambTvT3KPtCl/Ibr68VldY2XY40SL1CE0ZXiakFR/cbTspaF5HsnpDMvcYYoJHfl4980NBjGag==} engines: {node: ^18.18.0 || ^20.9.0 || >=21.1.0} + '@eslint/plugin-kit@0.3.4': + resolution: {integrity: sha512-Ul5l+lHEcw3L5+k8POx6r74mxEYKG5kOb6Xpy2gCRW6zweT6TEhAf8vhxGgjhqrd/VO/Dirhsb+1hNpD1ue9hw==} + engines: {node: ^18.18.0 || ^20.9.0 || >=21.1.0} + '@faker-js/faker@9.9.0': resolution: {integrity: sha512-OEl393iCOoo/z8bMezRlJu+GlRGlsKbUAN7jKB6LhnKoqKve5DXRpalbItIIcwnCjs1k/FOPjFzcA6Qn+H+YbA==} engines: {node: '>=18.0.0', npm: '>=9.0.0'} @@ -2115,8 +2110,8 @@ packages: '@next/env@15.3.5': resolution: {integrity: sha512-7g06v8BUVtN2njAX/r8gheoVffhiKFVt4nx74Tt6G4Hqw9HCLYQVx/GkH2qHvPtAHZaUNZ0VXAa0pQP6v1wk7g==} - '@next/eslint-plugin-next@15.3.5': - resolution: {integrity: sha512-BZwWPGfp9po/rAnJcwUBaM+yT/+yTWIkWdyDwc74G9jcfTrNrmsHe+hXHljV066YNdVs8cxROxX5IgMQGX190w==} + '@next/eslint-plugin-next@15.4.4': + resolution: {integrity: sha512-1FDsyN//ai3Jd97SEd7scw5h1yLdzDACGOPRofr2GD3sEFsBylEEoL0MHSerd4n2dq9Zm/mFMqi4+NRMOreOKA==} '@next/mdx@15.3.5': resolution: {integrity: sha512-/2rRCgPKNp2ttQscU13auI+cYYACdPa80Okgi/1+NNJJeWn9yVxwGnqZc3SX30T889bZbLqcY4oUjqYGAygL4g==} @@ -2825,8 +2820,8 @@ packages: peerDependencies: storybook: ^8.2.0 || ^8.3.0-0 || ^8.4.0-0 || ^8.5.0-0 || ^8.6.0-0 - '@stylistic/eslint-plugin@5.2.0': - resolution: {integrity: sha512-RCEdbREv9EBiToUBQTlRhVYKG093I6ZnnQ990j08eJ6uRZh71DXkOnoxtTLfDQ6utVCVQzrhZFHZP0zfrfOIjA==} + '@stylistic/eslint-plugin@5.2.2': + resolution: {integrity: sha512-bE2DUjruqXlHYP3Q2Gpqiuj2bHq7/88FnuaS0FjeGGLCy+X6a07bGVuwtiOYnPSLHR6jmx5Bwdv+j7l8H+G97A==} engines: {node: ^18.18.0 || ^20.9.0 || >=21.1.0} peerDependencies: eslint: '>=9.0.0' @@ -3219,16 +3214,16 @@ packages: '@types/yargs@17.0.33': resolution: {integrity: sha512-WpxBCKWPLr4xSsHgz511rFJAM+wS28w2zEO1QDNY5zM/S8ok70NNfztH0xwhqKyaK0OHCbN98LDAZuy1ctxDkA==} - '@typescript-eslint/eslint-plugin@8.37.0': - resolution: {integrity: sha512-jsuVWeIkb6ggzB+wPCsR4e6loj+rM72ohW6IBn2C+5NCvfUVY8s33iFPySSVXqtm5Hu29Ne/9bnA0JmyLmgenA==} + '@typescript-eslint/eslint-plugin@8.38.0': + resolution: {integrity: sha512-CPoznzpuAnIOl4nhj4tRr4gIPj5AfKgkiJmGQDaq+fQnRJTYlcBjbX3wbciGmpoPf8DREufuPRe1tNMZnGdanA==} engines: {node: ^18.18.0 || ^20.9.0 || >=21.1.0} peerDependencies: - '@typescript-eslint/parser': ^8.37.0 + '@typescript-eslint/parser': ^8.38.0 eslint: ^8.57.0 || ^9.0.0 typescript: '>=4.8.4 <5.9.0' - '@typescript-eslint/parser@8.37.0': - resolution: {integrity: sha512-kVIaQE9vrN9RLCQMQ3iyRlVJpTiDUY6woHGb30JDkfJErqrQEmtdWH3gV0PBAfGZgQXoqzXOO0T3K6ioApbbAA==} + '@typescript-eslint/parser@8.38.0': + resolution: {integrity: sha512-Zhy8HCvBUEfBECzIl1PKqF4p11+d0aUJS1GeUiuqK9WmOug8YCmC4h4bjyBvMyAMI9sbRczmrYL5lKg/YMbrcQ==} engines: {node: ^18.18.0 || ^20.9.0 || >=21.1.0} peerDependencies: eslint: ^8.57.0 || ^9.0.0 @@ -3240,16 +3235,32 @@ packages: peerDependencies: typescript: '>=4.8.4 <5.9.0' + '@typescript-eslint/project-service@8.38.0': + resolution: {integrity: sha512-dbK7Jvqcb8c9QfH01YB6pORpqX1mn5gDZc9n63Ak/+jD67oWXn3Gs0M6vddAN+eDXBCS5EmNWzbSxsn9SzFWWg==} + engines: {node: ^18.18.0 || ^20.9.0 || >=21.1.0} + peerDependencies: + typescript: '>=4.8.4 <5.9.0' + '@typescript-eslint/scope-manager@8.37.0': resolution: {integrity: sha512-0vGq0yiU1gbjKob2q691ybTg9JX6ShiVXAAfm2jGf3q0hdP6/BruaFjL/ManAR/lj05AvYCH+5bbVo0VtzmjOA==} engines: {node: ^18.18.0 || ^20.9.0 || >=21.1.0} + '@typescript-eslint/scope-manager@8.38.0': + resolution: {integrity: sha512-WJw3AVlFFcdT9Ri1xs/lg8LwDqgekWXWhH3iAF+1ZM+QPd7oxQ6jvtW/JPwzAScxitILUIFs0/AnQ/UWHzbATQ==} + engines: {node: ^18.18.0 || ^20.9.0 || >=21.1.0} + '@typescript-eslint/tsconfig-utils@8.37.0': resolution: {integrity: sha512-1/YHvAVTimMM9mmlPvTec9NP4bobA1RkDbMydxG8omqwJJLEW/Iy2C4adsAESIXU3WGLXFHSZUU+C9EoFWl4Zg==} engines: {node: ^18.18.0 || ^20.9.0 || >=21.1.0} peerDependencies: typescript: '>=4.8.4 <5.9.0' + '@typescript-eslint/tsconfig-utils@8.38.0': + resolution: {integrity: sha512-Lum9RtSE3EroKk/bYns+sPOodqb2Fv50XOl/gMviMKNvanETUuUcC9ObRbzrJ4VSd2JalPqgSAavwrPiPvnAiQ==} + engines: {node: ^18.18.0 || ^20.9.0 || >=21.1.0} + peerDependencies: + typescript: '>=4.8.4 <5.9.0' + '@typescript-eslint/type-utils@8.37.0': resolution: {integrity: sha512-SPkXWIkVZxhgwSwVq9rqj/4VFo7MnWwVaRNznfQDc/xPYHjXnPfLWn+4L6FF1cAz6e7dsqBeMawgl7QjUMj4Ow==} engines: {node: ^18.18.0 || ^20.9.0 || >=21.1.0} @@ -3257,16 +3268,33 @@ packages: eslint: ^8.57.0 || ^9.0.0 typescript: '>=4.8.4 <5.9.0' + '@typescript-eslint/type-utils@8.38.0': + resolution: {integrity: sha512-c7jAvGEZVf0ao2z+nnz8BUaHZD09Agbh+DY7qvBQqLiz8uJzRgVPj5YvOh8I8uEiH8oIUGIfHzMwUcGVco/SJg==} + engines: {node: ^18.18.0 || ^20.9.0 || >=21.1.0} + peerDependencies: + eslint: ^8.57.0 || ^9.0.0 + typescript: '>=4.8.4 <5.9.0' + '@typescript-eslint/types@8.37.0': resolution: {integrity: sha512-ax0nv7PUF9NOVPs+lmQ7yIE7IQmAf8LGcXbMvHX5Gm+YJUYNAl340XkGnrimxZ0elXyoQJuN5sbg6C4evKA4SQ==} engines: {node: ^18.18.0 || ^20.9.0 || >=21.1.0} + '@typescript-eslint/types@8.38.0': + resolution: {integrity: sha512-wzkUfX3plUqij4YwWaJyqhiPE5UCRVlFpKn1oCRn2O1bJ592XxWJj8ROQ3JD5MYXLORW84063z3tZTb/cs4Tyw==} + engines: {node: ^18.18.0 || ^20.9.0 || >=21.1.0} + '@typescript-eslint/typescript-estree@8.37.0': resolution: {integrity: sha512-zuWDMDuzMRbQOM+bHyU4/slw27bAUEcKSKKs3hcv2aNnc/tvE/h7w60dwVw8vnal2Pub6RT1T7BI8tFZ1fE+yg==} engines: {node: ^18.18.0 || ^20.9.0 || >=21.1.0} peerDependencies: typescript: '>=4.8.4 <5.9.0' + '@typescript-eslint/typescript-estree@8.38.0': + resolution: {integrity: sha512-fooELKcAKzxux6fA6pxOflpNS0jc+nOQEEOipXFNjSlBS6fqrJOVY/whSn70SScHrcJ2LDsxWrneFoWYSVfqhQ==} + engines: {node: ^18.18.0 || ^20.9.0 || >=21.1.0} + peerDependencies: + typescript: '>=4.8.4 <5.9.0' + '@typescript-eslint/utils@8.37.0': resolution: {integrity: sha512-TSFvkIW6gGjN2p6zbXo20FzCABbyUAuq6tBvNRGsKdsSQ6a7rnV6ADfZ7f4iI3lIiXc4F4WWvtUfDw9CJ9pO5A==} engines: {node: ^18.18.0 || ^20.9.0 || >=21.1.0} @@ -3274,10 +3302,21 @@ packages: eslint: ^8.57.0 || ^9.0.0 typescript: '>=4.8.4 <5.9.0' + '@typescript-eslint/utils@8.38.0': + resolution: {integrity: sha512-hHcMA86Hgt+ijJlrD8fX0j1j8w4C92zue/8LOPAFioIno+W0+L7KqE8QZKCcPGc/92Vs9x36w/4MPTJhqXdyvg==} + engines: {node: ^18.18.0 || ^20.9.0 || >=21.1.0} + peerDependencies: + eslint: ^8.57.0 || ^9.0.0 + typescript: '>=4.8.4 <5.9.0' + '@typescript-eslint/visitor-keys@8.37.0': resolution: {integrity: sha512-YzfhzcTnZVPiLfP/oeKtDp2evwvHLMe0LOy7oe+hb9KKIumLNohYS9Hgp1ifwpu42YWxhZE8yieggz6JpqO/1w==} engines: {node: ^18.18.0 || ^20.9.0 || >=21.1.0} + '@typescript-eslint/visitor-keys@8.38.0': + resolution: {integrity: sha512-pWrTcoFNWuwHlA9CvlfSsGWs14JxfN1TH25zM5L7o0pRLhsoZkDnTsXfQRJBEWJoV5DL0jf+Z+sxiud+K0mq1g==} + engines: {node: ^18.18.0 || ^20.9.0 || >=21.1.0} + '@ungap/structured-clone@1.3.0': resolution: {integrity: sha512-WmoN8qaIAo7WTYWbAZuG8PYEhn5fkz7dZrqTBZ7dtt//lL2Gwms1IcnQ5yHqjDfX8Ft5j4YzDM23f87zBfDe9g==} @@ -3876,6 +3915,9 @@ packages: resolution: {integrity: sha512-zgVZuo2WcZgfUEmsn6eO3kINexW8RAE4maiQ8QNs8CtpPCSyMiYsULR3HQYkm3w8FIA3SberyMJMSldGsW+U3w==} engines: {node: ^12.17.0 || ^14.13 || >=16.0.0} + change-case@5.4.4: + resolution: {integrity: sha512-HRQyTk2/YPEkt9TnUPbOpr64Uw3KOicFWPVBb+xiHvd6eBx/qPr9xqfBFDT8P2vWsvvz4jbEkfDe71W3VyNu2w==} + char-regex@1.0.2: resolution: {integrity: sha512-kWWXztvZ5SBQV+eRgKFeh8q5sLuZY2+8WUIzlxWVTg+oGwY14qylx1KbKzHd8P6ZYkAg0xyIDU9JMHhyJMZ1jw==} engines: {node: '>=10'} @@ -4649,8 +4691,8 @@ packages: peerDependencies: eslint: ^9.5.0 - eslint-config-next@15.3.5: - resolution: {integrity: sha512-oQdvnIgP68wh2RlR3MdQpvaJ94R6qEFl+lnu8ZKxPj5fsAHrSF/HlAOZcsimLw3DT6bnEQIUdbZC2Ab6sWyptg==} + eslint-config-next@15.4.4: + resolution: {integrity: sha512-sK/lWLUVF5om18O5w76Jt3F8uzu/LP5mVa6TprCMWkjWHUmByq80iHGHcdH7k1dLiJlj+DRIWf98d5piwRsSuA==} peerDependencies: eslint: ^7.23.0 || ^8.0.0 || ^9.0.0 typescript: '>=3.3.1' @@ -4787,8 +4829,8 @@ packages: peerDependencies: eslint: '>=8.45.0' - eslint-plugin-pnpm@1.0.0: - resolution: {integrity: sha512-tyEA10k7psB9HFCx8R4/bU4JS2tSKfXaCnrCcis+1R4FucfMIc6HgcFl4msZbwY2I0D9Vec3xAEkXV0aPechhQ==} + eslint-plugin-pnpm@1.1.0: + resolution: {integrity: sha512-sL93w0muBtjnogzk/loDsxzMbmXQOLP5Blw3swLDBXZgfb+qQI73bPcUbjVR+ZL+K62vGJdErV+43i3r5DsZPg==} peerDependencies: eslint: ^9.0.0 @@ -4901,11 +4943,11 @@ packages: peerDependencies: eslint: '>=6.0.0' - eslint-plugin-unicorn@59.0.1: - resolution: {integrity: sha512-EtNXYuWPUmkgSU2E7Ttn57LbRREQesIP1BiLn7OZLKodopKfDXfBUkC/0j6mpw2JExwf43Uf3qLSvrSvppgy8Q==} - engines: {node: ^18.20.0 || ^20.10.0 || >=21.0.0} + eslint-plugin-unicorn@60.0.0: + resolution: {integrity: sha512-QUzTefvP8stfSXsqKQ+vBQSEsXIlAiCduS/V1Em+FKgL9c21U/IIm20/e3MFy1jyCf14tHAhqC1sX8OTy6VUCg==} + engines: {node: ^20.10.0 || >=21.0.0} peerDependencies: - eslint: '>=9.22.0' + eslint: '>=9.29.0' eslint-plugin-unused-imports@4.1.4: resolution: {integrity: sha512-YptD6IzQjDardkl0POxnnRBhU1OEePMV0nd6siHaRBbd+lyh6NAhFEobiznKU7kTsSsDeSD62Pe7kAM1b7dAZQ==} @@ -6691,8 +6733,8 @@ packages: resolution: {integrity: sha512-2Rb3vm+EXble/sMXNSu6eoBx8e79gKqhNq9F5ZWW6ERNCTE/Q0wQNne5541tE5vKjfM8hpNCYL+LGc1YTfI0dg==} engines: {node: '>=6'} - pnpm-workspace-yaml@1.0.0: - resolution: {integrity: sha512-2RKg3khFgX/oeKIQnxxlj+OUoKbaZjBt7EsmQiLfl8AHZKMIpLmXLRPptZ5eq2Rlumh2gILs6OWNky5dzP+f8A==} + pnpm-workspace-yaml@1.1.0: + resolution: {integrity: sha512-OWUzBxtitpyUV0fBYYwLAfWxn3mSzVbVB7cwgNaHvTTU9P0V2QHjyaY5i7f1hEiT9VeKsNH1Skfhe2E3lx/zhA==} points-on-curve@0.2.0: resolution: {integrity: sha512-0mYKnYYe9ZcqMCWhUjItv/oHjvgEsfKvnUTg8sAtnHr3GVy7rGkXCb6d5cSyqrWqL4k81b9CPg3urd+T7aop3A==} @@ -7829,8 +7871,8 @@ packages: resolution: {integrity: sha512-RAH822pAdBgcNMAfWnCBU3CFZcfZ/i1eZjwFU/dsLKumyuuP3niueg2UAukXYF0E2AAoc82ZSSf9J0WQBinzHA==} engines: {node: '>=12.20'} - typescript-eslint@8.37.0: - resolution: {integrity: sha512-TnbEjzkE9EmcO0Q2zM+GE8NQLItNAJpMmED1BdgoBMYNdqMhzlbqfdSwiRlAzEK2pA9UzVW0gzaaIzXWg2BjfA==} + typescript-eslint@8.38.0: + resolution: {integrity: sha512-FsZlrYK6bPDGoLeZRuvx2v6qrM03I0U0SnfCLPs/XCCPCFD80xU9Pg09H/K+XFa68uJuZo7l/Xhs+eDRg2l3hg==} engines: {node: ^18.18.0 || ^20.9.0 || >=21.1.0} peerDependencies: eslint: ^8.57.0 || ^9.0.0 @@ -8243,15 +8285,15 @@ snapshots: '@jridgewell/gen-mapping': 0.3.12 '@jridgewell/trace-mapping': 0.3.29 - '@antfu/eslint-config@4.17.0(@eslint-react/eslint-plugin@1.52.3(eslint@9.31.0(jiti@1.21.7))(ts-api-utils@2.1.0(typescript@5.8.3))(typescript@5.8.3))(@vue/compiler-sfc@3.5.17)(eslint-plugin-react-hooks@5.2.0(eslint@9.31.0(jiti@1.21.7)))(eslint-plugin-react-refresh@0.4.20(eslint@9.31.0(jiti@1.21.7)))(eslint@9.31.0(jiti@1.21.7))(typescript@5.8.3)': + '@antfu/eslint-config@5.0.0(@eslint-react/eslint-plugin@1.52.3(eslint@9.31.0(jiti@1.21.7))(ts-api-utils@2.1.0(typescript@5.8.3))(typescript@5.8.3))(@next/eslint-plugin-next@15.4.4)(@vue/compiler-sfc@3.5.17)(eslint-plugin-react-hooks@5.2.0(eslint@9.31.0(jiti@1.21.7)))(eslint-plugin-react-refresh@0.4.20(eslint@9.31.0(jiti@1.21.7)))(eslint@9.31.0(jiti@1.21.7))(typescript@5.8.3)': dependencies: '@antfu/install-pkg': 1.1.0 '@clack/prompts': 0.11.0 '@eslint-community/eslint-plugin-eslint-comments': 4.5.0(eslint@9.31.0(jiti@1.21.7)) - '@eslint/markdown': 7.0.0 - '@stylistic/eslint-plugin': 5.2.0(eslint@9.31.0(jiti@1.21.7)) - '@typescript-eslint/eslint-plugin': 8.37.0(@typescript-eslint/parser@8.37.0(eslint@9.31.0(jiti@1.21.7))(typescript@5.8.3))(eslint@9.31.0(jiti@1.21.7))(typescript@5.8.3) - '@typescript-eslint/parser': 8.37.0(eslint@9.31.0(jiti@1.21.7))(typescript@5.8.3) + '@eslint/markdown': 7.1.0 + '@stylistic/eslint-plugin': 5.2.2(eslint@9.31.0(jiti@1.21.7)) + '@typescript-eslint/eslint-plugin': 8.38.0(@typescript-eslint/parser@8.38.0(eslint@9.31.0(jiti@1.21.7))(typescript@5.8.3))(eslint@9.31.0(jiti@1.21.7))(typescript@5.8.3) + '@typescript-eslint/parser': 8.38.0(eslint@9.31.0(jiti@1.21.7))(typescript@5.8.3) '@vitest/eslint-plugin': 1.3.4(eslint@9.31.0(jiti@1.21.7))(typescript@5.8.3) ansis: 4.1.0 cac: 6.7.14 @@ -8267,12 +8309,12 @@ snapshots: eslint-plugin-n: 17.21.0(eslint@9.31.0(jiti@1.21.7))(typescript@5.8.3) eslint-plugin-no-only-tests: 3.3.0 eslint-plugin-perfectionist: 4.15.0(eslint@9.31.0(jiti@1.21.7))(typescript@5.8.3) - eslint-plugin-pnpm: 1.0.0(eslint@9.31.0(jiti@1.21.7)) + eslint-plugin-pnpm: 1.1.0(eslint@9.31.0(jiti@1.21.7)) eslint-plugin-regexp: 2.9.0(eslint@9.31.0(jiti@1.21.7)) eslint-plugin-toml: 0.12.0(eslint@9.31.0(jiti@1.21.7)) - eslint-plugin-unicorn: 59.0.1(eslint@9.31.0(jiti@1.21.7)) - eslint-plugin-unused-imports: 4.1.4(@typescript-eslint/eslint-plugin@8.37.0(@typescript-eslint/parser@8.37.0(eslint@9.31.0(jiti@1.21.7))(typescript@5.8.3))(eslint@9.31.0(jiti@1.21.7))(typescript@5.8.3))(eslint@9.31.0(jiti@1.21.7)) - eslint-plugin-vue: 10.3.0(@typescript-eslint/parser@8.37.0(eslint@9.31.0(jiti@1.21.7))(typescript@5.8.3))(eslint@9.31.0(jiti@1.21.7))(vue-eslint-parser@10.2.0(eslint@9.31.0(jiti@1.21.7))) + eslint-plugin-unicorn: 60.0.0(eslint@9.31.0(jiti@1.21.7)) + eslint-plugin-unused-imports: 4.1.4(@typescript-eslint/eslint-plugin@8.38.0(@typescript-eslint/parser@8.38.0(eslint@9.31.0(jiti@1.21.7))(typescript@5.8.3))(eslint@9.31.0(jiti@1.21.7))(typescript@5.8.3))(eslint@9.31.0(jiti@1.21.7)) + eslint-plugin-vue: 10.3.0(@typescript-eslint/parser@8.38.0(eslint@9.31.0(jiti@1.21.7))(typescript@5.8.3))(eslint@9.31.0(jiti@1.21.7))(vue-eslint-parser@10.2.0(eslint@9.31.0(jiti@1.21.7))) eslint-plugin-yml: 1.18.0(eslint@9.31.0(jiti@1.21.7)) eslint-processor-vue-blocks: 2.0.0(@vue/compiler-sfc@3.5.17)(eslint@9.31.0(jiti@1.21.7)) globals: 16.3.0 @@ -8284,6 +8326,7 @@ snapshots: yaml-eslint-parser: 1.3.0 optionalDependencies: '@eslint-react/eslint-plugin': 1.52.3(eslint@9.31.0(jiti@1.21.7))(ts-api-utils@2.1.0(typescript@5.8.3))(typescript@5.8.3) + '@next/eslint-plugin-next': 15.4.4 eslint-plugin-react-hooks: 5.2.0(eslint@9.31.0(jiti@1.21.7)) eslint-plugin-react-refresh: 0.4.20(eslint@9.31.0(jiti@1.21.7)) transitivePeerDependencies: @@ -9192,7 +9235,7 @@ snapshots: '@es-joy/jsdoccomment@0.50.2': dependencies: '@types/estree': 1.0.8 - '@typescript-eslint/types': 8.37.0 + '@typescript-eslint/types': 8.38.0 comment-parser: 1.4.1 esquery: 1.6.0 jsdoc-type-pratt-parser: 4.1.0 @@ -9200,7 +9243,7 @@ snapshots: '@es-joy/jsdoccomment@0.52.0': dependencies: '@types/estree': 1.0.8 - '@typescript-eslint/types': 8.37.0 + '@typescript-eslint/types': 8.38.0 comment-parser: 1.4.1 esquery: 1.6.0 jsdoc-type-pratt-parser: 4.1.0 @@ -9399,14 +9442,6 @@ snapshots: '@eslint/config-helpers@0.3.0': {} - '@eslint/core@0.13.0': - dependencies: - '@types/json-schema': 7.0.15 - - '@eslint/core@0.14.0': - dependencies: - '@types/json-schema': 7.0.15 - '@eslint/core@0.15.1': dependencies: '@types/json-schema': 7.0.15 @@ -9427,10 +9462,10 @@ snapshots: '@eslint/js@9.31.0': {} - '@eslint/markdown@7.0.0': + '@eslint/markdown@7.1.0': dependencies: - '@eslint/core': 0.14.0 - '@eslint/plugin-kit': 0.3.3 + '@eslint/core': 0.15.1 + '@eslint/plugin-kit': 0.3.4 github-slugger: 2.0.0 mdast-util-from-markdown: 2.0.2 mdast-util-frontmatter: 2.0.1 @@ -9442,12 +9477,12 @@ snapshots: '@eslint/object-schema@2.1.6': {} - '@eslint/plugin-kit@0.2.8': + '@eslint/plugin-kit@0.3.3': dependencies: - '@eslint/core': 0.13.0 + '@eslint/core': 0.15.1 levn: 0.4.1 - '@eslint/plugin-kit@0.3.3': + '@eslint/plugin-kit@0.3.4': dependencies: '@eslint/core': 0.15.1 levn: 0.4.1 @@ -10151,7 +10186,7 @@ snapshots: '@next/env@15.3.5': {} - '@next/eslint-plugin-next@15.3.5': + '@next/eslint-plugin-next@15.4.4': dependencies: fast-glob: 3.3.1 @@ -11003,10 +11038,10 @@ snapshots: dependencies: storybook: 8.5.0 - '@stylistic/eslint-plugin@5.2.0(eslint@9.31.0(jiti@1.21.7))': + '@stylistic/eslint-plugin@5.2.2(eslint@9.31.0(jiti@1.21.7))': dependencies: '@eslint-community/eslint-utils': 4.7.0(eslint@9.31.0(jiti@1.21.7)) - '@typescript-eslint/types': 8.37.0 + '@typescript-eslint/types': 8.38.0 eslint: 9.31.0(jiti@1.21.7) eslint-visitor-keys: 4.2.1 espree: 10.4.0 @@ -11441,14 +11476,14 @@ snapshots: dependencies: '@types/yargs-parser': 21.0.3 - '@typescript-eslint/eslint-plugin@8.37.0(@typescript-eslint/parser@8.37.0(eslint@9.31.0(jiti@1.21.7))(typescript@5.8.3))(eslint@9.31.0(jiti@1.21.7))(typescript@5.8.3)': + '@typescript-eslint/eslint-plugin@8.38.0(@typescript-eslint/parser@8.38.0(eslint@9.31.0(jiti@1.21.7))(typescript@5.8.3))(eslint@9.31.0(jiti@1.21.7))(typescript@5.8.3)': dependencies: '@eslint-community/regexpp': 4.12.1 - '@typescript-eslint/parser': 8.37.0(eslint@9.31.0(jiti@1.21.7))(typescript@5.8.3) - '@typescript-eslint/scope-manager': 8.37.0 - '@typescript-eslint/type-utils': 8.37.0(eslint@9.31.0(jiti@1.21.7))(typescript@5.8.3) - '@typescript-eslint/utils': 8.37.0(eslint@9.31.0(jiti@1.21.7))(typescript@5.8.3) - '@typescript-eslint/visitor-keys': 8.37.0 + '@typescript-eslint/parser': 8.38.0(eslint@9.31.0(jiti@1.21.7))(typescript@5.8.3) + '@typescript-eslint/scope-manager': 8.38.0 + '@typescript-eslint/type-utils': 8.38.0(eslint@9.31.0(jiti@1.21.7))(typescript@5.8.3) + '@typescript-eslint/utils': 8.38.0(eslint@9.31.0(jiti@1.21.7))(typescript@5.8.3) + '@typescript-eslint/visitor-keys': 8.38.0 eslint: 9.31.0(jiti@1.21.7) graphemer: 1.4.0 ignore: 7.0.5 @@ -11458,12 +11493,12 @@ snapshots: transitivePeerDependencies: - supports-color - '@typescript-eslint/parser@8.37.0(eslint@9.31.0(jiti@1.21.7))(typescript@5.8.3)': + '@typescript-eslint/parser@8.38.0(eslint@9.31.0(jiti@1.21.7))(typescript@5.8.3)': dependencies: - '@typescript-eslint/scope-manager': 8.37.0 - '@typescript-eslint/types': 8.37.0 - '@typescript-eslint/typescript-estree': 8.37.0(typescript@5.8.3) - '@typescript-eslint/visitor-keys': 8.37.0 + '@typescript-eslint/scope-manager': 8.38.0 + '@typescript-eslint/types': 8.38.0 + '@typescript-eslint/typescript-estree': 8.38.0(typescript@5.8.3) + '@typescript-eslint/visitor-keys': 8.38.0 debug: 4.4.1 eslint: 9.31.0(jiti@1.21.7) typescript: 5.8.3 @@ -11479,15 +11514,33 @@ snapshots: transitivePeerDependencies: - supports-color + '@typescript-eslint/project-service@8.38.0(typescript@5.8.3)': + dependencies: + '@typescript-eslint/tsconfig-utils': 8.38.0(typescript@5.8.3) + '@typescript-eslint/types': 8.38.0 + debug: 4.4.1 + typescript: 5.8.3 + transitivePeerDependencies: + - supports-color + '@typescript-eslint/scope-manager@8.37.0': dependencies: '@typescript-eslint/types': 8.37.0 '@typescript-eslint/visitor-keys': 8.37.0 + '@typescript-eslint/scope-manager@8.38.0': + dependencies: + '@typescript-eslint/types': 8.38.0 + '@typescript-eslint/visitor-keys': 8.38.0 + '@typescript-eslint/tsconfig-utils@8.37.0(typescript@5.8.3)': dependencies: typescript: 5.8.3 + '@typescript-eslint/tsconfig-utils@8.38.0(typescript@5.8.3)': + dependencies: + typescript: 5.8.3 + '@typescript-eslint/type-utils@8.37.0(eslint@9.31.0(jiti@1.21.7))(typescript@5.8.3)': dependencies: '@typescript-eslint/types': 8.37.0 @@ -11500,8 +11553,22 @@ snapshots: transitivePeerDependencies: - supports-color + '@typescript-eslint/type-utils@8.38.0(eslint@9.31.0(jiti@1.21.7))(typescript@5.8.3)': + dependencies: + '@typescript-eslint/types': 8.38.0 + '@typescript-eslint/typescript-estree': 8.38.0(typescript@5.8.3) + '@typescript-eslint/utils': 8.38.0(eslint@9.31.0(jiti@1.21.7))(typescript@5.8.3) + debug: 4.4.1 + eslint: 9.31.0(jiti@1.21.7) + ts-api-utils: 2.1.0(typescript@5.8.3) + typescript: 5.8.3 + transitivePeerDependencies: + - supports-color + '@typescript-eslint/types@8.37.0': {} + '@typescript-eslint/types@8.38.0': {} + '@typescript-eslint/typescript-estree@8.37.0(typescript@5.8.3)': dependencies: '@typescript-eslint/project-service': 8.37.0(typescript@5.8.3) @@ -11518,6 +11585,22 @@ snapshots: transitivePeerDependencies: - supports-color + '@typescript-eslint/typescript-estree@8.38.0(typescript@5.8.3)': + dependencies: + '@typescript-eslint/project-service': 8.38.0(typescript@5.8.3) + '@typescript-eslint/tsconfig-utils': 8.38.0(typescript@5.8.3) + '@typescript-eslint/types': 8.38.0 + '@typescript-eslint/visitor-keys': 8.38.0 + debug: 4.4.1 + fast-glob: 3.3.3 + is-glob: 4.0.3 + minimatch: 9.0.5 + semver: 7.7.2 + ts-api-utils: 2.1.0(typescript@5.8.3) + typescript: 5.8.3 + transitivePeerDependencies: + - supports-color + '@typescript-eslint/utils@8.37.0(eslint@9.31.0(jiti@1.21.7))(typescript@5.8.3)': dependencies: '@eslint-community/eslint-utils': 4.7.0(eslint@9.31.0(jiti@1.21.7)) @@ -11529,11 +11612,27 @@ snapshots: transitivePeerDependencies: - supports-color + '@typescript-eslint/utils@8.38.0(eslint@9.31.0(jiti@1.21.7))(typescript@5.8.3)': + dependencies: + '@eslint-community/eslint-utils': 4.7.0(eslint@9.31.0(jiti@1.21.7)) + '@typescript-eslint/scope-manager': 8.38.0 + '@typescript-eslint/types': 8.38.0 + '@typescript-eslint/typescript-estree': 8.38.0(typescript@5.8.3) + eslint: 9.31.0(jiti@1.21.7) + typescript: 5.8.3 + transitivePeerDependencies: + - supports-color + '@typescript-eslint/visitor-keys@8.37.0': dependencies: '@typescript-eslint/types': 8.37.0 eslint-visitor-keys: 4.2.1 + '@typescript-eslint/visitor-keys@8.38.0': + dependencies: + '@typescript-eslint/types': 8.38.0 + eslint-visitor-keys: 4.2.1 + '@ungap/structured-clone@1.3.0': {} '@unrs/resolver-binding-android-arm-eabi@1.11.1': @@ -11597,7 +11696,7 @@ snapshots: '@vitest/eslint-plugin@1.3.4(eslint@9.31.0(jiti@1.21.7))(typescript@5.8.3)': dependencies: - '@typescript-eslint/utils': 8.37.0(eslint@9.31.0(jiti@1.21.7))(typescript@5.8.3) + '@typescript-eslint/utils': 8.38.0(eslint@9.31.0(jiti@1.21.7))(typescript@5.8.3) eslint: 9.31.0(jiti@1.21.7) optionalDependencies: typescript: 5.8.3 @@ -12187,6 +12286,8 @@ snapshots: chalk@5.4.1: {} + change-case@5.4.4: {} + char-regex@1.0.2: {} character-entities-html4@2.1.0: {} @@ -12990,16 +13091,16 @@ snapshots: '@eslint/compat': 1.3.1(eslint@9.31.0(jiti@1.21.7)) eslint: 9.31.0(jiti@1.21.7) - eslint-config-next@15.3.5(eslint@9.31.0(jiti@1.21.7))(typescript@5.8.3): + eslint-config-next@15.4.4(eslint@9.31.0(jiti@1.21.7))(typescript@5.8.3): dependencies: - '@next/eslint-plugin-next': 15.3.5 + '@next/eslint-plugin-next': 15.4.4 '@rushstack/eslint-patch': 1.12.0 - '@typescript-eslint/eslint-plugin': 8.37.0(@typescript-eslint/parser@8.37.0(eslint@9.31.0(jiti@1.21.7))(typescript@5.8.3))(eslint@9.31.0(jiti@1.21.7))(typescript@5.8.3) - '@typescript-eslint/parser': 8.37.0(eslint@9.31.0(jiti@1.21.7))(typescript@5.8.3) + '@typescript-eslint/eslint-plugin': 8.38.0(@typescript-eslint/parser@8.38.0(eslint@9.31.0(jiti@1.21.7))(typescript@5.8.3))(eslint@9.31.0(jiti@1.21.7))(typescript@5.8.3) + '@typescript-eslint/parser': 8.38.0(eslint@9.31.0(jiti@1.21.7))(typescript@5.8.3) eslint: 9.31.0(jiti@1.21.7) eslint-import-resolver-node: 0.3.9 eslint-import-resolver-typescript: 3.10.1(eslint-plugin-import@2.32.0)(eslint@9.31.0(jiti@1.21.7)) - eslint-plugin-import: 2.32.0(@typescript-eslint/parser@8.37.0(eslint@9.31.0(jiti@1.21.7))(typescript@5.8.3))(eslint-import-resolver-typescript@3.10.1)(eslint@9.31.0(jiti@1.21.7)) + eslint-plugin-import: 2.32.0(@typescript-eslint/parser@8.38.0(eslint@9.31.0(jiti@1.21.7))(typescript@5.8.3))(eslint-import-resolver-typescript@3.10.1)(eslint@9.31.0(jiti@1.21.7)) eslint-plugin-jsx-a11y: 6.10.2(eslint@9.31.0(jiti@1.21.7)) eslint-plugin-react: 7.37.5(eslint@9.31.0(jiti@1.21.7)) eslint-plugin-react-hooks: 5.2.0(eslint@9.31.0(jiti@1.21.7)) @@ -13033,7 +13134,7 @@ snapshots: tinyglobby: 0.2.14 unrs-resolver: 1.11.1 optionalDependencies: - eslint-plugin-import: 2.32.0(@typescript-eslint/parser@8.37.0(eslint@9.31.0(jiti@1.21.7))(typescript@5.8.3))(eslint-import-resolver-typescript@3.10.1)(eslint@9.31.0(jiti@1.21.7)) + eslint-plugin-import: 2.32.0(@typescript-eslint/parser@8.38.0(eslint@9.31.0(jiti@1.21.7))(typescript@5.8.3))(eslint-import-resolver-typescript@3.10.1)(eslint@9.31.0(jiti@1.21.7)) transitivePeerDependencies: - supports-color @@ -13047,11 +13148,11 @@ snapshots: dependencies: eslint: 9.31.0(jiti@1.21.7) - eslint-module-utils@2.12.1(@typescript-eslint/parser@8.37.0(eslint@9.31.0(jiti@1.21.7))(typescript@5.8.3))(eslint-import-resolver-node@0.3.9)(eslint-import-resolver-typescript@3.10.1)(eslint@9.31.0(jiti@1.21.7)): + eslint-module-utils@2.12.1(@typescript-eslint/parser@8.38.0(eslint@9.31.0(jiti@1.21.7))(typescript@5.8.3))(eslint-import-resolver-node@0.3.9)(eslint-import-resolver-typescript@3.10.1)(eslint@9.31.0(jiti@1.21.7)): dependencies: debug: 3.2.7 optionalDependencies: - '@typescript-eslint/parser': 8.37.0(eslint@9.31.0(jiti@1.21.7))(typescript@5.8.3) + '@typescript-eslint/parser': 8.38.0(eslint@9.31.0(jiti@1.21.7))(typescript@5.8.3) eslint: 9.31.0(jiti@1.21.7) eslint-import-resolver-node: 0.3.9 eslint-import-resolver-typescript: 3.10.1(eslint-plugin-import@2.32.0)(eslint@9.31.0(jiti@1.21.7)) @@ -13077,12 +13178,12 @@ snapshots: eslint-plugin-import-lite@0.3.0(eslint@9.31.0(jiti@1.21.7))(typescript@5.8.3): dependencies: '@eslint-community/eslint-utils': 4.7.0(eslint@9.31.0(jiti@1.21.7)) - '@typescript-eslint/types': 8.37.0 + '@typescript-eslint/types': 8.38.0 eslint: 9.31.0(jiti@1.21.7) optionalDependencies: typescript: 5.8.3 - eslint-plugin-import@2.32.0(@typescript-eslint/parser@8.37.0(eslint@9.31.0(jiti@1.21.7))(typescript@5.8.3))(eslint-import-resolver-typescript@3.10.1)(eslint@9.31.0(jiti@1.21.7)): + eslint-plugin-import@2.32.0(@typescript-eslint/parser@8.38.0(eslint@9.31.0(jiti@1.21.7))(typescript@5.8.3))(eslint-import-resolver-typescript@3.10.1)(eslint@9.31.0(jiti@1.21.7)): dependencies: '@rtsao/scc': 1.1.0 array-includes: '@nolyfill/array-includes@1.0.44' @@ -13093,7 +13194,7 @@ snapshots: doctrine: 2.1.0 eslint: 9.31.0(jiti@1.21.7) eslint-import-resolver-node: 0.3.9 - eslint-module-utils: 2.12.1(@typescript-eslint/parser@8.37.0(eslint@9.31.0(jiti@1.21.7))(typescript@5.8.3))(eslint-import-resolver-node@0.3.9)(eslint-import-resolver-typescript@3.10.1)(eslint@9.31.0(jiti@1.21.7)) + eslint-module-utils: 2.12.1(@typescript-eslint/parser@8.38.0(eslint@9.31.0(jiti@1.21.7))(typescript@5.8.3))(eslint-import-resolver-node@0.3.9)(eslint-import-resolver-typescript@3.10.1)(eslint@9.31.0(jiti@1.21.7)) hasown: '@nolyfill/hasown@1.0.44' is-core-module: '@nolyfill/is-core-module@1.0.39' is-glob: 4.0.3 @@ -13105,7 +13206,7 @@ snapshots: string.prototype.trimend: '@nolyfill/string.prototype.trimend@1.0.44' tsconfig-paths: 3.15.0 optionalDependencies: - '@typescript-eslint/parser': 8.37.0(eslint@9.31.0(jiti@1.21.7))(typescript@5.8.3) + '@typescript-eslint/parser': 8.38.0(eslint@9.31.0(jiti@1.21.7))(typescript@5.8.3) transitivePeerDependencies: - eslint-import-resolver-typescript - eslint-import-resolver-webpack @@ -13183,21 +13284,21 @@ snapshots: eslint-plugin-perfectionist@4.15.0(eslint@9.31.0(jiti@1.21.7))(typescript@5.8.3): dependencies: - '@typescript-eslint/types': 8.37.0 - '@typescript-eslint/utils': 8.37.0(eslint@9.31.0(jiti@1.21.7))(typescript@5.8.3) + '@typescript-eslint/types': 8.38.0 + '@typescript-eslint/utils': 8.38.0(eslint@9.31.0(jiti@1.21.7))(typescript@5.8.3) eslint: 9.31.0(jiti@1.21.7) natural-orderby: 5.0.0 transitivePeerDependencies: - supports-color - typescript - eslint-plugin-pnpm@1.0.0(eslint@9.31.0(jiti@1.21.7)): + eslint-plugin-pnpm@1.1.0(eslint@9.31.0(jiti@1.21.7)): dependencies: eslint: 9.31.0(jiti@1.21.7) find-up-simple: 1.0.1 jsonc-eslint-parser: 2.4.0 pathe: 2.0.3 - pnpm-workspace-yaml: 1.0.0 + pnpm-workspace-yaml: 1.1.0 tinyglobby: 0.2.14 yaml-eslint-parser: 1.3.0 @@ -13404,11 +13505,12 @@ snapshots: transitivePeerDependencies: - supports-color - eslint-plugin-unicorn@59.0.1(eslint@9.31.0(jiti@1.21.7)): + eslint-plugin-unicorn@60.0.0(eslint@9.31.0(jiti@1.21.7)): dependencies: '@babel/helper-validator-identifier': 7.27.1 '@eslint-community/eslint-utils': 4.7.0(eslint@9.31.0(jiti@1.21.7)) - '@eslint/plugin-kit': 0.2.8 + '@eslint/plugin-kit': 0.3.4 + change-case: 5.4.4 ci-info: 4.3.0 clean-regexp: 1.0.0 core-js-compat: 3.44.0 @@ -13425,13 +13527,13 @@ snapshots: semver: 7.7.2 strip-indent: 4.0.0 - eslint-plugin-unused-imports@4.1.4(@typescript-eslint/eslint-plugin@8.37.0(@typescript-eslint/parser@8.37.0(eslint@9.31.0(jiti@1.21.7))(typescript@5.8.3))(eslint@9.31.0(jiti@1.21.7))(typescript@5.8.3))(eslint@9.31.0(jiti@1.21.7)): + eslint-plugin-unused-imports@4.1.4(@typescript-eslint/eslint-plugin@8.38.0(@typescript-eslint/parser@8.38.0(eslint@9.31.0(jiti@1.21.7))(typescript@5.8.3))(eslint@9.31.0(jiti@1.21.7))(typescript@5.8.3))(eslint@9.31.0(jiti@1.21.7)): dependencies: eslint: 9.31.0(jiti@1.21.7) optionalDependencies: - '@typescript-eslint/eslint-plugin': 8.37.0(@typescript-eslint/parser@8.37.0(eslint@9.31.0(jiti@1.21.7))(typescript@5.8.3))(eslint@9.31.0(jiti@1.21.7))(typescript@5.8.3) + '@typescript-eslint/eslint-plugin': 8.38.0(@typescript-eslint/parser@8.38.0(eslint@9.31.0(jiti@1.21.7))(typescript@5.8.3))(eslint@9.31.0(jiti@1.21.7))(typescript@5.8.3) - eslint-plugin-vue@10.3.0(@typescript-eslint/parser@8.37.0(eslint@9.31.0(jiti@1.21.7))(typescript@5.8.3))(eslint@9.31.0(jiti@1.21.7))(vue-eslint-parser@10.2.0(eslint@9.31.0(jiti@1.21.7))): + eslint-plugin-vue@10.3.0(@typescript-eslint/parser@8.38.0(eslint@9.31.0(jiti@1.21.7))(typescript@5.8.3))(eslint@9.31.0(jiti@1.21.7))(vue-eslint-parser@10.2.0(eslint@9.31.0(jiti@1.21.7))): dependencies: '@eslint-community/eslint-utils': 4.7.0(eslint@9.31.0(jiti@1.21.7)) eslint: 9.31.0(jiti@1.21.7) @@ -13442,7 +13544,7 @@ snapshots: vue-eslint-parser: 10.2.0(eslint@9.31.0(jiti@1.21.7)) xml-name-validator: 4.0.0 optionalDependencies: - '@typescript-eslint/parser': 8.37.0(eslint@9.31.0(jiti@1.21.7))(typescript@5.8.3) + '@typescript-eslint/parser': 8.38.0(eslint@9.31.0(jiti@1.21.7))(typescript@5.8.3) eslint-plugin-yml@1.18.0(eslint@9.31.0(jiti@1.21.7)): dependencies: @@ -15832,7 +15934,7 @@ snapshots: transitivePeerDependencies: - typescript - pnpm-workspace-yaml@1.0.0: + pnpm-workspace-yaml@1.1.0: dependencies: yaml: 2.8.0 @@ -17106,12 +17208,12 @@ snapshots: type-fest@2.19.0: {} - typescript-eslint@8.37.0(eslint@9.31.0(jiti@1.21.7))(typescript@5.8.3): + typescript-eslint@8.38.0(eslint@9.31.0(jiti@1.21.7))(typescript@5.8.3): dependencies: - '@typescript-eslint/eslint-plugin': 8.37.0(@typescript-eslint/parser@8.37.0(eslint@9.31.0(jiti@1.21.7))(typescript@5.8.3))(eslint@9.31.0(jiti@1.21.7))(typescript@5.8.3) - '@typescript-eslint/parser': 8.37.0(eslint@9.31.0(jiti@1.21.7))(typescript@5.8.3) - '@typescript-eslint/typescript-estree': 8.37.0(typescript@5.8.3) - '@typescript-eslint/utils': 8.37.0(eslint@9.31.0(jiti@1.21.7))(typescript@5.8.3) + '@typescript-eslint/eslint-plugin': 8.38.0(@typescript-eslint/parser@8.38.0(eslint@9.31.0(jiti@1.21.7))(typescript@5.8.3))(eslint@9.31.0(jiti@1.21.7))(typescript@5.8.3) + '@typescript-eslint/parser': 8.38.0(eslint@9.31.0(jiti@1.21.7))(typescript@5.8.3) + '@typescript-eslint/typescript-estree': 8.38.0(typescript@5.8.3) + '@typescript-eslint/utils': 8.38.0(eslint@9.31.0(jiti@1.21.7))(typescript@5.8.3) eslint: 9.31.0(jiti@1.21.7) typescript: 5.8.3 transitivePeerDependencies: From f72c03a174ba7be4440f88e645f78f7418e85403 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E5=91=86=E8=90=8C=E9=97=B7=E6=B2=B9=E7=93=B6?= <253605712@qq.com> Date: Mon, 28 Jul 2025 13:59:34 +0800 Subject: [PATCH 027/415] feat: Support selecting variables in conditional filtering in list operations. (#23029) Co-authored-by: crazywoola <100913391+crazywoola@users.noreply.github.com> Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com> Co-authored-by: crazywoola <427733928@qq.com> --- api/core/workflow/nodes/list_operator/node.py | 2 +- .../components/filter-condition.tsx | 72 +++++++++++++++---- .../workflow/nodes/list-operator/panel.tsx | 1 + 3 files changed, 61 insertions(+), 14 deletions(-) diff --git a/api/core/workflow/nodes/list_operator/node.py b/api/core/workflow/nodes/list_operator/node.py index b91fc622f6..d2e022dc9d 100644 --- a/api/core/workflow/nodes/list_operator/node.py +++ b/api/core/workflow/nodes/list_operator/node.py @@ -299,7 +299,7 @@ def _endswith(value: str) -> Callable[[str], bool]: def _is(value: str) -> Callable[[str], bool]: - return lambda x: x is value + return lambda x: x == value def _in(value: str | Sequence[str]) -> Callable[[str], bool]: diff --git a/web/app/components/workflow/nodes/list-operator/components/filter-condition.tsx b/web/app/components/workflow/nodes/list-operator/components/filter-condition.tsx index 0c261a70d6..a7ea6d78e7 100644 --- a/web/app/components/workflow/nodes/list-operator/components/filter-condition.tsx +++ b/web/app/components/workflow/nodes/list-operator/components/filter-condition.tsx @@ -1,36 +1,60 @@ 'use client' import type { FC } from 'react' -import React, { useCallback, useMemo } from 'react' +import React, { useCallback, useMemo, useState } from 'react' import { useTranslation } from 'react-i18next' import ConditionOperator from '../../if-else/components/condition-list/condition-operator' -import { VarType } from '../../../types' import type { Condition } from '../types' import { ComparisonOperator } from '../../if-else/types' import { comparisonOperatorNotRequireValue, getOperators } from '../../if-else/utils' import SubVariablePicker from './sub-variable-picker' -import Input from '@/app/components/base/input' import { FILE_TYPE_OPTIONS, TRANSFER_METHOD } from '@/app/components/workflow/nodes/constants' import { SimpleSelect as Select } from '@/app/components/base/select' +import Input from '@/app/components/workflow/nodes/_base/components/input-support-select-var' +import useAvailableVarList from '@/app/components/workflow/nodes/_base/hooks/use-available-var-list' +import cn from '@/utils/classnames' +import { VarType } from '../../../types' const optionNameI18NPrefix = 'workflow.nodes.ifElse.optionName' + +const VAR_INPUT_SUPPORTED_KEYS: Record = { + name: VarType.string, + url: VarType.string, + extension: VarType.string, + mime_type: VarType.string, + related_id: VarType.number, +} + type Props = { condition: Condition onChange: (condition: Condition) => void - varType: VarType hasSubVariable: boolean readOnly: boolean + nodeId: string } const FilterCondition: FC = ({ condition = { key: '', comparison_operator: ComparisonOperator.equal, value: '' }, - varType, onChange, hasSubVariable, readOnly, + nodeId, }) => { const { t } = useTranslation() + const [isFocus, setIsFocus] = useState(false) + + const expectedVarType = VAR_INPUT_SUPPORTED_KEYS[condition.key] + const supportVariableInput = !!expectedVarType + + const { availableVars, availableNodesWithParent } = useAvailableVarList(nodeId, { + onlyLeafNodeVar: false, + filterVar: (varPayload) => { + return expectedVarType ? varPayload.type === expectedVarType : true + }, + }) + const isSelect = [ComparisonOperator.in, ComparisonOperator.notIn, ComparisonOperator.allOf].includes(condition.comparison_operator) const isArrayValue = condition.key === 'transfer_method' || condition.key === 'type' + const selectOptions = useMemo(() => { if (isSelect) { if (condition.key === 'type' || condition.comparison_operator === ComparisonOperator.allOf) { @@ -49,6 +73,7 @@ const FilterCondition: FC = ({ } return [] }, [condition.comparison_operator, condition.key, isSelect, t]) + const handleChange = useCallback((key: string) => { return (value: any) => { onChange({ @@ -59,12 +84,14 @@ const FilterCondition: FC = ({ }, [condition, onChange, isArrayValue]) const handleSubVariableChange = useCallback((value: string) => { + const operators = getOperators(expectedVarType ?? VarType.string, { key: value }) + const newOperator = operators.length > 0 ? operators[0] : ComparisonOperator.equal onChange({ key: value, - comparison_operator: getOperators(varType, { key: value })[0], + comparison_operator: newOperator, value: '', }) - }, [onChange, varType]) + }, [onChange, expectedVarType]) return (
@@ -78,7 +105,7 @@ const FilterCondition: FC = ({
= ({ /> {!comparisonOperatorNotRequireValue(condition.comparison_operator) && ( <> - {isSelect && ( + {isSelect ? ( + ) : ( + handleChange('value')(e.target.value)} + readOnly={readOnly} /> )} @@ -110,4 +155,5 @@ const FilterCondition: FC = ({
) } + export default React.memo(FilterCondition) diff --git a/web/app/components/workflow/nodes/list-operator/panel.tsx b/web/app/components/workflow/nodes/list-operator/panel.tsx index d93a79397d..9a89629f09 100644 --- a/web/app/components/workflow/nodes/list-operator/panel.tsx +++ b/web/app/components/workflow/nodes/list-operator/panel.tsx @@ -78,6 +78,7 @@ const Panel: FC> = ({ varType={itemVarType} hasSubVariable={hasSubVariable} readOnly={readOnly} + nodeId={id} /> ) : null} From 3f8fb18c898fbbbbadfff87d9a2c0aef54d86c15 Mon Sep 17 00:00:00 2001 From: Tianyi Jing Date: Mon, 28 Jul 2025 14:07:51 +0800 Subject: [PATCH 028/415] fix: delete the old provider_config_cache after refresh_credentials (#23033) Signed-off-by: jingfelix --- api/core/tools/tool_manager.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/api/core/tools/tool_manager.py b/api/core/tools/tool_manager.py index 6b06cc7f10..1bb4cfa4cd 100644 --- a/api/core/tools/tool_manager.py +++ b/api/core/tools/tool_manager.py @@ -237,7 +237,7 @@ class ToolManager: if builtin_provider is None: raise ToolProviderNotFoundError(f"builtin provider {provider_id} not found") - encrypter, _ = create_provider_encrypter( + encrypter, cache = create_provider_encrypter( tenant_id=tenant_id, config=[ x.to_basic_provider_config() @@ -281,6 +281,7 @@ class ToolManager: builtin_provider.expires_at = refreshed_credentials.expires_at db.session.commit() decrypted_credentials = refreshed_credentials.credentials + cache.delete() return cast( BuiltinTool, From 5c5f61b2aaeeb048deaa5730222a7591d000b600 Mon Sep 17 00:00:00 2001 From: zhaobingshuang <1475195565@qq.com> Date: Mon, 28 Jul 2025 14:24:13 +0800 Subject: [PATCH 029/415] fix(dataset): CELERY_BROKER uses amqp rabbitmq. When adding document segments in batches and uploading large files, the status will always remain stuck at "In batch processing" #22709 (#23038) --- .../console/datasets/datasets_segments.py | 35 ++---- .../batch_create_segment_to_index_task.py | 34 ++++- .../detail/batch-modal/csv-uploader.tsx | 119 +++++++++++++++++- .../documents/detail/batch-modal/index.tsx | 10 +- .../datasets/documents/detail/index.tsx | 8 +- web/service/knowledge/use-segment.ts | 4 +- 6 files changed, 166 insertions(+), 44 deletions(-) diff --git a/api/controllers/console/datasets/datasets_segments.py b/api/controllers/console/datasets/datasets_segments.py index acb2265309..8c429044d7 100644 --- a/api/controllers/console/datasets/datasets_segments.py +++ b/api/controllers/console/datasets/datasets_segments.py @@ -1,6 +1,5 @@ import uuid -import pandas as pd from flask import request from flask_login import current_user from flask_restful import Resource, marshal, reqparse @@ -14,8 +13,6 @@ from controllers.console.datasets.error import ( ChildChunkDeleteIndexError, ChildChunkIndexingError, InvalidActionError, - NoFileUploadedError, - TooManyFilesError, ) from controllers.console.wraps import ( account_initialization_required, @@ -32,6 +29,7 @@ from extensions.ext_redis import redis_client from fields.segment_fields import child_chunk_fields, segment_fields from libs.login import login_required from models.dataset import ChildChunk, DocumentSegment +from models.model import UploadFile from services.dataset_service import DatasetService, DocumentService, SegmentService from services.entities.knowledge_entities.knowledge_entities import ChildChunkUpdateArgs, SegmentUpdateArgs from services.errors.chunk import ChildChunkDeleteIndexError as ChildChunkDeleteIndexServiceError @@ -365,37 +363,28 @@ class DatasetDocumentSegmentBatchImportApi(Resource): document = DocumentService.get_document(dataset_id, document_id) if not document: raise NotFound("Document not found.") - # get file from request - file = request.files["file"] - # check file - if "file" not in request.files: - raise NoFileUploadedError() - if len(request.files) > 1: - raise TooManyFilesError() + parser = reqparse.RequestParser() + parser.add_argument("upload_file_id", type=str, required=True, nullable=False, location="json") + args = parser.parse_args() + upload_file_id = args["upload_file_id"] + + upload_file = db.session.query(UploadFile).where(UploadFile.id == upload_file_id).first() + if not upload_file: + raise NotFound("UploadFile not found.") + # check file type - if not file.filename or not file.filename.lower().endswith(".csv"): + if not upload_file.name or not upload_file.name.lower().endswith(".csv"): raise ValueError("Invalid file type. Only CSV files are allowed") try: - # Skip the first row - df = pd.read_csv(file) - result = [] - for index, row in df.iterrows(): - if document.doc_form == "qa_model": - data = {"content": row.iloc[0], "answer": row.iloc[1]} - else: - data = {"content": row.iloc[0]} - result.append(data) - if len(result) == 0: - raise ValueError("The CSV file is empty.") # async job job_id = str(uuid.uuid4()) indexing_cache_key = f"segment_batch_import_{str(job_id)}" # send batch add segments task redis_client.setnx(indexing_cache_key, "waiting") batch_create_segment_to_index_task.delay( - str(job_id), result, dataset_id, document_id, current_user.current_tenant_id, current_user.id + str(job_id), upload_file_id, dataset_id, document_id, current_user.current_tenant_id, current_user.id ) except Exception as e: return {"error": str(e)}, 500 diff --git a/api/tasks/batch_create_segment_to_index_task.py b/api/tasks/batch_create_segment_to_index_task.py index d72e350299..714e30acc3 100644 --- a/api/tasks/batch_create_segment_to_index_task.py +++ b/api/tasks/batch_create_segment_to_index_task.py @@ -1,9 +1,12 @@ import datetime import logging +import tempfile import time import uuid +from pathlib import Path import click +import pandas as pd from celery import shared_task # type: ignore from sqlalchemy import func from sqlalchemy.orm import Session @@ -12,15 +15,17 @@ from core.model_manager import ModelManager from core.model_runtime.entities.model_entities import ModelType from extensions.ext_database import db from extensions.ext_redis import redis_client +from extensions.ext_storage import storage from libs import helper from models.dataset import Dataset, Document, DocumentSegment +from models.model import UploadFile from services.vector_service import VectorService @shared_task(queue="dataset") def batch_create_segment_to_index_task( job_id: str, - content: list, + upload_file_id: str, dataset_id: str, document_id: str, tenant_id: str, @@ -29,13 +34,13 @@ def batch_create_segment_to_index_task( """ Async batch create segment to index :param job_id: - :param content: + :param upload_file_id: :param dataset_id: :param document_id: :param tenant_id: :param user_id: - Usage: batch_create_segment_to_index_task.delay(job_id, content, dataset_id, document_id, tenant_id, user_id) + Usage: batch_create_segment_to_index_task.delay(job_id, upload_file_id, dataset_id, document_id, tenant_id, user_id) """ logging.info(click.style(f"Start batch create segment jobId: {job_id}", fg="green")) start_at = time.perf_counter() @@ -58,6 +63,29 @@ def batch_create_segment_to_index_task( or dataset_document.indexing_status != "completed" ): raise ValueError("Document is not available.") + + upload_file = session.get(UploadFile, upload_file_id) + if not upload_file: + raise ValueError("UploadFile not found.") + + with tempfile.TemporaryDirectory() as temp_dir: + suffix = Path(upload_file.key).suffix + # FIXME mypy: Cannot determine type of 'tempfile._get_candidate_names' better not use it here + file_path = f"{temp_dir}/{next(tempfile._get_candidate_names())}{suffix}" # type: ignore + storage.download(upload_file.key, file_path) + + # Skip the first row + df = pd.read_csv(file_path) + content = [] + for index, row in df.iterrows(): + if dataset_document.doc_form == "qa_model": + data = {"content": row.iloc[0], "answer": row.iloc[1]} + else: + data = {"content": row.iloc[0]} + content.append(data) + if len(content) == 0: + raise ValueError("The CSV file is empty.") + document_segments = [] embedding_model = None if dataset.indexing_technique == "high_quality": diff --git a/web/app/components/datasets/documents/detail/batch-modal/csv-uploader.tsx b/web/app/components/datasets/documents/detail/batch-modal/csv-uploader.tsx index c2224296d6..c352f11d7f 100644 --- a/web/app/components/datasets/documents/detail/batch-modal/csv-uploader.tsx +++ b/web/app/components/datasets/documents/detail/batch-modal/csv-uploader.tsx @@ -1,6 +1,6 @@ 'use client' import type { FC } from 'react' -import React, { useEffect, useRef, useState } from 'react' +import React, { useCallback, useEffect, useMemo, useRef, useState } from 'react' import { RiDeleteBinLine, } from '@remixicon/react' @@ -10,10 +10,17 @@ import cn from '@/utils/classnames' import { Csv as CSVIcon } from '@/app/components/base/icons/src/public/files' import { ToastContext } from '@/app/components/base/toast' import Button from '@/app/components/base/button' +import type { FileItem } from '@/models/datasets' +import { upload } from '@/service/base' +import useSWR from 'swr' +import { fetchFileUploadConfig } from '@/service/common' +import SimplePieChart from '@/app/components/base/simple-pie-chart' +import { Theme } from '@/types/app' +import useTheme from '@/hooks/use-theme' export type Props = { - file: File | undefined - updateFile: (file?: File) => void + file: FileItem | undefined + updateFile: (file?: FileItem) => void } const CSVUploader: FC = ({ @@ -26,6 +33,68 @@ const CSVUploader: FC = ({ const dropRef = useRef(null) const dragRef = useRef(null) const fileUploader = useRef(null) + const { data: fileUploadConfigResponse } = useSWR({ url: '/files/upload' }, fetchFileUploadConfig) + const fileUploadConfig = useMemo(() => fileUploadConfigResponse ?? { + file_size_limit: 15, + }, [fileUploadConfigResponse]) + + const fileUpload = useCallback(async (fileItem: FileItem): Promise => { + fileItem.progress = 0 + + const formData = new FormData() + formData.append('file', fileItem.file) + const onProgress = (e: ProgressEvent) => { + if (e.lengthComputable) { + const progress = Math.floor(e.loaded / e.total * 100) + updateFile({ + ...fileItem, + progress, + }) + } + } + + return upload({ + xhr: new XMLHttpRequest(), + data: formData, + onprogress: onProgress, + }, false, undefined, '?source=datasets') + .then((res: File) => { + const completeFile = { + fileID: fileItem.fileID, + file: res, + progress: 100, + } + updateFile(completeFile) + return Promise.resolve({ ...completeFile }) + }) + .catch((e) => { + notify({ type: 'error', message: e?.response?.code === 'forbidden' ? e?.response?.message : t('datasetCreation.stepOne.uploader.failed') }) + const errorFile = { + ...fileItem, + progress: -2, + } + updateFile(errorFile) + return Promise.resolve({ ...errorFile }) + }) + .finally() + }, [notify, t, updateFile]) + + const uploadFile = useCallback(async (fileItem: FileItem) => { + await fileUpload(fileItem) + }, [fileUpload]) + + const initialUpload = useCallback((file?: File) => { + if (!file) + return false + + const newFile: FileItem = { + fileID: `file0-${Date.now()}`, + file, + progress: -1, + } + updateFile(newFile) + uploadFile(newFile) + }, [updateFile, uploadFile]) const handleDragEnter = (e: DragEvent) => { e.preventDefault() @@ -52,7 +121,7 @@ const CSVUploader: FC = ({ notify({ type: 'error', message: t('datasetCreation.stepOne.uploader.validation.count') }) return } - updateFile(files[0]) + initialUpload(files[0]) } const selectHandle = () => { if (fileUploader.current) @@ -63,11 +132,43 @@ const CSVUploader: FC = ({ fileUploader.current.value = '' updateFile() } + + const getFileType = (currentFile: File) => { + if (!currentFile) + return '' + + const arr = currentFile.name.split('.') + return arr[arr.length - 1] + } + + const isValid = useCallback((file?: File) => { + if (!file) + return false + + const { size } = file + const ext = `.${getFileType(file)}` + const isValidType = ext.toLowerCase() === '.csv' + if (!isValidType) + notify({ type: 'error', message: t('datasetCreation.stepOne.uploader.validation.typeError') }) + + const isValidSize = size <= fileUploadConfig.file_size_limit * 1024 * 1024 + if (!isValidSize) + notify({ type: 'error', message: t('datasetCreation.stepOne.uploader.validation.size', { size: fileUploadConfig.file_size_limit }) }) + + return isValidType && isValidSize + }, [fileUploadConfig, notify, t]) + const fileChangeHandle = (e: React.ChangeEvent) => { const currentFile = e.target.files?.[0] - updateFile(currentFile) + if (!isValid(currentFile)) + return + + initialUpload(currentFile) } + const { theme } = useTheme() + const chartColor = useMemo(() => theme === Theme.dark ? '#5289ff' : '#296dff', [theme]) + useEffect(() => { dropRef.current?.addEventListener('dragenter', handleDragEnter) dropRef.current?.addEventListener('dragover', handleDragOver) @@ -108,10 +209,16 @@ const CSVUploader: FC = ({
- {file.name.replace(/.csv$/, '')} + {file.file.name.replace(/.csv$/, '')} .csv
+ {(file.progress < 100 && file.progress >= 0) && ( + <> + +
+ + )}
diff --git a/web/app/components/datasets/documents/detail/batch-modal/index.tsx b/web/app/components/datasets/documents/detail/batch-modal/index.tsx index 614471c565..0952a823b4 100644 --- a/web/app/components/datasets/documents/detail/batch-modal/index.tsx +++ b/web/app/components/datasets/documents/detail/batch-modal/index.tsx @@ -7,14 +7,14 @@ import CSVUploader from './csv-uploader' import CSVDownloader from './csv-downloader' import Button from '@/app/components/base/button' import Modal from '@/app/components/base/modal' -import type { ChunkingMode } from '@/models/datasets' +import type { ChunkingMode, FileItem } from '@/models/datasets' import { noop } from 'lodash-es' export type IBatchModalProps = { isShow: boolean docForm: ChunkingMode onCancel: () => void - onConfirm: (file: File) => void + onConfirm: (file: FileItem) => void } const BatchModal: FC = ({ @@ -24,8 +24,8 @@ const BatchModal: FC = ({ onConfirm, }) => { const { t } = useTranslation() - const [currentCSV, setCurrentCSV] = useState() - const handleFile = (file?: File) => setCurrentCSV(file) + const [currentCSV, setCurrentCSV] = useState() + const handleFile = (file?: FileItem) => setCurrentCSV(file) const handleSend = () => { if (!currentCSV) @@ -56,7 +56,7 @@ const BatchModal: FC = ({ -
diff --git a/web/app/components/datasets/documents/detail/index.tsx b/web/app/components/datasets/documents/detail/index.tsx index aff74038e3..79d12e47e3 100644 --- a/web/app/components/datasets/documents/detail/index.tsx +++ b/web/app/components/datasets/documents/detail/index.tsx @@ -17,7 +17,7 @@ import cn from '@/utils/classnames' import Divider from '@/app/components/base/divider' import Loading from '@/app/components/base/loading' import { ToastContext } from '@/app/components/base/toast' -import type { ChunkingMode, ParentMode, ProcessMode } from '@/models/datasets' +import type { ChunkingMode, FileItem, ParentMode, ProcessMode } from '@/models/datasets' import { useDatasetDetailContext } from '@/context/dataset-detail' import FloatRightContainer from '@/app/components/base/float-right-container' import useBreakpoints, { MediaType } from '@/hooks/use-breakpoints' @@ -111,12 +111,10 @@ const DocumentDetail: FC = ({ datasetId, documentId }) => { } const { mutateAsync: segmentBatchImport } = useSegmentBatchImport() - const runBatch = async (csv: File) => { - const formData = new FormData() - formData.append('file', csv) + const runBatch = async (csv: FileItem) => { await segmentBatchImport({ url: `/datasets/${datasetId}/documents/${documentId}/segments/batch_import`, - body: formData, + body: { upload_file_id: csv.file.id! }, }, { onSuccess: (res) => { setImportStatus(res.job_status) diff --git a/web/service/knowledge/use-segment.ts b/web/service/knowledge/use-segment.ts index ca1778fb94..8b3e939e73 100644 --- a/web/service/knowledge/use-segment.ts +++ b/web/service/knowledge/use-segment.ts @@ -154,9 +154,9 @@ export const useUpdateChildSegment = () => { export const useSegmentBatchImport = () => { return useMutation({ mutationKey: [NAME_SPACE, 'batchImport'], - mutationFn: (payload: { url: string; body: FormData }) => { + mutationFn: (payload: { url: string; body: { upload_file_id: string } }) => { const { url, body } = payload - return post(url, { body }, { bodyStringify: false, deleteContentType: true }) + return post(url, { body }) }, }) } From fce126b206a7b08b6d47f1c06462e54f257c03f4 Mon Sep 17 00:00:00 2001 From: chenguowei <457219884@qq.com> Date: Mon, 28 Jul 2025 15:37:13 +0800 Subject: [PATCH 030/415] fix(api): fix incorrect path handling in Langfuse integration (#22766) Co-authored-by: QuantumGhost --- api/core/ops/entities/config_entity.py | 2 +- api/core/ops/utils.py | 8 +++++++- api/tests/unit_tests/core/ops/test_config_entity.py | 7 +++++++ 3 files changed, 15 insertions(+), 2 deletions(-) diff --git a/api/core/ops/entities/config_entity.py b/api/core/ops/entities/config_entity.py index 89ff0cfded..626782cee5 100644 --- a/api/core/ops/entities/config_entity.py +++ b/api/core/ops/entities/config_entity.py @@ -102,7 +102,7 @@ class LangfuseConfig(BaseTracingConfig): @field_validator("host") @classmethod def host_validator(cls, v, info: ValidationInfo): - return cls.validate_endpoint_url(v, "https://api.langfuse.com") + return validate_url_with_path(v, "https://api.langfuse.com") class LangSmithConfig(BaseTracingConfig): diff --git a/api/core/ops/utils.py b/api/core/ops/utils.py index 573e8cac88..2c0afb1600 100644 --- a/api/core/ops/utils.py +++ b/api/core/ops/utils.py @@ -67,7 +67,13 @@ def generate_dotted_order( def validate_url(url: str, default_url: str, allowed_schemes: tuple = ("https", "http")) -> str: """ - Validate and normalize URL with proper error handling + Validate and normalize URL with proper error handling. + + NOTE: This function does not retain the `path` component of the provided URL. + In most cases, it is recommended to use `validate_url_with_path` instead. + + This function is deprecated and retained only for compatibility purposes. + New implementations should use `validate_url_with_path`. Args: url: The URL to validate diff --git a/api/tests/unit_tests/core/ops/test_config_entity.py b/api/tests/unit_tests/core/ops/test_config_entity.py index 81cb04548d..4bcc6cb605 100644 --- a/api/tests/unit_tests/core/ops/test_config_entity.py +++ b/api/tests/unit_tests/core/ops/test_config_entity.py @@ -117,6 +117,13 @@ class TestLangfuseConfig: assert config.secret_key == "secret_key" assert config.host == "https://custom.langfuse.com" + def test_valid_config_with_path(self): + host = host = "https://custom.langfuse.com/api/v1" + config = LangfuseConfig(public_key="public_key", secret_key="secret_key", host=host) + assert config.public_key == "public_key" + assert config.secret_key == "secret_key" + assert config.host == host + def test_default_values(self): """Test default values are set correctly""" config = LangfuseConfig(public_key="public", secret_key="secret") From 15757110cff780a72637b52037676f598e0a4072 Mon Sep 17 00:00:00 2001 From: Anton Kovalev Date: Mon, 28 Jul 2025 10:37:23 +0300 Subject: [PATCH 031/415] feat: default value option for select input fields (#21192) Co-authored-by: crazywoola <427733928@qq.com> Co-authored-by: GuanMu --- .../config-var/config-modal/index.tsx | 28 +++++++++++++-- .../base/chat/chat-with-history/hooks.tsx | 2 +- .../chat-with-history/inputs-form/content.tsx | 2 +- .../base/chat/embedded-chatbot/hooks.tsx | 2 +- .../embedded-chatbot/inputs-form/content.tsx | 2 +- .../components/before-run-form/form-item.tsx | 2 +- .../panel/debug-and-preview/chat-wrapper.tsx | 35 +++++++++++++++++-- web/i18n/de-DE/app-debug.ts | 3 ++ web/i18n/en-US/app-debug.ts | 3 ++ web/i18n/es-ES/app-debug.ts | 3 ++ web/i18n/fr-FR/app-debug.ts | 3 ++ web/i18n/hi-IN/app-debug.ts | 3 ++ web/i18n/it-IT/app-debug.ts | 3 ++ web/i18n/ja-JP/app-debug.ts | 3 ++ web/i18n/ko-KR/app-debug.ts | 3 ++ web/i18n/pl-PL/app-debug.ts | 3 ++ web/i18n/pt-BR/app-debug.ts | 3 ++ web/i18n/ro-RO/app-debug.ts | 3 ++ web/i18n/ru-RU/app-debug.ts | 3 ++ web/i18n/tr-TR/app-debug.ts | 3 ++ web/i18n/uk-UA/app-debug.ts | 3 ++ web/i18n/vi-VN/app-debug.ts | 3 ++ web/i18n/zh-Hans/app-debug.ts | 3 ++ web/i18n/zh-Hant/app-debug.ts | 3 ++ 24 files changed, 113 insertions(+), 11 deletions(-) diff --git a/web/app/components/app/configuration/config-var/config-modal/index.tsx b/web/app/components/app/configuration/config-var/config-modal/index.tsx index 8fcc0f4c08..72c66cf76a 100644 --- a/web/app/components/app/configuration/config-var/config-modal/index.tsx +++ b/web/app/components/app/configuration/config-var/config-modal/index.tsx @@ -20,6 +20,7 @@ import FileUploadSetting from '@/app/components/workflow/nodes/_base/components/ import Checkbox from '@/app/components/base/checkbox' import { DEFAULT_FILE_UPLOAD_SETTING } from '@/app/components/workflow/constants' import { DEFAULT_VALUE_MAX_LEN } from '@/config' +import { SimpleSelect } from '@/app/components/base/select' const TEXT_MAX_LENGTH = 256 @@ -234,9 +235,30 @@ const ConfigModal: FC = ({ )} {type === InputVarType.select && ( - - - + <> + + + + {options && options.length > 0 && ( + + opt.trim() !== '').map(option => ({ + value: option, + name: option, + })), + ]} + defaultValue={tempPayload.default || ''} + onSelect={item => handlePayloadChange('default')(item.value === '' ? undefined : item.value)} + placeholder={t('appDebug.variableConfig.selectDefaultValue')} + allowSearch={false} + /> + + )} + )} {[InputVarType.singleFile, InputVarType.multiFiles].includes(type) && ( diff --git a/web/app/components/base/chat/chat-with-history/hooks.tsx b/web/app/components/base/chat/chat-with-history/hooks.tsx index 76eb89164e..382ded3201 100644 --- a/web/app/components/base/chat/chat-with-history/hooks.tsx +++ b/web/app/components/base/chat/chat-with-history/hooks.tsx @@ -211,7 +211,7 @@ export const useChatWithHistory = (installedAppInfo?: InstalledApp) => { const isInputInOptions = item.select.options.includes(initInputs[item.select.variable]) return { ...item.select, - default: (isInputInOptions ? initInputs[item.select.variable] : undefined) || item.default, + default: (isInputInOptions ? initInputs[item.select.variable] : undefined) || item.select.default, type: 'select', } } diff --git a/web/app/components/base/chat/chat-with-history/inputs-form/content.tsx b/web/app/components/base/chat/chat-with-history/inputs-form/content.tsx index 73a1f07b69..3304d50a50 100644 --- a/web/app/components/base/chat/chat-with-history/inputs-form/content.tsx +++ b/web/app/components/base/chat/chat-with-history/inputs-form/content.tsx @@ -73,7 +73,7 @@ const InputsFormContent = ({ showTip }: Props) => { {form.type === InputVarType.select && ( ({ value: option, name: option }))} onSelect={item => handleFormChange(form.variable, item.value as string)} placeholder={form.label} diff --git a/web/app/components/base/chat/embedded-chatbot/hooks.tsx b/web/app/components/base/chat/embedded-chatbot/hooks.tsx index 8ae86bda84..4e86ad50e4 100644 --- a/web/app/components/base/chat/embedded-chatbot/hooks.tsx +++ b/web/app/components/base/chat/embedded-chatbot/hooks.tsx @@ -199,7 +199,7 @@ export const useEmbeddedChatbot = () => { const isInputInOptions = item.select.options.includes(initInputs[item.select.variable]) return { ...item.select, - default: (isInputInOptions ? initInputs[item.select.variable] : undefined) || item.default, + default: (isInputInOptions ? initInputs[item.select.variable] : undefined) || item.select.default, type: 'select', } } diff --git a/web/app/components/base/chat/embedded-chatbot/inputs-form/content.tsx b/web/app/components/base/chat/embedded-chatbot/inputs-form/content.tsx index c5f39718f1..29fa5394ef 100644 --- a/web/app/components/base/chat/embedded-chatbot/inputs-form/content.tsx +++ b/web/app/components/base/chat/embedded-chatbot/inputs-form/content.tsx @@ -73,7 +73,7 @@ const InputsFormContent = ({ showTip }: Props) => { {form.type === InputVarType.select && ( ({ value: option, name: option }))} onSelect={item => handleFormChange(form.variable, item.value as string)} placeholder={form.label} diff --git a/web/app/components/workflow/nodes/_base/components/before-run-form/form-item.tsx b/web/app/components/workflow/nodes/_base/components/before-run-form/form-item.tsx index 269f5e0a96..430359b845 100644 --- a/web/app/components/workflow/nodes/_base/components/before-run-form/form-item.tsx +++ b/web/app/components/workflow/nodes/_base/components/before-run-form/form-item.tsx @@ -158,7 +158,7 @@ const FormItem: FC = ({ type === InputVarType.select && ( handleValueChange(e.target.value)} placeholder={placeholder?.[language] || placeholder?.en_US} /> diff --git a/web/app/components/workflow/nodes/tool/node.tsx b/web/app/components/workflow/nodes/tool/node.tsx index e15ddcaaaa..8cc3ec580d 100644 --- a/web/app/components/workflow/nodes/tool/node.tsx +++ b/web/app/components/workflow/nodes/tool/node.tsx @@ -22,13 +22,13 @@ const Node: FC> = ({ {key}
{typeof tool_configurations[key].value === 'string' && ( -
+
{paramSchemas?.find(i => i.name === key)?.type === FormTypeEnum.secretInput ? '********' : tool_configurations[key].value}
)} {typeof tool_configurations[key].value === 'number' && ( -
- {tool_configurations[key].value} +
+ {Number.isNaN(tool_configurations[key].value) ? '' : tool_configurations[key].value}
)} {typeof tool_configurations[key] !== 'string' && tool_configurations[key]?.type === FormTypeEnum.modelSelector && ( From 63b6026e6e414d3c1970944457dd77fb4b105336 Mon Sep 17 00:00:00 2001 From: NeatGuyCoding <15627489+NeatGuyCoding@users.noreply.github.com> Date: Tue, 29 Jul 2025 10:59:43 +0800 Subject: [PATCH 047/415] minor fix: fix error messages (#23081) --- api/core/workflow/nodes/tool/entities.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/api/core/workflow/nodes/tool/entities.py b/api/core/workflow/nodes/tool/entities.py index 4f47fb1efc..c1cfbb1edc 100644 --- a/api/core/workflow/nodes/tool/entities.py +++ b/api/core/workflow/nodes/tool/entities.py @@ -55,7 +55,7 @@ class ToolNodeData(BaseNodeData, ToolEntity): if not isinstance(val, str): raise ValueError("value must be a list of strings") elif typ == "constant" and not isinstance(value, str | int | float | bool | dict): - raise ValueError("value must be a string, int, float, or bool") + raise ValueError("value must be a string, int, float, bool or dict") return typ tool_parameters: dict[str, ToolInput] From 47cc95184132b756a94e58c2e0c85fb01044eced Mon Sep 17 00:00:00 2001 From: NeatGuyCoding <15627489+NeatGuyCoding@users.noreply.github.com> Date: Tue, 29 Jul 2025 11:17:50 +0800 Subject: [PATCH 048/415] Fix Empty Collection WHERE Filter Issue (#23086) --- api/services/app_service.py | 5 +++-- api/services/conversation_service.py | 6 ++++-- api/services/dataset_service.py | 26 +++++++++++++++++++++----- api/services/message_service.py | 3 ++- api/services/tag_service.py | 6 ++++++ 5 files changed, 36 insertions(+), 10 deletions(-) diff --git a/api/services/app_service.py b/api/services/app_service.py index 3557f13337..0f22666d5a 100644 --- a/api/services/app_service.py +++ b/api/services/app_service.py @@ -53,9 +53,10 @@ class AppService: if args.get("name"): name = args["name"][:30] filters.append(App.name.ilike(f"%{name}%")) - if args.get("tag_ids"): + # Check if tag_ids is not empty to avoid WHERE false condition + if args.get("tag_ids") and len(args["tag_ids"]) > 0: target_ids = TagService.get_target_ids_by_tag_ids("app", tenant_id, args["tag_ids"]) - if target_ids: + if target_ids and len(target_ids) > 0: filters.append(App.id.in_(target_ids)) else: return None diff --git a/api/services/conversation_service.py b/api/services/conversation_service.py index 525c87fe4a..206c832a20 100644 --- a/api/services/conversation_service.py +++ b/api/services/conversation_service.py @@ -46,9 +46,11 @@ class ConversationService: Conversation.from_account_id == (user.id if isinstance(user, Account) else None), or_(Conversation.invoke_from.is_(None), Conversation.invoke_from == invoke_from.value), ) - if include_ids is not None: + # Check if include_ids is not None and not empty to avoid WHERE false condition + if include_ids is not None and len(include_ids) > 0: stmt = stmt.where(Conversation.id.in_(include_ids)) - if exclude_ids is not None: + # Check if exclude_ids is not None and not empty to avoid WHERE false condition + if exclude_ids is not None and len(exclude_ids) > 0: stmt = stmt.where(~Conversation.id.in_(exclude_ids)) # define sort fields and directions diff --git a/api/services/dataset_service.py b/api/services/dataset_service.py index 209d153b0c..1280399990 100644 --- a/api/services/dataset_service.py +++ b/api/services/dataset_service.py @@ -91,14 +91,16 @@ class DatasetService: if user.current_role == TenantAccountRole.DATASET_OPERATOR: # only show datasets that the user has permission to access - if permitted_dataset_ids: + # Check if permitted_dataset_ids is not empty to avoid WHERE false condition + if permitted_dataset_ids and len(permitted_dataset_ids) > 0: query = query.where(Dataset.id.in_(permitted_dataset_ids)) else: return [], 0 else: if user.current_role != TenantAccountRole.OWNER or not include_all: # show all datasets that the user has permission to access - if permitted_dataset_ids: + # Check if permitted_dataset_ids is not empty to avoid WHERE false condition + if permitted_dataset_ids and len(permitted_dataset_ids) > 0: query = query.where( db.or_( Dataset.permission == DatasetPermissionEnum.ALL_TEAM, @@ -127,9 +129,10 @@ class DatasetService: if search: query = query.where(Dataset.name.ilike(f"%{search}%")) - if tag_ids: + # Check if tag_ids is not empty to avoid WHERE false condition + if tag_ids and len(tag_ids) > 0: target_ids = TagService.get_target_ids_by_tag_ids("knowledge", tenant_id, tag_ids) - if target_ids: + if target_ids and len(target_ids) > 0: query = query.where(Dataset.id.in_(target_ids)) else: return [], 0 @@ -158,6 +161,9 @@ class DatasetService: @staticmethod def get_datasets_by_ids(ids, tenant_id): + # Check if ids is not empty to avoid WHERE false condition + if not ids or len(ids) == 0: + return [], 0 stmt = select(Dataset).where(Dataset.id.in_(ids), Dataset.tenant_id == tenant_id) datasets = db.paginate(select=stmt, page=1, per_page=len(ids), max_per_page=len(ids), error_out=False) @@ -951,6 +957,9 @@ class DocumentService: @staticmethod def delete_documents(dataset: Dataset, document_ids: list[str]): + # Check if document_ids is not empty to avoid WHERE false condition + if not document_ids or len(document_ids) == 0: + return documents = db.session.query(Document).where(Document.id.in_(document_ids)).all() file_ids = [ document.data_source_info_dict["upload_file_id"] @@ -2320,6 +2329,9 @@ class SegmentService: @classmethod def delete_segments(cls, segment_ids: list, document: Document, dataset: Dataset): + # Check if segment_ids is not empty to avoid WHERE false condition + if not segment_ids or len(segment_ids) == 0: + return index_node_ids = ( db.session.query(DocumentSegment) .with_entities(DocumentSegment.index_node_id) @@ -2339,6 +2351,9 @@ class SegmentService: @classmethod def update_segments_status(cls, segment_ids: list, action: str, dataset: Dataset, document: Document): + # Check if segment_ids is not empty to avoid WHERE false condition + if not segment_ids or len(segment_ids) == 0: + return if action == "enable": segments = ( db.session.query(DocumentSegment) @@ -2600,7 +2615,8 @@ class SegmentService: DocumentSegment.document_id == document_id, DocumentSegment.tenant_id == tenant_id ) - if status_list: + # Check if status_list is not empty to avoid WHERE false condition + if status_list and len(status_list) > 0: query = query.where(DocumentSegment.status.in_(status_list)) if keyword: diff --git a/api/services/message_service.py b/api/services/message_service.py index 283b7b9b4b..a19d6ee157 100644 --- a/api/services/message_service.py +++ b/api/services/message_service.py @@ -111,7 +111,8 @@ class MessageService: base_query = base_query.where(Message.conversation_id == conversation.id) - if include_ids is not None: + # Check if include_ids is not None and not empty to avoid WHERE false condition + if include_ids is not None and len(include_ids) > 0: base_query = base_query.where(Message.id.in_(include_ids)) if last_id: diff --git a/api/services/tag_service.py b/api/services/tag_service.py index 75fa52a75c..2e5e96214b 100644 --- a/api/services/tag_service.py +++ b/api/services/tag_service.py @@ -26,6 +26,9 @@ class TagService: @staticmethod def get_target_ids_by_tag_ids(tag_type: str, current_tenant_id: str, tag_ids: list) -> list: + # Check if tag_ids is not empty to avoid WHERE false condition + if not tag_ids or len(tag_ids) == 0: + return [] tags = ( db.session.query(Tag) .where(Tag.id.in_(tag_ids), Tag.tenant_id == current_tenant_id, Tag.type == tag_type) @@ -34,6 +37,9 @@ class TagService: if not tags: return [] tag_ids = [tag.id for tag in tags] + # Check if tag_ids is not empty to avoid WHERE false condition + if not tag_ids or len(tag_ids) == 0: + return [] tag_bindings = ( db.session.query(TagBinding.target_id) .where(TagBinding.tag_id.in_(tag_ids), TagBinding.tenant_id == current_tenant_id) From 77216488675db30d21e43466cca95dfe82bd9c0c Mon Sep 17 00:00:00 2001 From: GuanMu Date: Tue, 29 Jul 2025 11:24:59 +0800 Subject: [PATCH 049/415] Fix variable config (#23070) --- .../config-var/config-modal/index.tsx | 1 + web/app/components/base/select/index.tsx | 4 +--- .../share/text-generation/run-once/index.tsx | 4 +++- .../workflow/panel/inputs-panel.tsx | 24 ++++++++++++++++++- web/utils/model-config.ts | 3 ++- 5 files changed, 30 insertions(+), 6 deletions(-) diff --git a/web/app/components/app/configuration/config-var/config-modal/index.tsx b/web/app/components/app/configuration/config-var/config-modal/index.tsx index 72c66cf76a..27072f5208 100644 --- a/web/app/components/app/configuration/config-var/config-modal/index.tsx +++ b/web/app/components/app/configuration/config-var/config-modal/index.tsx @@ -244,6 +244,7 @@ const ConfigModal: FC = ({ opt.trim() !== '').map(option => ({ diff --git a/web/app/components/base/select/index.tsx b/web/app/components/base/select/index.tsx index 77d229672f..d9285c1061 100644 --- a/web/app/components/base/select/index.tsx +++ b/web/app/components/base/select/index.tsx @@ -77,7 +77,6 @@ const Select: FC = ({ defaultSelect = existed setSelectedItem(defaultSelect) - // eslint-disable-next-line react-hooks/exhaustive-deps }, [defaultValue]) const filteredItems: Item[] @@ -201,7 +200,6 @@ const SimpleSelect: FC = ({ defaultSelect = existed setSelectedItem(defaultSelect) - // eslint-disable-next-line react-hooks/exhaustive-deps }, [defaultValue]) const listboxRef = useRef(null) @@ -344,7 +342,7 @@ const PortalSelect: FC = ({ > diff --git a/web/app/components/share/text-generation/run-once/index.tsx b/web/app/components/share/text-generation/run-once/index.tsx index 546b21d2b0..cfafe73bf2 100644 --- a/web/app/components/share/text-generation/run-once/index.tsx +++ b/web/app/components/share/text-generation/run-once/index.tsx @@ -66,7 +66,9 @@ const RunOnce: FC = ({ useEffect(() => { const newInputs: Record = {} promptConfig.prompt_variables.forEach((item) => { - if (item.type === 'string' || item.type === 'paragraph') + if (item.type === 'select') + newInputs[item.key] = item.default + else if (item.type === 'string' || item.type === 'paragraph') newInputs[item.key] = '' else newInputs[item.key] = undefined diff --git a/web/app/components/workflow/panel/inputs-panel.tsx b/web/app/components/workflow/panel/inputs-panel.tsx index 8be8d810f0..64ac6d8686 100644 --- a/web/app/components/workflow/panel/inputs-panel.tsx +++ b/web/app/components/workflow/panel/inputs-panel.tsx @@ -1,6 +1,7 @@ import { memo, useCallback, + useEffect, useMemo, } from 'react' import { useTranslation } from 'react-i18next' @@ -32,9 +33,12 @@ type Props = { const InputsPanel = ({ onRun }: Props) => { const { t } = useTranslation() const workflowStore = useWorkflowStore() + const { inputs, setInputs } = useStore(s => ({ + inputs: s.inputs, + setInputs: s.setInputs, + })) const fileSettings = useFeatures(s => s.features.file) const nodes = useNodes() - const inputs = useStore(s => s.inputs) const files = useStore(s => s.files) const workflowRunningData = useStore(s => s.workflowRunningData) const { @@ -44,6 +48,24 @@ const InputsPanel = ({ onRun }: Props) => { const startVariables = startNode?.data.variables const { checkInputsForm } = useCheckInputsForms() + const initialInputs = useMemo(() => { + const initInputs: Record = {} + if (startVariables) { + startVariables.forEach((variable) => { + if (variable.default) + initInputs[variable.variable] = variable.default + }) + } + return initInputs + }, [startVariables]) + + useEffect(() => { + setInputs({ + ...initialInputs, + ...inputs, + }) + }, [initialInputs]) + const variables = useMemo(() => { const data = startVariables || [] if (fileSettings?.image?.enabled) { diff --git a/web/utils/model-config.ts b/web/utils/model-config.ts index 330d8f9b52..3a500f22bc 100644 --- a/web/utils/model-config.ts +++ b/web/utils/model-config.ts @@ -62,6 +62,7 @@ export const userInputsFormToPromptVariables = (useInputs: UserInputFormItem[] | options: content.options, is_context_var, hide: content.hide, + default: content.default, }) } else if (type === 'file') { @@ -148,7 +149,7 @@ export const promptVariablesToUserInputsForm = (promptVariables: PromptVariable[ variable: item.key, required: item.required !== false, // default true options: item.options, - default: '', + default: item.default ?? '', hide: item.hide, }, } as any) From 27f400e13f85b656fc57ac7d7bcb120b6ba8161b Mon Sep 17 00:00:00 2001 From: crazywoola <100913391+crazywoola@users.noreply.github.com> Date: Tue, 29 Jul 2025 14:05:59 +0800 Subject: [PATCH 050/415] feat: update banner (#23095) --- images/GitHub_README_if.png | Bin 191695 -> 72664 bytes 1 file changed, 0 insertions(+), 0 deletions(-) diff --git a/images/GitHub_README_if.png b/images/GitHub_README_if.png index 10c9d87b08fab53abb7cef67cd6ccc7cdd1b91e5..281d95cf9cfc4c9fd6aede63a1b9b4d00d8f2e85 100644 GIT binary patch literal 72664 zcmeFZ^;=b2*9N>n1Vj*!?oa{g?v_$o*nl7{0#Z_fw19Lef^_GmL%Jm-q`Rdy9TJ<~ z-`qa$^M2o-@crzvEuT5GO7=a?hzagQ-isD_#%9u6fA1OmZ(t|a#o0>O!ZK<>t1 zV}d*Ho}Ju)KkmI(dgTm(+$Tc)iv~$bAqO|noL?%+LP`gzx4>U8EM-(>Adrga``4y- zA!t-~&*fya-O+XzRBa8X5}CH27^pRrY7quMznj5ZK&j_f@a8Sr*Y1b(X9Ze<^!Ex* zqJ~BES2;f7=xK zr1`&p@k{%^NB<{{|5b&87rF7FE@_y?&- zd4n+0TDoMUx#;H_@?5$wJ6Td}J-+x}#x;7>`th&pMwyg)+f)}1X_;_$nE8J1QY*hG z?P}fCv8S&a%aIw`karR(ax40OB1%U81Mk1R%*KyJ6-~Ycge_YUGDS-D6k#Wjw!i%I z(&ROv(%kiFW_6{Ha0zl{z51eP8{InJ_xyrI*yHvje`^3ALy01OMQ`qULOEOdY@^Ir zZLZbx10?zAUIaQs8fj;5pEofvF`i>)D`>>5mO}2G>r+A3c^k93JyC=;tLC;p|2&#b zY7yZrWIwOuZ9xOJYh2l+IvYcwP+6Nv!sq&27xU?A>*|5!+qVy;8T@V75XNfCZut3F zt)k;fCLF9^)1+=@HU&Jlt1!H^s4aKsTKRFziyr9XqXOe*>-ze-%f;XIai*^6-9$di z(L$|!1>?fSI5yqU(b1Qao^+C4?in5Td!lHR4GavfS`PXxKNgl1-+@5? zA4}`~9^Xr6WzWs5Q{yIn*kq%Ju{1FRVxkmQF=OT>*lnzyE}6>tx{h8Pgln$K)~R){=j@`5ilKuocnAk}{)UV0`OifMgb@oIty)xb>Cs@oW&i zAQ-$y03}}~ncy-n7MRU6ZgS^A12@+mlHX;m@I1Rp6?412M)1VEYGPDkmBU#e;;3*y zUcc9OKZqMAA*3{XOPGw_zJ&`tE(%&HFC1V02YZ3Zd`$BQr6Z!+}g*VIUpGquCJu>Y@{yvWr)IFG#_|fyi^5(^b2smK0hj$wW=;kPfk`wO>qcJ zv06>@vx~L0^~sXF-^umGaW-V@%Ok~y8tK?IhR9l71-pf-wLRafZmXHN%j|pv3-AfuSETbglt~J-deT}|#dmppc6Mep>UF&^JF71mVx{i8(i!?+@qYlJ zOw`WC#^xk!J6lUjD|^{W^ON?Dhj4<>#w8sohn&^Pui6%++FK(}4<`8F5Pmdl8Yc`$ z_(U;6ru*D{!mPt9;$vxP>8*%%5}Ry3@;n+TTw%fnW;;v=R_oB&W{sQG&GvQ#D3Ip5 zCb?GN{G=4Ef?oA|IJ_)p>%i+v>CNUC23Eyw`1jFM-xEgp>lb`imt7wF+F| zhc_QtMEfnWu|{|MZC9B0C{!KQ*47doy3R#E$o-56qWR#tlin(Vb1Ork2|Zc!y~!&_ z?QC4g^5#OLn=-T|f@j1^9Ut7%ARxbs`Q+^~7dv~-l#wS#s)XkUek&Ytk_p0ETD+?> zlbw-F7lX}c6++IITZbhjOXMFZyxVCo#SDFUD}Mbvcu4tXceescn_1jpc({|7mmUpH z>wN{=o2NTdP#TLvor6~KcZ%vS?jJPs!fJ51{w_}iYm)#FpP@rYnVOv3@8esYi-j7G z9f2M|230(mujhk_QlgFi`0)ednaho_@pEz4i}cnn%O4pZUtap&+=rrMRIo5nq+3-K zVQ+8G1V;P$GtWaB<-#wW;e*z?L(6g&n(eh^`pEP3>az;h4qf^Cmu$%ENKGma@Um53 zuJZTFMP_c4``Kl>i*|_HI5%-WiWj3FQ9bTMy7PIVhK}Y1SZ#8)p8$A=e+!t?s{_yL zbH{&61ygO3t}Y^@&OH$Hz4`hgX^61Bt$^hozR%5LcFLans3CS99->#xJl&{RLm+;H z`HNooMf$=;{ku>ur@2dtChQ04H^E2{?dn}Yu9iU)MJth%p{7|P*xfrdIjK+<7Z)dm ze8oZmo{2rChrD3%&%S^E{#JJ+)z(T^_`L>uL)Q^9edOm;zBfR&UpHF4fK=LsZ599M`RAc|_dM(jw`xM}`x5 z>Ihtl3(#Gc?eW49l)su)G$A&@_0K+$4`hIFfO_Y zQ{y+_aD?~L%8D7sYIadk(bnJf{;L)Turd5^#Y)MqBtV>NIS99ivN%Y_4(|RCU1qVn zyL&D2nlcNgv;riierI0@(1iUKO190rC0taiXW#l3?bs&#_1pf?KLn&GHQN&u z5~^C9g2!*EC{(Al`rKT0`e%8Pw2z$Mb`JuPB{4ri=a%8ox;&jS9(p%{p*rW|!eVzl zTOu8$+CJORz%L^` zJ@Oy3uM7=Uhjo%$UkdO|tuE2MNbdO2%*+myO3oNS8%?pb5D9j^QxVm6vi$n`dXyi| zu#QGP((IVbmczG`68*M#X=#dr*A1qorVO5Dm%tW@c1h8GZ-xn!78Vw2PRx=or6nd_ z%p#&-nMZxr&U6R$UKdVa4H-u0*${!S_Id2c#X84Fh9G3i#0qK;u&=k5X}{S^#tu)r z33kX!1!4o=AO1i{`n!aa(mWaj$mit-E**XMocBqtUPf3e>hUW^eCp9V_xZEoP6ysS z{Ys2h@JiE;-)O`GE2;ccNxGDMr`UI~?&vpHZPA=5cxAT4YAKb9G*a+{a6e#y@%?K- zc1oF%Xvs^Kg=SA7F?Pz8=J4?Ftv^45ahSpD)e+2V5WLWt31;;)>L1&m8tm>%MXgu7 z_|{x;xX5O$6=w)u(r4HAR8%^dt}wT@uy}H{w9xd?nV9mkY=jIPGwME}XjI1L;^eVK zrw@7ih&v5{yd&X#ZSaCxUNqaFA)b#8!_VJuS8vmvV^>7L_LER_YR`1t!C!Y(kuI zH;YDN;BVCM=E;eRKg4YoUqVs)R;40i1cS%X4n4flJiHV7A z(dXsmDa6VDK)x*_cZ;&ut+!ujfcE+jPbe(;B5z#Ic4tN;LCBEi6X(FP3MH=&??XfE zFA!1QE%-I#d;WIT<1sfL_~`kp4nqa@3Qdly@C`JtVG}5ZVi~MclJe6yn{Mf2FvX-l z)*{m3ohHXSWvyxUOD(AxFT`5axrKofWBg|$4sy_uO}&#p!E`v`e^oSl`Dz=iFrppi z>%PE9CzLHMEdh@O`F+?)onc`{aIjaq#3^u0)#q`2_3U2IWBrk#|B+lNn}1g8^*Wa# zTI;!kdpvNbyNJnpF0eg(|Fa7kQlB*a;`R0PlpLClx>=-g+S=vVQ1M|iV6`VYe`{;& zL<$KF zj#IJ;_INY)^z_tay)W(r<(yh5#nxVB5wrt%GwHcwCEq%AaSTT-4|6~Ge(7z${jjQZGJ84kF&9Wpc*I%c?vt+AUi z6IWdMxsAc7iQvkpi24F2fPymq^DB3%s;UZ~0$W8*5rZb<86!XCUal;Wo&RR^)`3qN zi^H~o@#BWo%T^dC@N8{wXe2!NQC0+H=)PSck!i*DU2OH0va_|#sU#&ORWOEcY&`rK z*LW~6Fp!hSe^CWQ6!ZdUYXJCgKsTm0#S~$CPlcYt0%4qdE$!1hploex@>bO^8jCo) z$xW(BqY138MRc2xzrH-$n*CuD`X|);Nk`9BsCQ29@Z0Na;k3TpIWSx5i2O|m4M{-D ztTIn&_7b5NyUm7!w#DAp&xiq(6V13{R3S?ZqLuVYASEMH{tPw0=C0)sxv6Y!PCacA z)ILv1Nq$^9pPW{G49$aaR{37!%kC|@f`nJyCvJCb8>L-Q{yB(VHXC@fR|k=Y%iG)U{F}8A zV20$hK=EnoK`1QvO}r+2^NfVgnFhEX{ZTi+?JO`1w-=>sUEyR^i$xp~LyRb(#4Vmy zaOXrx`@XQ}neFW2qFQ-6woDp<{{Z9bIlO*^HW>(ECVD{aczRzaJDpXkqrMXjOk@cA z-^_3#HWDN(Bz`P_#L;@sytW4QTv?k!0^|t*9`O8(jEux+L}kukSpGIdqWZ@V0h~z6 zLvty(x_8oaI!^YU5OR?Xg{Oj8WV1zR#oc4bgXVRPf%^hC{Y*z{W7WbF$La%24Nr?% zzYt=2O{SaHP784wx1>m(F9%=@5s@fl3*4YWvbaGp3d%5!*zZV`a*7@crROKM>a5p1 z&-XRC0SWM)03ToMX^WNe5I4X}7R_aNcd=ahz}yPHx3Q#>nvQgJb#=xjw(-s=9l_79R)*f<;r9SZC#9#WJ3BiAz%W`rG`g4G_{2Re+!AVP*9gpQY%=>6Q^8W* zJKp~KTphKbv?oH;R^b$C{_1Vqd2 z@+@(;OiV`va3#MedyD~!(k(MmRSkhZ0KYlDlZz3pwWPQ> zCr?g~1U&QrDrX-4A_s6V=P&&D-!ckKv;^ItpwC>b1G9U40dF6XmhZ*q?JSy18VN9w zXJqC+A~4F@lA^!)ZB`>25VypW(^<)VQgchc3ZD0(*GyYa6r zDFxg~T2R4`ELCEPkc=ujV1}rmmGO`#fqX(C`_d{2qCG2e1vRI?!xQH$Fk>mA9s%l&<95kc{Gcc ztL(E<@tWeW7SC5tAAF}w2O`WngQ4$S|CFEqr`CrI9v-EO+pB}gcOpII_);n47 zCsUn;O`f1wz@W$R7u(T6KL!kIE{eYYi+|9-t3DM>I?ELw3_Rx^TXQE564Rf03UMqw zWOIig-~cDOr>hvlcecQKryJ~9Q$icda2>hJ>({SU=zPxqY!)qBd2dr<~s9<#49OI#adVoGkXg*j9b`9U}ZBD##qojX3?&rbY3o3Hz5(x!No*DjAdz6kJZsZuwuL=j6Z zi8Ve`3vG*ubUtZ|H~GlaC?9yZoRFQHI}%7KV~%lvf}w#`ZrbUqOfR`sD$rdl5~za1 zQj6jCQm3AIQx?7S^%W|d11M0|#2oJiz!?xoE}(%&(hJqJDIy`3i8$pIt%~PD9oa_* z=DY3(ALiL=Y_21wZ$(HP9}U2+fK!t8#!&b)7=n{Mk|l{LI+QAw|3vfdqQ_2&ro66u z8uly6>t%fFKh1!~ZT>DsLLCXq5-PiSYABAd&YBbS^=1L~D6Q4N%}F8iyOQKdI{aj7 zEc-xcQ0|sdSf8aqDUv8EvfM0CnZh`tOp&{GSCSgZ$K_&X3YZKRc&O-iunaL+r_6 z$d}!IS07XNIk8TyHuu}9>>&ui@$D4~(!n|ix-rgPY&3euV;Cni}* za6nCs^~N&yEXK>T@cXA>5w2cqQmkKkUTek1EeC*OFOb|DsII7!KCF^jDA8}=Xbo&W6j*C7|c@E>onw;Lr<>}5nC;9jM zvrURcm!8PW;F0uXoM_sceX_0XXi1tIS~ui6ezw&8u(Kft+JE?^&0Ze6yw7nIxT`72 z2UMmb3+v-Zq~kU-)lU}$u}*f(u3{nnP)D0hh#c{{ub ztQ3-?`MGlp%NdVlgMqC9#1^OvQqvBJPRaP8Vmvz^7Hc*4a2ewMsm9Tb`FKCXY2Fi z-5&n=IimM|f?;_&t-|oG9}qE_AJk*>3d8}=HwOzqEoJ62TUI!qfZ zd9g;+{90^tLf2STJh6-V4*8)tsH|+w*E?sIMKDKI1UxuLSLsVlr&F6k?onAQZTMX0 zyR7yD3JFsEDxzIsDhqguYLJNt3JRuOEn+`X2zd|8QhVlQ^4||(d-au-Us13ZP+DVv z2XJ1pNd~ydB_RGpx~z*HI$+`h5;(;>`rpXxLVyRUaYrS^;M-Q|5-Zb6UhvItvb2Z2S)-@V%b9sPy>kLQXP!tjV-^bN}bZFyy-RvRo z8}L9zwyv%M{}n7`4@lyW8l-#rV+ZEe)=xnJQTYFT$b@=GNP4rS?wjP^f*r14gQ7bA zT^S2trAEtu5 z4~J_1ziT`LQvdb-bbCS*uown_k@(L~fneeZ_WDgo{0W$7sO4BZ0O$>)*$}<@AAMNEub&_;>9+ z=El+Z%9=ft|Nr@9ERZ9)HifsK-PV6)l#%*?8S;V+7ExMK@}z}THXA;aj>=*o!z9Vc z$<{#B0w_`+ut#rBL>*T;H`%H+i67K~ibKnR7%;0O00)Ne0&B7M^c34^#tDa0R{o*HtwiXX8mmHRxbJ-`1uSAJ%=XAny78kjNFoiR18I3e z<8bE{@KW+!|1d$qup>A*Ir9L6eE#H7=4M|cY;`-4cwF@7V9QxD(HcSI7deg$n$K}V#Ky)JYF}P5YE?P?kNA@|kiUz>j@*Wz+`+D^Fxq<)RDyhbd>on& zESO%sI=2;=E@fq%LrCyUM-^}gmyb8`Ah-q*cLBBm>Jwu*Po!uue0~hl0MU?hT2#h_ z%ED0Ajw-;ZNC#47^$i1-awXKt_bz0YYjzZ0f}4^tq~M)rBT4&WX1E_Vgxl6piU8VE zL~L$zq;F(I>p9>EO4}!|Rza483ODpt&+)k#2+x)av5BxF(c5mTbA4sk`_2sX^n)7ph3nt{GS6Kt3u~}qPfNMtFC8q1VTTwva-UGlYWd2@he4% zG$xBfLcI`BSRb?cHix?cb^*~OC)VY?FgdA1DcZvd8P36uQb@42w=W%UZFGi5^`hWH zTL}1JjE{cTP?!dqbXr~T@E|)1#>Eny4;%BNklNetjuMbd#rsE`Y&)D6xEuxrWNpL1LJhQoj`v zjO#KbS4UsfGf&7n{{$_buAxH&-#HEYd|Ey2{^#d92Gga|-C0ro@}nB)!3eSkPHlv9 zb|je?6H=|4SfRA)^52jD9vFC>=5e?TbmOm;a{B1TMjqA-$3r{iU?Wq0R<}gd2&jr^ ze@P%MVipW)E2mL|sA%I3D~P=+5latgQ^dflKE&|5A+g&E_MsyL#D`IHl2uwwE-Mm< z&kYp#i-!o9-bz3tUhVJhdfDyrN`CJuQa7V8PfY*ORDmRK+-se_?GiBo-c(gxJ^EH$ zKtN!2Y46SgG^O8msVr>{QHs@NH0{ZO+f_00-3A#xh?S{6bq!ybt1CrY1IAwQCZ2zJ^QeNBS>xC7}N(pV4)d3mc1jA_0U?}Ye_xvsKq^l zF80fc+ceedNUX~vlUCReM1JD>{G#iieCeT%#q@0j zm&!YFDY(~H=aze`5pmxPqlAq+k0WmQDWoHo9IdSjN#<&cpO-C}8Lp-6tax^_Ny0tf zoOB#FpLDJ!;`vTCF>$dojc_;HW=b+InLU2;qUg?e0k$Bc`hHOS* zlGv69vrp;$AM>P9F@(cIHTpn5#Hi1MOKNyu99#DK><~yRZDs`Pdkol}**y)J>TjM= z$5zk{{QgbD()ra>vamxK9yf0(<|>tLyK}~2^Kz$FxRvgWVW;SMue#6I+hgC`p%R>4 zA&Zyrn#!x~oS}uIW03Brx*mMVj-0wRqX-Y>jwNUx(VHYQL?mml+wk05p?9iT&nZK5 zXt{Vh`Q}=fZmH3YOW?^q+^M76b!iSV@ih&tz;8yt*mXGB9YX#U`#H=hqtb$^lYT4a zXBGqI02+ja**}Z$FQ^SpDWH9Ojv?5`LVMQ3b}L?(PKTC&-P-l+GzDUw3X(>?%Vxlg zNq#w0@`}b5=|l{as3`NT@EHU)p;n5aXeJfCDja2bCBp{{CTz(WadAU$M$>e2HkW-c zQRD2P3#};IVBME6TRDDwAQ5KcyUVO9wKMN68Df#LCK@IE)^Wk%C*MMIzi8-E;dDHx zlPTE2o94}4s7AGSH*jbrl5T=tGd~=;;Ya$*hk+n=e|rS-PCC7f9dYjGkvhFtkY5vd z&m{?8i+}w4<`Lp+EbhZ*C2{R&jTfn(0^p(Bg`f!`MN6JrC|&^FLIesgFeW4}6-_Y2 zxW9*W8WvhzbD-6AYB35&tfbJEWvVC9=)S&hKF81V{PDQs0pXS5bwu7-Y{MXyqc|~w zMD^*UE3}{T&ve7!N5b-fy$q5#?PP);^{4{&*}Mn$+u8y0)b37kZZrp#_cElxKW@c)3$1bm`Yx8fHm28**CI z_ePBP&t^k(?KJ)hd=?)@V$Pts#bxB`uq3EM5_L+*&jyg4nBIRQ%%WtA4c2)@}#MU~{6|fa?fTDh5(Gu@S54)5v7O zLx(s7_xr+LFpOj45!rtA2SNCEbsm-N$g-hSD(B<+e%>Hdoz!qH+63gxfM~Ifm`^oruLs={XqHT zhV&v)a)Uf>xdrn-^|3_upoHvqi;4?MxNSNjHmx?=TTPOhlZ029?7d81dYq3a%47B|r zYV_oSNj6zEb#>L<2G(aoMm)95gq~bsIP1j8KKV0eD4+{r*MC93?*Je{DW$m631K zUP^%O8C!IeJWtj{QE5kY9xObP>TwdNTLP%9#K*^%9lUf@&vwfxclc!g^D4=wZ`exT zXvQLZiDcq|U+1^h>^h-04N4#&>8CaZ6bP=G*DV%Pin}{qojby1H2eePJ+F&(Kby92mddM&ABQ{tF>q*(>th`Z z=vrG|Ks>7`>nsAQS2WO~6(|PizU{- z%Io)Aqq(7U_*f+`jpye&c@J(Ey6bBT|F-1S!RO1utW&|xXS&C3XSM=-#!4(yfS#=m zaIW-69(t)0&rtFL67rI%VMIN7TgjCvIiyW^rTxQxf`RyE>dDVV_g_n$3C7p+l_Uvf zDQAF!;3|$GmI+_T{7N8|zw6Z98p_=VGT+f0xy>RK;W~nTB4`QY@RoGiyA2^N^xh1O<-EHmd_Y;xG-^oG1S6=He4NzS9I^ubaeB|Dh zB#Uo8>$zGHCr40Vk?hp$N~o!LL@i0w;=H+TIcPdoyPO&FiC+k64qIiY*%Br)?OUAd zlq8y}Wje51@gV**m-3>d;$VV@ly>J)@blAQV~{G5gtg13h2U>Mq($TnF6Y}DCBtOA zx8uZ2rgbr8(fl}BsOXEc;@;S2FwlCCO{Aach;Xoat_Sh<1>I-VCdh=%>tuL#$z{36 z+5M)D(>1SW7LXKIJ8jq7&^x=4<@F@#f32+y#m;y|7{gkH_n>m9hg`0yFDN&x&v^)k zSZ$k}g%y6PIwS27n4RT+B)r3TYqv{9SjkG%6z%M?8?5=f29IH$k^Ky?qidhxwPxv| zx`go@RBGXe32mFY>zLagbWauyZg_|%9Gsc?zLmVAoU?27Ui;Wxf7as>dJLEG_pvzG zb7J}_u3UVKecW^T5%6tQY&Gha?jfGZmEAL?M~i03QGA3}FU^p@?nQK;g&f<*!RO}p zU#e;+U~V731m+E`{6#hza|P&0?1q^2+@SWmy7V`#(2%|M;KZ9#!?=h3}KL`c@}%Xyv-jldgEim0*&$ znqxFF%JHgc#HvSp==yj`4+5PJgV3~8 z@2Y7^`Ss%(o8sDY$=G7_bha9(T88kX^9*a1#kzfuq}n(n4diSE!#Q)}uBqi%kZ+cx z2eCEo&Sq;1TQMnhsYU+@>`pr>_MCPY^fmCn00n=+#QE3K^wM93P zU?B%x#{!+D%Srt&ny;;5=E!AS3BPWj36tvpXdl~0ggeKY7>Zb+AIb~wW}wwA^wG`{ znnc}HK0Ufp7(S)np&wi&jA1k1G}LqI`tT8XgJAZ-V|C^%UYVtfHDu&ZTBV1#>kZDw zn6NL&M1PhIG&OHr4>DusQ80M>>Ss=^4ZDrDp54kK(^)<>$oC3g)c0tU5DogB0qUw=bY+px5JfU>Q)Ir>xi>Tviwo2W* z(KuB^*wG>{T2X*D1qYFM=(|@?PPbxR8t!dPCVHAV@fvUF$`yS7fUx*@MnXL?EHw5( z@E%Fe2mAcUX|5fgL8A0AW}59jyU(;{={mUU7cktDu9kSJoZ`z_x+|4IM_&85k@=%K zSUjJb_-;mzEVa5tH*L{uEsn*YZ!E#;b$$CbY<3jxllFcH>jqG#8a!1FDCP4zTB|AA z;v=Df>8|gda)w|bnLN=Ts-aiq1?NPF<|BFpNG-NJm#!8IvaGJ%>~q@}%wAWP>U=%Y zsPAJ{t;hep%HTy4>*o~k$TTIxfraR6e>U(;Q0d`7=A}`_Y*Ru zzVQhgH^bhRx`AE>h;(Ve*~NBInc4}-(lM%`6;(=w5=VMgIm%{zW}T}%fVzJE$=lw` z>Kok*mO@xcoMKpu;kI3dPeDAc{}*PwSvG@PmJ8 zp0@?efKEH{c}rh}4XZUk3Z~pWfLQq>28b7lPm}49|@gy?oi68SEr6 zjRCpL**;rql-GEK4ukgMsZCO(-LG%|qLKH*i$$yO2+%S=INdOGB0lOepCNcBrndiP z`6*L-$`3#9`9T(LE+(X+13$y7@A+UD_onEa7Y(o?j}6SZ?UpmR%O^^q zP2Jt&gI472X38b#VYv~JrbN7gx@Gh=f2TsYhb*50rf<@7&f_-u+q~bqnRh>~dYf-M zo1YB(q9P-TAzlf^!l!)oyWt|QsIy-2j8o?zM(287L#+2>zoftTaM=*vx&mh^g>S4 zsq(NI0QsI_hWR4xr71C_8{`>lmeAwz;i7QCqZk^9$*37M@9gl@M+YrejYkN{ABDkl z7PDa+Iukhc{WR@yOx@`_*iLxRelp)5mbXGqENe$@D}hL8S2Lum($#4@i44P6o6;hLEcdxU`_SR@lm? zz#8G$AI;4Yn|)YEex{ofo@jpiVVFPYOvAI_{>k*$5lbp-?wa`7swD9!>hi2l~ z=MO5m)}wB^$qRSHaf3k>1#jS`*9%YrBR3v-XZ?#v(=0~C#HBK5kqK3{&9Ff=0plko zF)<^wLsbF4M}>;?)Btp@@l-inyy|=OO6o=Fgl# z&57c@7@Mt!g)ZjQ1As0iI^tzEs&{)6CdHuR;6=*e6Mc1=I~odXD=}LWVgR+j~-S^J|c8u20-8poeI2EtP4pt!Py+Rz+{uQy+ML%|3((0~z!+z{TA|1Xb zHh3B8n(nMp$cgh4od43e6JV*n{s7EjC|)dP+>(dh3)J5_+wq#sirO_GCe(myPu;{g zA{W~=6ZipR?u7^4pXGif<#sK^-1&v71g(wXHXM<2l0V zT{f*Z71NdyNXmFvt=oQm`F$7=*bejl7c=zpzki*WaZcCl#{^T5+oS+UbYEmY8yEgG zBIdULrkSfD3GF{(<1qW@T^?}CWsJyqS~~VTU?63=yPrF?qW^ecWK6JLs!A zuce^j?BnCJQi<5M&Z5xRu*?fwC2h}GzD5hJPHmPj3g zE;-bY7?QEj09x%rkCr%fa@Q}M5N<=14QxWo6S)LU6%F%Np;=bLn*1L3kYw=77hbx#?{wBBN&x3w2W2=DeK#C{dC z0_hJZmd}^?v!t!D8a9-&jE4=4D)PA zn-3$o``@+R7u{0kD>B54bsnehE|&xFM}G$6D{^JFZ*&S+6A#$wGiRNb-V(AZhEeq{It3c>} z82lH{VzL$mudYf$l*TgX>rQVSm7FRyR0ahpE-&?6x(WxmX*a2(mXy~WkyJttxm7?M z6bQu+erA7h52B(YZWS@J4370#hnTgo#Y;@0t%DP@4=xwScRzwNK|3#Z_U3B_MEc)Q ze@qB$V(C08d!&Hqs!OHU`B+cHCJDHK#k+Lo#eCr0T*5*fC`fiUg_&#uPXNeVfwg1f zKxxGNh#(e73=z2@DjSy&$YFh)@Y zWPrMrl0^ zc?#p$R9WZnXT)|}#xBL3&5nb7cW4q!rj1@e#yfETFef$5VEVOCTxO_YC{PT=OfCE) zaWZb40+j5qP1bK8h?sZQul};b?&B4yziQlDGz4gbYS0>0L8x4u%I*8?@@!vnqSpBj zIICIU^oD|vv&rqi=yK1&R}A)cZll~vcYiDC0V7=J35ZK)_mRkyaw`nA$S)nJHG@K$ zHqWtpff<40FA|-eII;ew=#>N>Pg+O@RfDz#yHMR%BJzH`etroaW2`>!!gT>~##IP*GMBK5HW~l94%8hU9 zhhr3XFsmLYpZe2ux;h7|AT%*vePV$VoIh#3PJd?nxNqiiAye*(`a)-S?B_x{9rCsx zO9shP?ClI%0Lks#lE%(XrA}&2MuHR#E}ep!S`$R@A;UC1(R4!|O95E;FULT4Oy%Nt zJ|;@jPt>Zx7TOIdO$WZnt5;Vq7gl#v6m+BZ=F`&~#Wi#W>&w;uT=1HXSV{PT8pgOl z{Zw)0(wv;%@o28{l@0+%7>l@#ig-K&+-L<=k|h@ec9xWoJC@Rj{2MGMVgPzj&3&)P zLeE4%*Vu?n0qLUWli$7timfJ-#mv#Wgv2FiDfs&=$U7N4y-2nH!<<6aM5Xr^N{l(yO(bkYGtD%}vV2-maZN=VQg~oigPlfa8 z2Z?0RUJQm=P|2FSN|HqWFijWrzI2(q5b*CF z($^J%A5sa~KZg91)6vy84J=^E+7vXE^NHuUjSW_~b?A^`j(W6)`8IhHC+0WnoXYl3 zMu*odGiyGR?!0?sus;1ko1}ka?Xh%vYY$nE>tF@Vm7ZGe3~Vh;-Mc%Fwf;(nj!6z4 zHRdX@tg9Wlqz=40w7;IteX=3UvnmDWaBxW)B+(fk{#%|II_i##7se} z_t7C422OWeRD-6S4l10#KN%PteA`@Bcy)zsSyS@Fn?@dI#ZsGs=_yfxHRJe0nQ!1& zQjU2~)V-*vr$RYn<8Ow}>qUBA?&jAsxU;aGm83YE#>Qqb3p{5)T#ak8XKe3S*+W;r z$@=j?LD!0~y;Nshc6N5J^;YN7-$%_P_2~ZpIwpI%Afqt`p9B_43zUOq8@Oo&-4pIY zh9&AVzvQ924B22c=*|q;*8dr;Yb`6P+^`2OvyihsGe{J@*wrho0ra;{UY!NWZEIj? zni-O{k4%##O=C5(3gM=C+9y`e6L{=9wbePDRgmM`!WiE;K8Ps8vNliCQmra|7Xvj` z?AMO6Te+Uf3ng$|N^q7c7g|ele&BYCi32<^1R}w`wjIjDo!7T{>ooW9^b6|D#{x0_ zfO?AX7Z7{UUFxN(c7I;XXV{*jD8OwUxLo!8>HC;AwNlZJ#}*wOu2{Ke_K0&Yik>!` zvSvXX)q7F6h{XBM=gh&3ai1&CYa}6cN!6t-*}6{TeMj4=R~B4ubN8q=y-M;?uopkl2CFWo9Qj zmNQM<$CHMd8Ommd&INm$!CtSy8D4pNtDJ&E=nr0~pfg*}K1pWre1>)4-EHi6d2UzG zG&J!PU7-Hc9~e4UMe|OIAK9m=t{S1oLc>7S}^;pqub6*;AJD%!rpdQJSNjnV=ETX2@ zhud&E5@S$$VEKzgFZlIGGDrqjAI2u|opEtlD_7sG7rbKbsaWXG=+dxIq!3=koVGr_gM=WuRp#3QBNl(Es%GP|8>1n?&z_x zN`uug#+9D22obK+K2oIs3)S8RHv9{U`O17q?%k4EnoN#M;}k*(Cccse4nK2V-4H?w z7{w`MzVMQ$;OmE$sR$QgKhUX|CuAxV%gIUlL>TT)Q2tpQ>e* zO?6ZYbyi;Z)XO_p-S?c3P+_mb)#4$1AwbOp{qp%g87rciKLD^3{*jApAlIhZ_RvJ3 zUnxeD7^?Cxzjn}k&8%<7D+?v$`~^-w@qGzC1}%Cs2ORz+vHJVxK7H0BZWc zQO*A3)N`IdK?E2&_@zKsVbzjxAP)2C3>RxrH;=aBdatmKQzy@KK5I7U9ke&kT-J-O z@ck}Qvr1juz~ag#X8PDa&ha*Ft)8*Pa{p8+>I-kH!@jp_N-Xn7b)qGQ`SMMfp=TDg z*4I&Gt+$t*9jWpfHEgj2Oe31b1D#YR!oWvBARccv?Y5GwzD$33h8NyVe-m?O9GuV7 zW-=jJ^tsx*Q_C0WONG|S8~6Y4^wt4QfB*aU2xW@IM7lu%kp}5fN)ZI4y97o{cPQPy zRl1pUiP9~pG}0ZDE|C~82EQ{u-~0A&-1drd&NHs-@l-%`@1hUdve}rWk%_5La^}?M zTweGOfx0Y7M@Q$?*y^IRh!z=((OvUimvHtj2I6 ztlF0L*MP4_C-ep@UxTT~AoSLxZHBCcR@kGY2b15waSnAab#MHDj`$z=E!lsTI*1vs z*|SWIaWt(xIedab3FPLA@>tS#f9-am?x$=+@9Q?RPd=F`X;O*!+k>1QIGgDK`6YOy53CE-zN%^@cjZ{! zc}LvPw{0R<&Sy~I2*kGwFq~4T@+Zsy8FPqtx7?BUABOKAfmate01}2_htXg3RT^nT zgs4^;p6j|s4~xL)V*e=Xce^UJ3nqyq1vyU(COwvZk*L3}*CTE?(JLHdW;`=2q7eC| zD3YSz#NQ-#&sbPgVCdit>0#4ii=Wp$Bc3^nh7!j%!%zfqV_UYkN69!EbA0I`CusIu;rBD zk;3#PAP88}KRT3hUTjY7fo+V`7ydM^Se-Y~%3W+y>8{~UVjnu_o_Zd=y!Q9+-_2&S zsl+oMjrJUs!tAq5=Y__{I$wg?b__4270&7g8Xi|rI%dH6_NCO-jqiB_K{JqsWj&nz zl^l>VPRVZu=i80)<`C+_oLMoOvBW6w7eN%n!b$ybKy6ujF=S0=u$wmy8fhg?)sxmYY1TseaxJ@q8T)Z{)#xPB+`20*Q_ogZwk&l8V<C7wB+_tIo80I0O)hb^7v3>P zZ&XxlS2u(MoX5h^rI@ZO4bQfOAjDH4EE@+KJ_-Gv>D1tc7fooRN=o{k&G{7FfbiXc z!RE8h5WiNoaKu6O5OseSOghHD%!J2P6vND ztzwg!c^S%%Hc~ZIfFC0gr0p(^M7MIhjBL`xuVVaS^y*YYUDBKDZ2KPv%2XKx@443Y zQi?iBGtegj4IxQ)XHxDEQ~Z}r^^gxu-QrY*nZ{E@KEO3YmB&&x(^Z*H&TYJnNJoab++se{vV@RolfeI=tR$FkMpXgnw_lQ* zn{mh5Hi3^BZ{Q2u{~U4zgU0$ri#I8DH%NAT=oVuSj3^kA>BbmkzMZHwMxw~xE*e!i z69=%|{A2OIJN5Xt9qH_^!s^DxwD4!-_kgQ4%CEgKg!S-S;Ca8PFhyDPh%9}-QRwqo zilQQMApgC+Wq-%scHrxmP#-> zT*e~GxPeL+ah|+8%Z0lNOPeqMdAi&E`%_6seAS+XglF#8O(!e1pS>RV%L@P7Pwjp< z+Tbfj%*C5L0Kdna^BpUYpOy*9Q!|G{hqTEl9wN5C3;>CRVCQ{Dn0e&%;zyIyT3#>a z(<8kr&dyg=2}*B@DoQ?9(N(Fah9%D4H_)7_31GckJjKvw6+)?dT8(-e_$px9k1CwC z=ZEj1_<3(fis-krq)4hy3@5l5XB=%$$EylR-`)j9?%-LVY-ESz_{up+0q-9$f2V_^eTk@zzP;amfQm*-i(a-zHoK=w%#3 z?cbuuKiRYW|J%#}Qt8KszZG*G9o3h8{!iFPW_yfdS+tT^G41QlL64qmdnR}EbM{)f zdaeU-kIs2bkm|#%&(-*#wqNVq9zk0cLb0fF&7ykX65q6C`%!Q(|_hLc)pzmJ| z!2pQLG2xPC)xcO4Zt<+&ASE?*)CsH3hHNH=g@u)&ur`}u0c=8w1w;U=n!xYcV_*jf zDQ=|xHWaZAP=5dZJcl>WUgxF9Xb$?WdJ@>0YhGb)LDB@rIT0|5h3V;M0hIr&u|h(? zv5a--gD{metOy&W0#SEL+s1*cKDg4y)XLhAAUQ2qO+h2gpwEc~_ z7QtvY0Wv<2Z5aZ)^e<2-RU%vVqV$aH&;-|m5I(l*@z$oi(F1e|>Hw4gA% zaN(us4*+DSai!4$IM(Gc_^);eJ_+80ZOuFcOg8X4EXO`DkWVJ3Mw_QL9EU8ibA37fOO+IM%@cp^669 z0(Vg8!+%4Hy_=B{XOg*&91tPin~CARswTjH5Fps)dHnxB134Eg@d{W#@!zOp|HH_K ztA`+lf}0p6@B+$9I2_mzPX+RUG4G0 zR4-&V@J{@{D+stZ{EPLzIPf3_qUV<4OQL)5eS@E4YxEhn5o!&&H-b+iYmc$y5V>z( zzy3-eG0*O(YzQ}?$65#>n-xIDeIHcOn>5usF2JQkX+Qd1ypf&hZ5! z$~qurgzSIs@qO$9#{M;M`C+@Y7IJ=(*DM-}j1YJ-{Dq7>Sk=Wl7hmpJDf|EkGw?6? z5a{l-9DtbrFx2xHG9o>i6c`B(l~+?l<@5o&rJR%hGjYZK>+^HQO0VI_K&sWW9Ozab zi_iY51TN}|=;zOAX@PCh`hO50@To{pcqy7*U$XfV3Co-aQqwW^6%mg~I57?%w<%7cwaBd}J zwRC6WKwLq|UW-F|`cU3q@8m5xWcCrK=+N+RVQ$nPt0};SaHanM6~u$MFMtE}bsGvx zKhzC6w#DV(T9C>B=(;}NgE+?PhofrgZGhQvupzT36SVzJ2unE;?r)LKy83RjbxbnX z!eynvge67_y+vPP(AXU%&K}rJkE8iN&CKWzBkk_Z@jY!tM;$87zdBtTwU9uvqCqLEBhMUzOddlnZD<$d z`@#g!i>TJ*MQ>zWUp8Y8Dq(pO*b2lS@85uo14nDlQ!#F6IPkBKfxWgacgQ=w_Sl%e z`pFf13Y2)QV1Is=->#p#;FWB}SuUG#=EnRcq;@D_DJslA=Xqt;dE03~CdT>t;ggBH?c+(rA;XT+3tuwQZOYsM zXtW~jg>aNhJ{Ax;I6Tb089nU`4000zrm*OnObJbL!O_szz{Di5ris8V(FS7uStlPK zF`}R9C{PSek$F9ZA1Y36J}NB3ZUlIF+A=pF;h;p7!y;!- zdBHNeC~N{&9#IW920&`Q{(&E@oNP94LpJS4~=V^XwW(+pn97C2e5g8eo`rCLK z-@lc=9nHPr&1+Qu9;D+Ad&r+!H{(j)V0p)bmnR#yf>-ogMN)gu*_gC=4Vn{<9QdB* zft`f@h5}Ms5ntL;(%#3%XE&63B7Pk6S4%OKSZf|=9OYlB z#s+Z#^XJmlX&sg|nGYN(w`=96k2^_Z=x#_`-aQj#ai@8!3*tl|o(Bf2FRR7yOdN>& zi#vQIj(1hG-bU))nXY@T$5flfJF%Ltd6g@_tKji!bo`1ad-Z2QG5s>Zr zTurLz`6?twA7lgs%@A@@DhwyM1v!4$powOj#=!6bi`&QvNdHu>KI-9UcnyJADYOgr z4lMa+2?x~#eK|fn$ks}cIFfVRGB2XIe*VKba#HZoyDWe%M%>Or>o=lZ)6j>x?!N)y zjLjx6uvJU+2&lgk;{&=fh{PS%D2*@DD$+UmOFrC9=qyLAn{lv=It1v%Z^yFtD#qCpi8q6F~-4sMu6ft&}gcn74^rOoaJ-coRBy_|9^nE zwLi}Qsm=g~%J&|{IGnw;j&pY*BPz!9FR&0|ElLkxE`+e4;F!c)(9h9={m~z!J2Axf zx44hljc2)DjmVr(x~jXLR4N1{x<_CMLu~mGGsA z&<7`nmQEj3fp{Lvt`&941mgsM??U@b_54!OI|cyRVmp`B6_C6XKl1m)hom`i3l21V z*;u5ghttO8sKpCzg!)5f9rh4|Tib=SZP_YPeiDO&^R%SKS2MZ~m0~}8;dCqm*qcXc z(cNB@#r!3P<(V%a$12mR;{+f6eNJi?DHOZ7W3J1I3mNgl&;6RK5g-)+#zl?=PqgXr!Wg^_erkUtecu(XqSgUaNt;9;rkH#2x&4UJ` z>)m}fPTZGdYK20W`SNk{!+A^n`8K+aaltyjI$^^$1|zh)Tblp+zWq|oNzNccZFvMw z8Pdua?XKx*=6+s7P#3k|c69Y-v!!=Tags;!G;=D{6)UdsXls1z#ly{wJyTE}c^)m^ zA_do7VpDoOha2iDveCV}d6pdPwTS7gXfC-+uPW7U>5>-@)T{0=RMVw3I?rs@#BGe% z+;YWQT&eCNH{0yzY8^o+(rUpK;1XO*xY_^!QaYYED%Dy>AX4*qLRW177*!C6sy5s5 zxB89mPln-P=?jOd1}%T|W`AtC4HCxoM6Qk58~y_CSYbb-lK1W?ck$Tp(#x^ofHB%S zy0|j2w*}txSI1jDd!=*PO~(4NoJn%cJ@$-JNi}p&xnkgVGiLs4;|hP!@tRU*=8S*+ zresX}(UzOx*QpTieK$eVpu$uy^(r34NEdaH{ma;iggzqoU3SuRw87YVM@JW^}P zo@p853vV}P`nJ=lPc`wqSG-9|WyqtZzq3hmoEuV1T>^f3)553eVsA%lje^zl=fg)E zqb&Wglm?Hx`I0b;7SJ|t1ejSJDiK^Kml$M{%cYa6t3ZuDUEu&fST=_SYt1jk5^N9~ zNloS5$e+e9!vZ@#9D0KSwTz>88{_p^Z(@1!;YF7xc|U7yamKGwJ>o-eucoQIPoOQZhLA4GAolgymO5J~4E?!bWv&G6w$n>*>6mo$mb;fvFYEEMt!SzMbW1l`K=oRvzA*MZ2=aY=x)!yU)FzWh`H@oN$i{<^G%nY z`kT(LdhpbuT*{FiM?(aul#IK9-aiBvX=nX$o@V|Qu3Z!U&r=n4R|VW%Pc2GUfvcaGX~HR-1RRFfq~Hg(i_lMV#mvSPQZK+hEH%O=}? z7MT+6nhM!C86(8wZtMvd*kX;t^pZ5UIW>~2TLyFc&fHo&N%drsG+-E#l@0SkDgMS2-G*adaZfjvfp~- zjV2SF)dM!*mBbD>*MMn{+Xtxp&aO^LzynkFK>v%aoWf~;aa<&>%@buS17#r6l21xx# z9J9$*M$a061amjQQMuH0m(W7L%aWn`&!Z%(Du+@I@5m|Pj&Fx()-H)M+;7tdJ%Iup zv!|&GYh+CAq~2s09&9?D40e>{E}UFW;_cqIITb>&J=(Js7B?YjTxn!(2De$E7hhbC zcMiz*qQv3@)Q8)RBr7O`F2;W_nBxCr@z+DOo8fOM(mTWkn}otiF6Vj>g{7oB?P^zR zesY9&DZs!D<7_X=u0Y<}BlDZNjAwqW#=YtH?lH>m2MR8}V3^PeKuModGG&Od2QO5@ z;r%e2Y|+Uv4}~J`cX^16Sv`+-!XAK_P}DKka_mv4=c`>1w_&khB+Kpd&ghgbV3gtQ zNE90^?7WNq3EfeIdVf-#*g>VL-nt=32LeYtPR{lQ0PBEM4};`abLtblrO+LOB-}*A z*rDPEWcGmwX$md=LQz;XDR-8KU06)7n@AU`>lxnbT<9G|j&DvZ;}lOuH3+sQB~HI1 zx4N?fD}~=S>QSCDG&k|YzLGr1yuQ#nK=Csu_`M0Ob$j=>^A&#g90ScDkuLA<%vk_S zM;`k0cgsPBp3i{ic;P&3(1n4&2kNCidL}Y-RIH8PY-YqVEfUYS;;Zn54>#3=kfI0q z&>%+XVUkzqD<$s0VUBl|33uCe!YEC?4DXVH;suTehS7)QuBwI$9%L5S_(Qc0^(#R{ zk&Fd9%4#aKvv~>mziek8*sV`oxb1aDaspLRjI>8pX3_cW*n|q~0RG!#Hz1~5yuK`4 ztT;6Zk!V9B?bOiS!myo=t(wC!!b5+r>x-XVo8)so+Iu}!@z=jix0iV4E4@v`nCFXa zoC{}?o$~42n*Q?2{s`bjVyRLysJK6Bx`v7|%VYIV9Z4=Qu(GiXCBu7zANd7ZOO{Lv zN8rHaI`ubFeVaoLQ8ip5P_4iFZBK9xRD#T8v1-CqlUW6voXl0JR(r}YEq>v$M{h?( zt5#V+^7fO(?BiIU4%;%>Y1;?Nzxqw;0gsGeETt^>skiUaaMO@h>Yn)?KSi_!e*OCI zdr*y}!&BZDqb0r)y!TXl9Ph+sNv5MRCX3oqb9;W zjjo6sP>*8Ue=H-D4QNT&L)chw2x3EyGf@Huy~{Knk>zif-zr~Gzo`68@Gd@3-S|xj z6dLa)bP^&DukP~%7Egu9a^n5?kf5(0Mi*f7<(%qN$Sqx7(KWH#3VPMsd;TrcizQBD z^aXl74Lb*`S(m5?zK=dtPw9DN;dy|WRRAbyyJ#=&V9jK33Ak3c>O%UV3YPc%jl1}F zU4;FlpMgaD=`J$7K_+UdZYf~Bt4qEzg?aJgYAsNaE-n$}P1|_Z&^Hmz&dibUvALW; zmuej6YC&OP45Vr0#HaCc_=>&cpW^W3jB^tiAn>mOIN7NK;dM`AKPTwaPY}{ZFsW5I zJSQ-HMEQ(9X(|YPCd>SFhQ97s?S@l+v5ixHxy;$d($bmXHfG+9(eWjvpqx33DJEOQ zDOqM_TvyM1m7s3y*sacw6@hpwoFuo>W8)Q)4U05HZ&v*>pN7)}oGgFb+MP2!p3HX< zb=%-ia@38p`f{?>6coSVk8utOn+QCdV;?)eJ~^Mxa-+OML$PpmSzf)9Z)kflG!<3o zymU0%bwQ7VtdW>IpJ+?EK8M9hqnn1uehUr;wYbr0BKX})ejmjVlDZnEAaO%W7S+jj=A1a0#>=31Hg;CV-JbZ{+^3 zTNoo_I&DIT@hhWZgVSY=kL2aqNYVFnZKZ_%)*#&H5eXI)Ir7*FR*Tg6mC?RA;T%$m zB!02zc)}GF{gA;X^giXO{}|zW$NO2PLQC{lok>dJeJo~T`$?nFv6jZm*Tn}>Z#IAR zn_5X;&$U>$L;Vy;1#wepxkFA^V!bw7xVL`r_L5!o-HK6^bvH+ewD7ryLJxwj4h30) zX;&HopN3Xde7e<4Z+hk0iQc`M(4<|oxj3KmN;;w}jx7i^u*C2HO#hV6)>`Elt=Hs& zQaN%botJmPX94k`cA)6oyCvvqj@|eBW`RW2hq?oUdE*LWp-&Bvm6Ue4sm1EzTRZ~g z$6pj6?C(rr#NeEvv!V4GvHQf5q6LWm%oQo}M?JL8lGWtASJF-}-o9o9kmck zBHHV*_XKrbc`1i`iZLT|Kn60Vxay4rJwr`u)#+1~kEFRR0gefO~m|`j(+c$TDwIM6=3a@B6#|?>bn1- zW#k4eD#ox4J#d!g?Qd1by$!az9JN^mMu!m+d5A2g8z-FSh9WX&3UIiUuEf}hU9}%S zwV9ut&jaO_P6O+v&C80bcNwGiDW7C#z`olIIX?d;Axzvx^aB5Z8{W4ts4~O#Amt45 zzw5(*;rW-Eey#g)=J9-*my3j!l^U%rkv?F*l%g|Hm(OA8qr?G!Zx!MxL>~P6Py{SB$>R7!~+8YXt z_h&s5uGjk)PpdYj)T|@ISXCI9$G%$5?tG?j%_@s;H8nOOa4>exg~6~F8O8zR?<6l^_u*+B;R5lOWM;CQ^7f3Y9oNMimFN0&npo)W1%t@afwbU$6G&2ZjOS&QF~hfUHtXwug&qJN zhb}Pma1;ObRE>3@uBoVw2^Yn|Rsn1#YhT~+XSpx-?7i*vBcCPOJ$K|Cjn=C<*YdQg zJJOe!4ZMi$JV!yrcr_`*fRu+Fr48nddL%lFSIw1QHZiL2eE)Ny^NRknNfLC{X`*R5bF;pYW9$Rv+d^o8 zLQ(0`A5CVH5a^S{TR*x5c*gD!YDb!xf?Ejlv1GmKE>C;YN-baz`FPh`9z_wOWvMn1C`yZsx`s$sY*q)r)A zdoJ~i|4L*Q(|V0LSR!L8cWdW=T4@!Zt)OR{LI$@uS9->kcEhdfYwYN;y6&hX^;}>D zG=|vE!Mj>N={&XVtecl}PEFzcUy#YPtno#jU42z84#|-CA;nI5AlP{iJur`X!$rcn zHHYntkywG-@|@QG3MZH#hM(K`EGcPRcu z>D15A`{a&}ZS;^)NnM%*@_|V&1Q8=8K5fuXnE~jSzBEOI4}YDx@H=T65??=tz#!lW`&u>q;;4x4-gh$Vx8_4b>nu8KZ^RFmZ0K zn64&Xz4oJbBAvJ6D(a+VX^1{!^MxDqh(w-vfDudmV-=d<8j@+%b)&EFWoBO>J3wAV)%8;^MI3JOs(PriCzA)ql7EJBTcJsVusI@Jqcdgu+NX0Px^VkJ^fbY zWOVV`zYvd=LlQW1m;+7r?ddJI9^XH$RN?T~G9MkWr@>_V?0r4-)t&e(5$PomlG-mr z>u*ZaFNneoot_%Y)(BUeY64{h=i?*n&QHkOJVN-sM-&ZxuQRc$yFPD=pP)!B&Do>8 z@#?NTy#keFU5&ux`PaAl4&&v`AX?HcPX!%`&Nr@kE`-vp0`iVigkUECZ;ULWwvi+Bljj)Ht{f=YBd*DHt*@nEGTmj%&YS_y*Kb$GYp_i~KUj zJ1dYI-~2^o&iJ<6r8Z%mVJdn}aI=1s&gIe;6fpHsaXGr$PYk_Npuro_0YgRI4Z-tF zo(^hO4A(gpK@!oVzPpvDdvnPiOWs46d+}@gh3xF?V;l?D7d?OJdT)?w^%qJ7-(}${ zF+ToY9fK>2a^JJ~PZS~(J^T*A;AP6*GX;`vodR<=J)CT4-`?Cu)HhHI6#i=6Eijmd zgnl4rNf8?wJr;Y?1y$u~5y)b)kF_7ZbuZh9S}nziN;Q&1v!F)EK=I+h@0sS4*IM6~ z34YopXSRH!9~izn=`#blEO^E%3NC9VN z?Is%~%3o@{{r7xDGm}=R0&k#XTF(4-SaWzE^h5U%W8qqM+y-i`mX~Mu%GcrT%=@us zYMj3LFK%s$nAv3NVyW$oSw-0GVaG%|E55TpFH;mjL*Y6>>W2#ON(0Jf(x1f$a(z6Iva5GeN%f^e9bEs>` zzk!7B?roqFeTYM-Jj{fjerouBv@=y%sBw;kx(N{jOG8pe1o0s+Td+z5G5s%|3(}rZ zJXsMB7cLA(&sMCPyjmh1fE1;1y%TJBI|_b;EhV>n%@F|zr0T29ZeI7oVViJZ+GB2qO+M)^izIA$K0{C@z-R7Hv55ci5rLi)|%f`IU9k3 zoYR^Az5d358&DXHmFOga5|Be=m5@Fu;OLH;95p9Ua^nqU@cP32n;~ZV*D>QZZ(Td* z&|rc@tVw~D`@L>Llpz@TgGg#xipTsYinYJ~tX*vg`g8(t$^fA~pmi`dwx^B@S?-iR z8%qq=priuu2Txya85MHNpa@4E!O%uebD{D#4TB+semyc2jVktwOs+}a`FfQ$`asWM z>*Ht^sD|=iOqpFYtcT&Hvv|>AFe`&D9FvCy1PX)={o*2bOcc5L=L;6cHh>b5J3bf7 zW@-5Y$EPl}qmMhv>@K=^(tSP#a!2b0k$2WeoplRT1zk4c%5|!{WN3JU0ZH%iyraI4 z%az^w#BGSFu0`l{3IUHSn8RKMo#-2;!#Cg+95HqY=0LS88s0fFo2u_Q(&B!3Tg!t5 zAN1}@?wp0@>;4T%QIh%LSJuD+Pay-a_gx1C<_e97-bv{`!<(ISjK)0)f=^5ADHSQN zqNf7sp3Ic1u`B*w?e%$bq=CQcR?W;oQxVYkP$JmWy3JHEm5@}CUg)4iR2tBw|1)`m z9YNV5li>GGjV>m-VD!mw(0c8nwvQ2q$UGaLGQB(pA4<<<0S^Hfl{B_H)=ux;)Im06 ztK}MUTt&ya$t%>ycm9ds%iDSj#3Re6XxMf87#Qfb09Iy6kFB;<3c>rPUlvO>YzB}V z<34=d^x}eN3+TyIF%y6>8;=hfoc$`&nDnVUn%43Yj*`BM8VEDKo;#wA*sKT? zzg(Z`_?=>)U3VqhxykW6yrb}-%m(Q+g6Ta7tVmmuaOjoxmfQ)fDpQ&$YO3EzFVk6l zKUbl8&GP4-BLehFw}ykR+{D2!oP)MrawaLyUA@@`H=Cv0ibfIUaDf%!B7=QjswbT# z%5*HW@5vwu7-c6s7nvS%D*CmQQ*R=-&STj9z?Cq{U%7G-_)65bLB&8RQe+Mk@7NhC zqG6z5(*mZ&74k&IM1^~Jxa-dTFLm1;@$ERJ;Nt)rg z_zRy^=v*Kq{cF;1ZW`Q?h=4u16hR=xM@jIz>5sl(8eJ>`L;()mg8mKle(pHg zq;JQe7r%vY7|_Gqww}Gowp6Z4cQ>ky0CVlAZfrtr?3odI2LQ3QGttc;D||)niWgMn zUy$|uO+tU_ocCDocU?zp{E)k+QAf_(a;dH32Y11F>eT*eB}RSn!b~Uh=c@NFK-l#) zt;BktnFDoV3uOSVqxhiTk)+I5T!XG@K3*eldi1qF z4CthMtB)HJQ;c*}cwUcp{U?M7!dj==_JasgIb?LfqNCGEC2UIY=*YxLc_2f@p`4BU7J@XAFh}1a7k}g+A z8-)WYAl~&~5t?aER4db>DNCL%fA}gKZbINCf*Zn4W`wXm)K+Fye96?xEwm;Be!H{{ z7gNpXID4-1&Y){}()EPe5$?AUEs?7vueQD6xIo4!ohj)-l25$5)Q*N8%sjF`G>n2} zrN~)#D~%dHfOh|qgtW*rIqT}!^_!EcMt1Sy_=(#i;ZQMbLkBboxA*M8JF1VOU3&%; zECbzqreo8|-RCh>hHU`aeT-nlaI{=(m$t|jW9V*xCq=*)Ej@g}PX&>!>)v?B6DJ-O zvVqmHnYyS4ygzWholS`o%oY=@HuPhWuOfIDRHc47tQW10ZG@9i?KRqS-;P~8nuGBSPC7+>1Kc(EqljVs0nCJ`K!kukU|vKOl{yr&(XN7Yra$ozTs ziZZ_FK+6!5d??V$O(`aj2)t?~G#~x{(h3nS@u6U~l(|GosyJ*l4k<(f`7ff%;NK*qARVUTZk3n z6YUpGHu*%?sWvu8&4#${`OI;0^d43PYFAaEz+ep=;UGz0-ygFjNyft|BXbS~(ASp^ z13!Zx%Ypp9{?pv}9>UMej)8!+cyEYtJnUSgRVXU9Ku-mTk~yuCTl@Nb#l3brc? zs1|$liIyehd{qhQCliILq)V2bnQwxad-k63#GAc=ZGxdFb-#Z9eqE`}qsy;tqu1+R z?e|my_)kd1jjB`-wn7~l|LIdJMM?!Kf%rCth?qfg+HWZb15V0Taz?o4!D2a$RLI9` zNPEaV%dIBY91dKB3tzs&&P5J#H=PBy>wj%iH_SKH8P?3+y{6iebha zJJ6<9>zz8sblrds)>K!IOPEj+xi(n1AAT9a6D}r#{h*wyZQq2;zY!d&k>8TX7weq} zq-%TR1eAv@%N%>w_odqFRvDEQiGD|s03k8SubQf37f4IlleFjQ_g_8(t1L083mh}E*Zh3IR&$>fS27?Z~M)@b`epy+N~+L!gGb!0u^A1 zfF_!BlEz6GTx6lat&snOyU_d|(?Yx&J0>)1L-6 zU*AUeBTk5{F=%gV+T$WtHiMj=y9q9R4pf!QeD!jil}5iXtJulNW#UP}qw7PT{9(om za-yzx!3fyF14sSZLrIXI9rsN|Sw}CAwgL6JO&EIwHPwyT=cIoAEa|X1=oWTfrB~nz zD1%~0+1hHY1~3&IWyu7Yyl{JA3uZkZfCk>yNUmIzq#{tUB)p2vKZu4Ca{Ik0BP2qA zPCzi5VarIoaDB&Y*mr{!w(LP#n;^t0f1j=ly()+O&X^CX^x|BujV)`@o0f6nbi$K; zxVE%u>$t(QMaEN=uChP#&b{77Ve!R@&&Z&L-u=+LqF4LfC=d8)wW_wIg+*TSL}bq> ziOl7fDQWC*2vmIc@X*ld)is}j+;M4TYZ{y#4B;AEeT@Ni#=GYkmjn>uSJqwHw4ZG6 z#0_VAP<21HgVAC4Juhka(wkZYo@8OM4TyG=r7KQ9jTISO)=DqLZjfNoe%w(|is3uY zEB8Hl+LWYk6#Qn3Z0E_xZut}=+OS`2?ChHH;?;@k3^9V-5hZK*TyD4v2d4g&Sf;qu4)RSG>9ml#ZJ8 z_U29S$kV~V^nGj3-!~Meq)YZcX@+VmP4Lka2t55}fP$CIs~;Qm&k87&m-m;JAAAfE z2eH>qhY)S1T%2W)s;%6BqZm0=zFh-%yVdPGip*Z=oMUza@G?eAM%LZoEGff_vw+=B ztU?}6nZ5gVDC)~5m?kH|f7?9;B&HB6_lH7M1S&vvFVbvE_skw`Ga{;i9h(d4G$n&? zi#mfgEBAOhUW-}ISl&l-$fYL;Y)?5}oX2Gc#oC3lr3F?{b9bn^T06dIys(u>o?@b8 zs@9+*6MwTTd*Hb_k~_{OrWzr0b(o+TU()FaWYhzVOJDkJZKy7z zajFrj)ZZ*^%+aw)lMKPhWZ{~X0qKb>HFmio*1;fDMxX2wlK2FlMZjH~95cuT*z5iaRkdw_&+^PH2z>6gZ z(eU%_!gSvqwU@LH$A4DPFTGw+;}Bnp`sUNm*|GVeGBzmtoR1(7<*oBahDSC9#1q?% z7HQ_N`RJrkxXaw;nR;TMP{2CfQWP>s%zU7JL_A5AeD&m~&*Gtf4fDENMSze+x;U8g z7>+6x2lMUFb&hjMMTKD0uS`&Da^_DTsZx^-DrvB$1i`Gvd2~cN$(J=-&>-uJ4l#5I zmMrx7Mr3inAJpg>e6pd5wF{v2gbQBWk2XgyQwIk9Gby}+GPf;MQ!#*e)2uH=dGY!I=9Avm%cJl^~5-yLQ zSj-B%9nCm;*MQT{8KP)RlNo@yI1wU#XNmY|*g=ZUna(Pmp8AxUr@aA@@(TG2Drw4~ z%r^fA!+aIgyf47Im&?nr848WDqu9QaetV>{{vI9EXR%W5e}T7nole9*BeQQ3U6 zwBIb;>XJI+@@UXn7(YvT%2aN-Bdb|>m51c%qo$w}t%K7G=jm-xx2vtjoad9E4Nwlu9 zpaLvASU<4W$M|qTjn+m8y%zq8q%B$Gmp2uQsBnC+U8q8xOq)xo1@?kGueX05ud1~r z6JHu)@I1zccVGCF=Z>;SR_ryYS9_RXJpF=JhI~ynmN!>We@~wa@=hGI+@JmAhB{%_ zZ3F=j6SghnTc;EyYdiF8i}Q{I9}OTX#zd4t>k=D+oA!HaT!$y(Sw|+q9x7}15glYo z2Z(Dkqfm-H!TyH~n%YoNFBt~3)+~^K!)rjAhEjYw@^3}9HBO427lrH_i@yKgoS!sK z#hQ&G%XW6I{c|=yp+Bi^m@t1=xnkHhc$d82$-_R-C3g>4&vXV+1t7ODJON+Nhg_HD z4Nqa4F|rFoK^PNWBX&vUcxcDQt{e-O_`dZ|K@#}BF(aKZrbxQ5X-c}WiF;koH{WD0 zFH8V^QG)wdJMz6sb+jUtP;a_(tA0~VM1LrIf3@TKO>WDXI0VMEWxjSwG<+ufzhh>R zd-Iig2_?5VJrz>d7hMi)SVmH4m34UD^A@c2=$-wj(K=j&tpqiK;x#<~j6kV5chYB9 zjttwezM1G3TuQXcv=Bn<-@8vHm;X_lVs!UMn)VjVBvL2Ef#iF8F+u#Bs%^I2oJUj-L4s{V zx1Wm98+B|8HVTVnH7Km_t1Ri;7aj!zjqjA$BWxIssolqQPp95uSLIKj+XX_z6UA>+ z6RZ2pwT)2UHQ|y9N}N2*1DZpYGVd+qXht|t1LUH5=6>t1pI@)msl`85uNxI__B3}B zchNEXq-5wT#!Je$b6Rf@>Q;9&S9}rR#+5vubNQ?P#DS8is&cN*rkavTNCRIH7(-rd z`>`-l$GLcjno=h)qg=YGngtx)tbl@(&(4T7PKo?|xw6w4&(l7Hohvt4hZUcP19jY< zh=`~&9i}H*r60WQ`{(HC@Pq3njs5^*b~d)VdtlH{wX)G;^6RsbF>*gua&rycD+nZn z(`}@lh{2sah_Xk>qxd-mkp5(RF=iHdub&95Ua#%Q1bYqICRTk2Ll0w_#q6+0ED9L_ z0m7Fr?9TL&8iUB&ZktD=ttZZ&2s6|cXYZ5#2{uVjnYZ2dqfDuJD|c>>&q;&`9b1u zi*f@|3cqkmw#z=4jrzC0nWBT#JdcVzV0npwu|ABaqlMp~r34HT44>&97=hz&D`NCm zC299^X;XcLuU#}>hK9_If2qIxcDd*GiMyk!h4IXI@sw-_Wkhs3%r0o?Z)sTOX6V5| z#;^|(tpZ;_eSJ~mOu+&tU&(Q)v8VWV0Jb!2yhKu|$>dH8EK&CURHV+~rhV_&SKa-; z*z2BZg$sjpUDy}Ss@6J%U7OQ`rEE}kH#PyT3?WXJvZjH+F8nd27{`Z68;Wz9Mh<-O zBk6(wV6!$2=J zB~#g?*^(rW%leG4 z43fCsRJ0!Jvd93$9DlwEzb>*m1^+Pomy%-{+9;Fb0| zmw7K1_Id!EQ`bL^y@HOc(tL$^RdgfSdf@l{R|=P2R5JK~MWJpL|+U9 z2+mbq9K`BoyvA1=U7|OPtWC+#pUj1+DJz3gN%ukLeRZ@?Si84X;2i=1FN)93M4SAk zlSPB}U7VKxW`YQR_ztL0+ycXe8-OfF=QwBvOFmlC z9n!T`s>nX3(J{T;LirMvhUkGfjaS~Vpyv3@x%Jtx&D@dCT!4^6tM8wvUwaK6h9i0L#N`K0+9i(4(Md*pG&+G`BEZYrjL(a33lW$pB~H!*1d(7wI1sGrFpu^kNjEw z9#U)yvx|kO<0~O|_zYk%Iq>;O(}4`u_`N%pvF)Yfu#gf`h^Wk)Exa_pIoeMt5uRI(}|D=Q;=9D7EQ z?T~qlB70?XjPu^lq3`$m`|o}KdVha(|Dn$F+|T{o&pobnU)Sf_l|OfasW0HfSFa+a z%8dRJPmr!rP8*k0s~oZFc+PecAINJoFm5jw&2ARUAyEWex;G{AN~%wJkJWgBoKT+t zJ0h)&?d_nul3FC6hD!pz7qip0=~rs^_~4v`)o`&;KF;7IbFGn`JZHa@@}HSyAhpWK zepY&6Kj!S`zUUJH@$;tG@;!tz&{UQ;0{geu%bVPinq1oZ$Htdc;zqiyUWV-UYG;cg zVnt2psm|1w&$|q#n^T6q-rk@{YH;yzG^Y)jj>o*L*wYaw%q?#L+Uv@M&2c#y|CAG_ zLY@oTP!S{&n^ONo6I*_@OO+RpY21?K;aFned@G4Xhha%xJtroRWj9(HSe6eo@kRt$ zjF(sat|zUYX6h8x1QRl;Q9^Ljpz^wk?ts}jx35+Pf&dvoJ^RW&Kh+lHvxP!p zZ}LLUruiY&u9}4XseCThM6Zfoa&ms1Dpx3xT`~5_(Dl&;0-yV@j>P~xGj7KprOo4( zns~!VmG>LxFeDk@N}_$M5D|rR@&sTor2*<;N|u>Q`%ov zbM!zAeo2RJOmbb8dq$%%HEj8WydiFJ;OJD1~!PSla(CvM8oYBJgXP?1Nl zZR4|9FuK_lHn2H{MTpl&v2;j8I@VUUKJrlh)}W^6^P2JixUZQyJLI8-JF=8jR5t73 ze!&N1vac7twwMjiPkn7ZM-k#P%@bxU%c~rW4n;hWBT5{n3A?PP zmCCufP$SpqeI@HFcJ3fsLQk%KrCZaKNvw|H+*yqJ@c8?EkPB+J#h5C|z9rDpwki!# z3KBb=f8~kr2$BT+nUYUGzZ`t&QmdvUoux;2CSxq!lTqpcR@_%&A0Z^DI${ReLoYs zhCEs|LQPXEJ0Zk5ghfxU=;5|-2V+q^>=?I7YG94JxB)|m<1{Mi59N0n&A)Y&84aqMQa zak_lq$~wEP_nUpulLGG3JD%ISomS_jLh7vj<>r&gZVw5%PE49E9eaMQFJ+)4{R3xz zi3l5|Go_))Zjk}Zhen)4p8BW+5__5j^17|CmvRZTC_%GyEKTX&c6gqfM3w2gj zz|nFwc>4U$Y+^A%eGbh18mBOdGwE>uV-xRJAs6hZuU1S+=!y%5v^0!*E2H;&H}_aq zyl9YUC;_ONsH_^l&3-85MZ>)YR*B(buwb1FZ80EcBfsH=wpT%+z&tZEaMHCs_FR?K zxG=UJoU1>15FBT~DBX%U#}UMfNg;`n)#7N(?oZXvKbwRGj#TcGNfmT5+#JAEOirhH zwj%eDrwDGByL(Tmc(-AIMPAJVTaL{hdok8ZdRI;h^{G8}rOdw6*MyNQ__^?sBXAR3 z2j;Fd> zCa$=#6q=4JEM?TVq!Tbt^V|S%P+*oC-EEfklz95qG3MzpjR-p3pPL0y1aBv)%@( zJWqI@ymik6NxbvD$AXVUv)syQc9n)YSp*XwoTCy4&{5p1Za?Y>Ql^_Ybdttp7y;z- z7kHPxWJdo92^nAx0HH|_`;8vu61xD`58BmR1*=-zGdDTT&As|D6R{@*e9nfV#S9SfNro-)yree&d8o>jpGGgH$Cj8rCrl}sAJ)6Bb(c1woi02(|@${0g9 z;CK$B4PRy`$dT{)G;uHjbS?E{I#FB$^8pW|goiMMYr|`>a;bsU4Q2$=&{P1A!#nn0 zeYG`TAHxs8&3(PK24mnmFP(ReCZubIwXMn_i#9R%_YCk}-daES$?e7&aPC{k#aXQG z9!T$T8Ag8zO&s`jY?A0w9^!;=)45Cz9gPXsP&SaID!1|FQuOl|=$J&h)|%R%w^xnJ z7!voZO7+9ogmtzwepc?bRc(c*MrBzb7}NNG0Ip95T;z~I2r^OM7d{p%tF|8Iy=RhF zo=ZlM=W?NdLvApbE-9V3*b$+(iJclSmKaNpFAij{#DO1xO(kB;^??-XL~!y+SSCeEc_{FkH@~h) z55~Z8v1iYxbG0*u`S&THYo{)Djw#SejtVQeVuwwxkeez?(%v4n?HHb})=kM*26VtV1C86?K-YXs0eK}LLDmLJZcWkpDr{p$}Y1aKh#$ z&&G^)i<8)iaoF%8c{`#%+_F#Ga(q9nG*M-^7fPJgColl#ztZU1;&O1GgMWsog%}0W z!hXlPhUpNsjhB;GD4A$C7Ssxjx}@}RzMbz+0hqI?;1m*{Y_V|smZ11m+SbdFGQ%oy z!symV*}1uCw`I3QywYBqJ3gum)wS+l8EWh|R$2(zkQiluwSQs+H48$54+I9K#&8E) zl+MntS8n@YUkT-GOHnW@1;^%*W5^iF-KL3Jf+Y71=3-*AaNfzIWVKr^A`a7i*G|@o zmxBb4%4<+sWqwt?k%jszFxcExLIue^W(MZgSFpvwQb|>Kpb6xNAE&`B*0+29cICXc zy9=cu5$P3|3cLsXj&5~g&TI4iJ8FD=pR>G2*6U2a8mR4@Ja@S!;(9{4ekFRYfvR=9 zyD4_r!!w(Ta24; zDe*r4hD$s4>>^0AC3yPa>ss)%o|vs~`ZC+AxJU7?9OsQc|GLaWKYzdHSn0}ziJeB? zX_pRiik`f2n$BA_B^z{R)(=4xR(b(iWg^KA#}_jmOg+vyLRSoE!0% zvG5b_yO8U->$r+h%Xq>nrbFa2US(y+{>&zcly;S-Q&R z%DfXESU>FZb7FmZYcu*6-&-a+t*V_n!l*KZ=CU0(kXm~si5$Sg6s)&X(Jm6eq=@)c z^N{ygG=bj$Q^VINq0~XfUZT2=?8*407}r^i%vj<86*2w2C>Z17&1)A)JOk}r^(7KC z#k`Hvf98wLVkJ@+Um-DqDj)(Z5oLZhm@j&l;XVe5TRFX&EaYP zWuE$q6@Z;dGp`lHevWjQ8aAnZ)3YVI_oAt1A(Z<8wMrhcqhhQr)D{9@a)_>D{e2hB zq~k9$k#-wZ4!~dBgkU}u1EsbJk?0eF`Q}h?uFulT#F?y<2};28fw4ue^0$^wwSB@M z;{DZ$-Ga|_=ROc^sm3>W^SWf2U zKTvX))p9@}t|==iU4{9GfRk69h{1yh+ z;!+>ow6vES=Y%%80A-*i4whb|`q}Y9+nZ1PS;E&XsxeKE5M1fO@@y_hlDMJr`xxvy z8lW?Y0QQ(XdU==>Cgt3|h!tVyIS=jA7|&S^TcSNyD57}Tc~Wb5tr$c#1Js$OG=O-A zrG0wL6Sq623S+nhodf~^2PEWrp9&H-c==LjcS&c@zSGO-oOw z1VEa_pyuc;!7Z&m&nt1Rb6vDoNJHeb96%&kn+~msvqB;m<@MNdd?p5Qu#W{F`9LrcF60Ttr^cviC%hGCqK^QufMB;<(wD&LN2w-wBAIM^Yx&uI?-oFPY`pT4_@A#sz z+W464d;@=;&YFE!U{qaALZ2n*DkimK7&%*IE-mni3G}q46*$?@u+}FJfe^AZXqLaL z61frpZwe5feI_K5QB{%*&EfXDI1k!lzx9FN-x(GT5*X?;K&)^#nf(+7qU{rS31BW6 z#379?*b09+7Oy9w@%tmwF|q)~=*KzW32|OYiwb5QPR#m%8O*ODXor>e!@=Hi?cH5= zGK-F-vIr=o!D1>EEARopDM`hH@8N$3X17ZaKm-T6z3hBXSOtUqf<|n;kGob$^I&4R z`p1;8vbcLG++lLD)BPtLbngcmdf8p*WmimQ=S@sauLFWbe{LZ7hbfUT?N)G!9sjdW zEAF{fIOm*X@PJxuZgFHWE(;Hm(JYet?(*vtUS$631x*8wy_#MMc$ zV!b)dVc#~k`{L!RB5U137RH_ZLwCS8CaT8Tj8@GW_MTbaU-p(H7`Z7fK0+F#qNMbZ zYiMmgy?gHX-4y{*+Wb3hBN%8&0G~7{wr%SGnmxMU=l6Y{zXwVR1cGeV0FG~k zfSXBium+2c0Rga^EeVF5XNGXX;17CEbiouA6=#7(tsc~40Wd7rGbt0q522>afz8q< zM%TbVMLzVxJ%C_z?o1S#zhxWXM{dc1eBMy$k^r+WK#fcWnBuAcod$&b>r)t`fl~lW zqbh#ggSm4D=kVXvP$D_m*>p`LFb54(_DwGc@C)CBj}w%BFB(J*8ffu10>ldkxCaei zq*v8t4of^u`_sbRU|2|g2IU4!svl5F0O80hF$A~|R3O}r#1q%AU+)HZjNRbW7OYI? zQ#_%nL=K#`M{mKrg=MG*>w+dU?!$cCzrI{2<{Je$-FbC}^p09lZ2rn}UCy?(h^T1nVHI;BFlf9%8hBq0 zp-0p(A9?o1?wcYKQHP*l4t@aj<9)U*_2I_+#T)5`8h>{E@ZllAT#QbmUJ=69JGHRr zz@V5z+6moVEWJj+!)Hf$0BlUpz(PI7+Sok?bzcv-r{Ev2kZEA8BmVMj2OyDKQXdC- zp7(dRSe~o8v*T||IhzCUhC(pT>r0i(o6u8qWVAm|8*)5M(Ed*gua#6)(?Rl?w1^05 zmQe!ObEPu7Z74Gfz7-gxWAG%>NqPfAu*1QgYr7^4cIE01VLge;qc02sgOqSceAk;K z4|q8_IRTKeoRy6Y7w?60HN68Q_?Egjbtzz}chvX^lc6Nf%bft$t!aJT0VFe5hPK`3 zHh}ze_|6JhBdUwxiL5ZGjYq*1bu&iE+UmMTU$sSdk38RZBV{@&Dhir3HgH1pnoh~z z=J1ndn9iiY$dm2^*m199#vU!~XX25gegcS>I!`dbJ%ArB->HC_-~S@)=nIIwukO){ zftAC@_G{o8G#+cyom(-o&^zXCa4jWCo-4tlN5T?g`GKE_MsR$XAlh6PB!f68u^F-C z@Jtvl;yxNPkfqq+0nup)K#3pZ%Do4A;~SjHZR_SXB>Uj74kPf(>Y_h)THNKg5q2#B z7YE@e?(J0pk|!lYpJVU=oSHzz$EF7yE$I+Rn+$vS%cLj+xK^i^X>Dct)oucBnx>79 zDglr>9Tw?iv~Y1**~~{_m{ibL+1v7AOv=_A63h(p(EbOz7YPn#*t2aZMoN&e_t#)B zMjns?;TvT__|f2j+$4zwCAMRuRtm3Z?kj_3H$q8EQcwWGd^d--{!@uJSpdo#6`Mf= z_7>=2jQr8&husF2vm9c7IpRd(K+AK-0T%(-jk5p-Ch$d2P!7HuFey+c7?2BL!YX z7X(Pp)_IvLRGH#nWQnjw622m=OQ%B+Q74KUhL< zW$?mbC6G-+O()JJ6lRX{Z%hb!l3@hc_xC2e^AJ?S2UK_xM(iP{`0yMN=fQHyvWKByyWmaxA#t~IBoP{9iJFMaQpl`@Rtg7m#xgeOGe*WBd;Z{s^G++o&`F-)|DIoHV z8vtzGL!u62x^q~Yg05OKI3)q>>Hk?@#a}khpgLwdy}Y~}@H(iFgIHof>nGxb?JnEp z0Z7*Wc|Aq1(n;B9T>>zU>Nk*33d(Pu!?(8phjxmsL&zEYbykS|x&oL_nAEX5l`|ly z>UCK^h(!TA_3**F=OHkE^V5sTV*)}#koW?Xy5~Sz0GIuXi;Eoq6w{aLsp;t>!eKG_A1^`RmKL0f=F7Xc{e z?D}j^;Ihs?Jmq!6VY49M4urKRc=Uh&u3qtUQsJ)O;J^y>=MB{aZGa{wMb5Jc%6+`(#8 z?MFaUj@Te8U}T`047G0;lUB3+UmeJDQ1S7?_Xs5w>>NhZ$##cs z9e~GG`($i(p&=73z7DL592$8a8VDFm&LR(FXW<7d`|w^Ob1XEqYVQB8S}c?y7X~|% zUsm@1sFET6lho{idF;=s>*B^wXGS^5$#e8|e!!apyyrZaBgmHnDs^@EwUpTSXMA~1 zA2#(6&GoOmV5D|*@edcLqd9E+(qi&q4^Bf+WPk#70r=dbrFa^!Sm;t4;9O7+_QQ%i z9EVL^IY$Muj~ycZ!tfc;z(-yIa^63$5IKBBAOKo~b}3YzFjVTk9bWtTN4LUXO##jh;6Kzi@#+H@hWmID<3j=&m7!86eE66w*iCr# z1Y9%XI2;~AG#j>0*A_?!P@y}C?;2eZ2!#U($&aWYr4|$uJZ*rPlvGu5{MoBoK&%99 z(*a@7!s5nJr-ctMzQ3ck4o+E*h*9I=$2A%OBvHm&Pp=-WR%(0?jJ^Q7C;&}8d{H7B zeh_)2Aua{vVv9$QE+18gA6CBuf`i+WSEvu`X9Cs^{&(f#QLpfd3h32qASILj*;wp{ zZQdy=DgtsgzI+eGPvPliFoobW0Kmc>UWbykf%h2^L&)a;&iHq9B(&1jIfw3y0snJ@ zKim~E5kc22z#fbjab&||yW?BRxOoLiUyw)+@;uFg^AIW!$pn=|Nc#f+e7qtbx*j8I zawKBI_KQ#iPq8KY`0k5O>OMo9A=-V0* z;O%~0U)=_toSLj($b z5q3$Rqlk}tnyWuxXLhz^2M24VHUU?G&gU4VcZ2=mvR z7dl+gu(0dUNJ!-e1K8i~@V|09>`E?-h+D@`();}E50s&M5WvgPK}h<*f{s3p z{$nUguUPJ(dJ@LY} zO>Ktt=xu9D0jP4Jt+jzJy>9>J$j+emLC4UWMZ4{;O<}Kv&FzB}!(mt4qu-0l)0K7> zxB=`F#Poc-s9Ck+v?p4^WnzM$=;F~P*vxKkZzq7c_W~cfsa0(kfXqZt*N-Vf*~W?O z)(wbBSL|5Z!v8ha z8{HnQt>E+W#QqQ>k*#g%aA;f0ni!7nZ|xi%IwijoRmr=wtEq7(dBNgedgOClcx`IV zWn;ltKZGtFx77q5skcg4*s(}?DCItt(`&cWQr%RhcKP28{J!YMf|%>H0BO$`bh?4d zlTNu5TiXR@?s$nO(--`ce=iRcRJ=4Rf73+~N*R4oT{pI1^u zJ2`0raFZH;YQox@sRUGdxTlSEzDmsVY33@BM&1Q2rU(l4r#fjF6 z^v^~;yz4qK)shkO0U5)F9iKhfxbfS*-sbGThYD@YEC=WTOClPz@S!#f%I z;Z!x3g#KBIMr2ds`s=wjKO3#@XR(X2&(PX^$j=_8`O`0-TQ1YpJ1Vr*Rxx~78*`Fr zsvwDtdOwY>hSID584|x%O&VI^8t>vauL5O%j{fH`!_+!vwfx)0Y}B#OGkwJoD{ZYO zza~Ek`_Bo-O{bl?M^~C;8#617No*3_e*Wl|JjwAEbLavJI>KWjbWk&(ZZ+R9`JrWJ zeD>I*``e+z7ys!ZeWjiKldz6>!x2}4v5#m$reS2E$-kk-h500&798#iCbCPH+Bg$*8d}ltXYp%)^``KA{#{2XhMh@6AxA zL4c^o$#vdy@6A^g;r)`)L9JrYLNvoaMb?XN+bPK$=6vtubZtw?xd zFNeVWznvvZyOGe8P$oWFUFO}^qne$uKvGmZ<}mBbs$r2WB##_7+l87P9_C{2-| z!0PtRvB9g_#5~^IxP?AFKXeMYZ5ZWzD`l{a-jI-Bp0+%BQT{4gp1C>opK+V|mD)od z+&>oAnO#eMu($F+`MU?$OVPO>am_smVPm+dBG5Oge?KGCz-zP5We?5Wm>utSP?V}$ zTVrS8)uU;gckVw67yVNPGoi{N$tchKL7!hl_EYWl&uNpvD);-{CyK_M{&{`dApg!9 zxA(Tg-qWa*ju$nh;#^gm%`TIJx+a5qHEd>n+D{w)EE5=wQ4Bv8GcAcZB^D;RGLjae zk~BjVc(53v+K4R}%&SZY$RVG!=26W0XW{t-L=$$d)$TsltEu4Yh?nFUtwh=GZM5_^ zg+IJ1vcHHez)UoOv@7I{LrksKZeB)C|C!P)-zMfkrt}ZEg@iFxg2|9rc9XTC8XUG7 zGx0l#yMHK3sgtBF^xtLlLxTVKM-%I?jeKsMPD18xu!Mb^D3m;{Bvwop`lRJ^EjB=8(A1TYHdv8|;M-AK5E*~Va&q%HW9Fh4(^(p2NQE4Svi^G&St z%Di!;OtDhQ1I{SR=*J^^Q;tuyt@oB5yH>kxWNubIL~>^$=_bCt zxiSgdAye}x_{xF1_>BE@ku|N{aK>yA%geWQUFZ~f`86q4gjn=ebb4=LhV+ZI2gluu z#0tHx569NtEY~if*Zow(tt~NVqb1>RW8Se?MH7>3C$LDK=_s-*jftDJ9v*!xx$>3P zunjr(8WE0oyQu6qOb81qnP`s{8ar~{qjs2R3~4;uSatQw8>6u*M8(a0q(a$G0L+Ux z4~I?E2awv6x(nQM%xjwlsqNPdMeulX=%mS;voqD(=9L?^Zkkpe2cMJ zb{!Z1|B;@%P-Rzbv+HstbH7Aty)QHNR)r2%#GK2u)*EAH)D20aKXIFeEq;cz4~HC9 ze?n8<5s0y;<0-x%o^}P=yKGlB@A_)|n)y*&GE{rQ|*ZQ?}oq|c+6T7ZH z77x}}X#EQ4yy4p)!vK0l5d(bL9}c~?uoZoe1206^ASmEk;=6OQ$QOlNX4U5|?2~&A zs%kD!CW3fZRMv_^Aqk=pOEiqq&AZM! zJG+l2o(hAMmor6MH#p8me^BcG+3-TsX2)8CGcRH97t17++mhJ!+7C@nXFepiVGRe* zC-tUw!I_jW2{w=@Tz|%i74sgzgoX6Y!wnLImS}hL6LJSwVEG_~`LG{tAIWc{+cSTd z*PA1W5Lk-W$nMSQ<)HS~q(ue4wQff+2-t(_K#o`?U23uCc8^C?=vIKs>Vopcf(OUt z*G5_$a$j-0@Ce&=qx>wFRiO$q;>A0hg4W+FuzgB>rDN!4_{ORK)bEdz8&y}^(9?Ao zxt%?@nz{LdYFq6{nQ%DCsmZksPxk_JBO+ypRrgbg?9QOs&eEjld&>%$$Ym>PszqHF zXVGUVFb5Dug73CZ;5EANi&K0=st@ij7IjK6z`bK-sip6|HyI>+1^@cx644k1Okw6m ztG%{gaxDB((xsBn(d}yONRoU8M9hox6{doNr57ZgPjAQlSuI*cngU^y< z&$uSQ>c*>&;*5-xPCdR__XPO37g|$1Ret#kV3Q!gFG}1PBKI*p+(dIyd5ytJXIpJ2ne9>a)mtFqp&T{FJjG6362>T6^6t{iH*6eo4lMT8 zvyUswvy;|y)OY;>06;`Iiz?03^6awHyCU3#H@l4UYF9!_bAvZq(aci=@!iGH?vfX^ z2L`v+#|{){i*|AA%g2W<9{DWPX+4-^BZx-Wn8jMCIu_f>7|M{|xULl5lE1UpN_96o zQ=N?Sw0e3b%w@Ja_32bwbYXC&g13y-r#M9T#uHXwgS7As5qMUVII(!EGL>fmi4cW) zubllJ-#Z`MgsOvv(AD4l4YnN~zn4wPjmVG&)!X}_=wljhBG(Vsl64lz0;jze>Onva z1^=MJM)V7sJI&J|&qnkU4Z?R>XK6-QeNhN&27OQ4=QK1q>X@)3FNu=Kmbg>PXOb*! z9!v6c9OQ=;<}}wBe=|mhtln}4c-*q$u$zAzY>gCUvX`_1YGO{`xTtHUEuA7mYNpJV z;2kZUV$mc1Ygzf)SVDG`bV?RtuqJGxWosi`RpLhL)rk9=D{{}~24u~te6;bN#-P7l z=HGJaDBs@0EVNU&=>}X>pgzB7_4{h8>C2zl_c#^r=8y#CKJa0mpXq8S*n5%mT$UZ` z%?R`6eKw@2Zc(NX#bbuupD&n)Heo#fxC2{W3+7#iX+2}n%+=f3T!zme;m=s%<|y06 zmk+D$;6tawt{4}CSj=}dI>pP%sn~;Tornf_I150| zyRxcO6u{?N;Qgn;=jL})R0umQ3@E%9rFdCkKdqvTB6_AylM}VE4h@p$TO;in(JXAW zZ_@09({+E@d3X}K=D7#K3;?%2oLvnAHisOy8ECudnp?T~CN7G5Z08GcrWBjJFrMpi zy1u|ra-E-~?&F{R?{ojR?fpwhC;?jH^Q!d^gHCU10LpxggXCCm)M9Y{j7uTsk_kc! zhQ0Sk7NNT*oIcNnfa}^fhM(aC<4Px1sedNQHS+^=P(G_IM7hE zxKDn5OryQO44}o2)78k@nhoFOiBH&HFe4?!66RkRVbIw6aeN zS9l6?m)`)Gy&U?@s}$qUdfzN6VRoOjV^Q%yn`K|F#d(1)cAD$PO4<%jU4I`fpC+`6 z0#RU*fFei8wxB+etb>hD3j2=tsE6KClM=-9h*<}{j491q7_0TBBFvP&{YFxg;^5vz zR^!AyrHeCw@RO3CKf{5Q6`ZHGSSfUED3&LuB{r$CP?2&q25h1?0dmgL5?h%wk zE-LB7F@r=M4wo<}@MWvoMarL;!dptNCrkjQHQ5m_ia1wD-dU5BHF=`$>7RZiI>Ik< zC*@{jKAkLJ`bnPNa!ogM3uMq~jg=Ohd^EVU2e(Lo^u?QCaJ z8J%wx^7W6)?NWijY-1+JpIsuceukdJjsh7LlD$2Wl}f$2oOv ze~zdHN&xpF0>A5!Vrl3NLXK=pZw6(=#B5Q~9Zx(A!T^I~AC-TnUhkCQN6QJP&sCY0 zCeLns0)WR1$5lltID?aOG|IYb(e?O+D|G zl`fX7?-+2gQRy~sO@lD6$sScnM)sM?`1Yh+kL6YR-ENXt&d+zZzBUj5)|3mce;2jMPIv45-9 z210vVK|pHlnduP9`n&JBlR|)87jm%7ck)7}>u-=lR#}XEwS1S4Jde`oX$BTlk zKS4Z+{h1(}cT!zI8j=sQV>jrK(d@N=0Lp1*AxWl{yVhFZdqjVSGxzfGqM<70yZIi& zuNntc5V~*orTQ`NMButu!PFMZet2+EaG~M1RST2KLhWMj<)>pGd@LDu?O>50M*6Uu zH?!(_Psy~U#9K-8fqY2@E^4L}r#w2`nXdAnGF-bf=y(YtbFUJ;L%ijAN}DOA7S+np zD~UmU%Zwe&{UzF^q2^G;CS37tnXxrT$85TQ`K9$k?AZ%QchMe4EmXzrYrBhF@ZHVa zXS0J&+!Z4Hu>HdrA)rlsrx;go%ykVg5mb;ZktGsId-GeKftrJot9hO)b7-bYM*7p` zez+sLtrA4U7HhyAvbNgYFix@C3T!1MH*$t(lSmvmFJFz?Qo5^Je7`ZmF|EQlF~rg7 zobh_d1G@@N`20H8ulLt{+b@)IJa{%69m=Iw?N$t{llwzVl~PF>i?yo@NPeyh@aw?y zm9|qjPcdEBDty^BQ`PWNU*k=s<9uIzJ=}H7aBrM8l@zIgYg=7r`%!tHnNa`Da&Z?c z<{li)RX^K%hA3!&*;Kbn!)h%zQ^m}C#8GiO=dnn|KnH@k;N_w)Z2yl2Q}+dc#L|9_ zAi7T3vY;C@lgqAk{Z7B58Y_Hqik<=@$FtJUyn@K+)1_^mJw~>!TCnq1ywqq6B9Rwr zuK51qH`~5Cyk42EfHY{BS;BSoW1DVN()#e7uF_c5!h<+~K7`82QsaWP{wpAX1b1A= z*iSWIT|m2jf@)UCkJjo`-pMUoObzyr;5koOL@&FK|_>*#!Z3_>z zP;b?u6jH29GI;DZ2?lWv)!Z&1rarnf_gS#EivGwj?-Q}lN~h?nR~rbr<2GZT;sAV`_Q!{`xXh|;Pu%>zAJ>eh&k0eRXV}snBoCw#s z%;S4kE6n}GmSK+d)v6!FZs_0Z9Y?%gRAnP594f5oaK{LQWQXZ5kxmW1mB73gY+_-!v_+z`!KWqPT*^gX;6&d3kfXwVAJH#ARpR;MPynf2(O3Gr`LcA8{x@voWUSig zRpde?NtcR=r)M?F;=}D+E({H~Ng_K%x2XM;l5w#Ib~e2_TWfIg;0aT2usx+T2J;?~ z#ieua_=I4?jJDTW?%gwWtADtuk)o&dMttTweT9qZwrh!5ue*{Pu1%l{I;&>vzcwK< z<#s^Gad!Ymsl*G9(r}Wp?8utjyg_EJ9>KIADQw|$bjsp}QpA1OA3-EPK;lG7CY#=x z&DW8SVYib!4*Zui3UssxAHdn&r?Rr@%rOz`Ly;D$Uop9v1%+CkxmwYVb^s>D{Mk7*jhHUCnR^80m`kGd~ zudkcetIyjQZuP`YUJonr%VAN8V{Oe|M7RPKh_KG`k7S;BKjB!Q`{n|WOzf|HLF6Wv zuu;68%cNKm69d!=mD729{T_!d`;)Z(ttSVC9tlIaHql5$HXK(`NfcsJK1(b5f=_MN z+oM=RIZv`?2kOWL`|UeMC~t}{_aZuoQdR*EjGFAt(nQcZ!1dRG-2_1#nL{nyGrOjI zEF#w?@qO<|rL$t}b2UfNc^xL?sQGVzyV*nzV^{j4HOhJB>h!?}k~-Inz!c{TGc+W* zUiB~H0OAT#6PI=R8qfJ15UJZxa=KSEDzzI4&9AMzArR}V1b8y*S^vlP`tj)>e~i7* zLPfWzLMRCD?YiVIGyn0tC2eFiVqH!q!~9!cWD>b3jAD%@$GjyrGO6OiT11NtXoM(^f=oZ)>;u|M$w|jKiagCLJ^6!*IgHzC@?Icq z4Oa(&7NS4!0pMf&JN6o5GJLJw&q|dobX%^m3_p__gc{!6w#5Z>eK4ePXepZsQO;Bn|2*N?g)39{_>~bjgj!$KDjmIR8;V#1;e61zNl4XM0O=DP9cZ1V zK8gzQQAs_suU4OIWo0#4=X)%r2_(X8{UGix*NOZNr?aO4meW+yPkb>u{8!n6>-!>& z8Hiz7(*JyW8%S5)3WmCsPvI@ef9@o%BjzU?L%b28W_{Y z{$6DOuM_rXvbq9+oivh^mg^m&0DxoO7e;~g@=NLgp}s3>gwN|G=Pc#3<1xjID#Nwj z-sPUVz&c;@k6jqWb^9}bRg0)mNbL&w+*Q%HDzo+ZB=b8K)er9k&+zJfx+ZR&vxuAp zni>g=hY8@9*q2{i2iPa2ADBxKiVtC6C` z`KwnkPDcE2(-&!rt|wY^7fBi&O*&yx;!sNfafzE8Bubp$@!s$EW{hXGzQ%G6taq>Z zo#7RCkUzJ6+W==Q@4B+fI61pKdV?E>Y;rz8?%USfn3u#>Wbfy{tM=Y?Jbbjr>*(C?+aNM6?D1dU%fb;hj9tdchg!TSZqfx3hlE+I^D9kJG>q9gQKJQd7U z5a;o|c(Kf5XLaT(EtZwmf37}R+pMXa2eM`jO#^X3d&pt?_s`NLHv>b%gj*loHWJ1- zHE{{cwHteH_Px2Ha4TV{XUM(RRLXz1*0ObaRi;#78$t^XI(-wo^lc2caV;IpUJpSg z@Pur6N5I^tiSU+qI+QxC{yX)v;&$WyuV?5gDm3C(K9n>$RjdNli_qE@fOO<&M5}n! zyD-44RgD~)dl}Vkl&T~M9G3`V64z<80|xq`X?vk3v8(!46GW!!lWSv6-^C24xNf5l zWxRb+!~=5g{SH<=HXe~!iDDhR{xzfU>fSJ*6%L%Y(ZHBiEb*M@>h9JuxBlIHD^_20 zS^@iy${{j)65C^SE<|$CSrt6$OW)&CQ}47i^#>S@FrwmsI=RzluPxbF&ia~E!O^!z zZ>B)PdU2e)){tv&b@%X8t)kD79UDxqSL>y(=0FndOkZk|g><+!AwC;Zwt$?C6`3cn zRSZB20-{&va-I9;N!`1ZlYlpQPundF6z~NNN0jaetUXyR_ITR82TbhHlB+Q9;bU%L z%s`$y1OqU76H^gZd-oaMD~?5@$lp@k-YsMdnb1Z|S&UOz+cysGVeOz-V6T&!(NwKsOl zmi<-eMBP}sggAfc3S^bRlVX|gvPS& zD3L*phb!vLG1wfKzIby>d2awV!xx(7wBE`@C+@nfnj+7TyKldL{h(X&z%DtTb?uEp z8Cb7}ugLn?Zlneo7kWdiJNOQ|WiRqA+W|t?kr~sW_^gXCg{QovcK%d_CWac8I4XKP z@tC9Y+3E3iVdoIR;uJ|BD^)g}|H!x>K2}{FDEYf5uN4jUA-?xrB@WE<$qI-@QY@DV zW9!G(%mhfeYMla57{}l1(JqcZL^#|9N>-)A?pYWX;A)ub*C!XDQfx&s3^(VJk#1Pb za<7EA7 zVnx+!H5}cU z*6O6WEDTZA#kFNzMt(p$rs-1d;qkBj?D+Y4z_%2~9M8oe8^DTE)XZda^Q_@Y$Tw%( zBPg1^%p>xSEd}Uy?(^k{UC%1R42?mGzA+)h|OmBbMxpj!V_C}BJ7CCNB)Vp)@YjHCPv*^hjRx&fZS3T5;1@QJD4=>+lCi{>-X64wDgB_6c9K zucFpX77fRwtdLA2jc2i^#^XWqH*1#9>2@lwRz-D-P4C{dH8FA+79`V+*V7~zxbE2V z!D4`JsAc)r#X_!++Re4_wWWo@qLi(+s#pQx7^w-6e|ZfVrTgpGt+2K*kLQ4xY|LHE zx40D|8p6=Jpb3WVeEG{ah@z3N?>SnDF0Orhn&2cMu{*NW5@6_6+h(~hKlc5Eob9N0 zso3_$`qv~%y7<1SI`i+IhHE1gUi)s*@JN}Fc<;^xN$#Rd!Y}3Qwpq(A)_Wh!DtTJB zJmQWctDX0!U zX)0P0TsRW_rLkb?*EPO_veWP~u;L@vk3|}F%QJiOYs}=oz4(yqVrHe9#(foRx60*{ zM&SnE?y~aYoFgf59Hl@we3Cq=@oDLz!t72>9ICH9D3fi@DTYlVR9Bx5SIgEQ8E7f7 zS-sJm40oonjYLSydngW6b8*CMq>;Q=W%pgMW+)}~7iz}7Tb?N#Jx2y@St;i!yLOK^ z6vCMdq~;HEYo|rLm~}WbC!RMF8P!yp-v4n@(%hqMNaKn# zPp>`^ed$xPp5`s##Tm+~_QJTyrAZ}k0~vFa0oPg2y_F_gamldC)cN{k-uY$|Aj6~; zh}_B&gwk-o zvU^j8Ud~yQk;kU|C%Z{R!+<3QTH+ClX_pY2w5hW`H#tu<-lrXAOlR~Qb&mV7DBQ|F z-ykMkCDV)IFdK3D2=|E3-s)*-?sd8}9y0$$I6{e%jF$BTG}IYF9WE`|*b7swWu4i_ zPh*YZq?wT4suaK7SSx6?T5-**GOGwVW4v?Q5Ez?s9gGJX-6eN^g=>_E)7Q2Sn2I=b zTu~*gTXiet5q7*(O|(F<`gngx&*Ek)_k11y*jJ7QKzyq^C^kNPfN;1ktH9Ui2uwrh zfgff13uzIElHLJk9!ceiVB|MvQIkzi=APC~vq+naiEo?8xD_|pb44SEAB8EkfCwOo z*$C>1`0bxM0n`sAI%&dsxveo|qtli4RqX7{QYa&IDk`7waX&rT1r7;HJ@Sa0=cl|Q+nrA4^zzy0g!byc2G)2kealQP z5i3#bqRv~Ej~FVFHe9=hrA}HeHi~EVBlRQpW!y~c>OqoZ$o?+5H@~*gdfk|(KH0mx z(*5cv+~)q%^eMgZZ@ziBetsJqj^z#V_DJ9`>UK+-=#Xi--!0myxf+w5gW(ECt5zw; z5&)~chgQSG!$mE{!vfUoFE&h#UDmQG zV`l7gy}NVvtyiEO<|7OEtLkihGMi!FJuQC5yyw>^V4wIZ%uj`yim~R*^=RDsJRE#R z`DwgF|He(4HwAtiSy0}ll*Gtd9CJN-P4%F9~wxWn4xyfIURjf`*YV==08Ph1#Iyt@9dU+?svT>mB&?f%SIGS zbY?#DwvDE7;f>+ANxnrsWYMcdwa1=Wv8v_5H)k1r2S@qgn0SpI{$Bzt;`EtW*%yv1 z*0%F(uivVu(Z_0`i0CP$^zQu552Bfy{neNL`o4sHt$<~yd;Ruf4)!vozaZ*l zt^Nk(_G@{k)L)76Z2PhU8cRPg$}!>7^UYZj<0r8oL}An^nB98Ts(+2a=inS2V}`{u zwOh?uSrqTXb7YI7wER&?2^wr#eeDftXZ2ET4nSi1Jf)OOrI^H-WVXkG$JZN?@{-D; zyBE6pt8J&6I8Ih4cptbLyAOJOk4CkabEWl0r*cY5e*lY`DMyl_R&ydZY(hz%S%noZ ze`2qjT-PT$eY0b=nI++!Z1Zk@Qr}oORU6xmMP-Tl2W7-}ZgGM4y`ALJ5@HaR_=-@eV8X-rM%PtOT7F z2KLk?z;*-5+PnECegAces(a@4Z>_r5+{TDxchBBw6W9z+N8M<>X<`hV18%_9-rB9) z6Q%87!z^0Y7BP0d-vP#?S?R_b;Vd&E*&|gbW>>tQXmDPWeQ?Xm1e5!UbF@KgkFtrSs~#Mp=FXfPPU*veYe zNw$-WW$aTEgP|J3Fd;={2norWVQit0HGHnu_@3V1zu@!Jdw$?{o7ZjTb-k{+*5~8? zxSnkP4jo@uNcT8br@I55l{;`sX(QQStGd;?=aqv3p0klwsiScx9DJ-7=9nDPDWbsA zDz-?!;M&}|uvWYJwF`He=hHH`|Psb6a!qO%3U zGugTzVn$0z)qvVZbb)70@BoS2cAa1BSoFh{ykKYP^0d8Pl&dFRono2!DU74P%J+WK zjGS{W*&eyQhpig4blBUi(f#$SO+`f?j-V4%`f0kV#@Sf+<)dqK8f|&@ z(|6;(W!tdmgq!SBs_akc-^YRSECMU~J)towYhv)C`Ilx=gQvx(D*i-c5ft-~N%zgM zeD$-f*$-C=_}#5_*28K3Y(FHbVtw1G*xOBFvFLyk7#1v*8CVxSvJR!H#=C6F@TyC> zc{g=@vwn3{;LBysBkg;XYLgz*c~L|>F`?<2;$tz_dtR+Fz43t&S}bwIh-@;t&evJw zSf8R)dzi?;+o0{%=RhSChMwn0+0@1}WbrbeIZ`$KY_n5kAs^~!jFsovu3f#h=otXD z4cK3O{+@lii{YUhJT?|?^`(=9k*qotVIA~gmTaccX4f5DYloN~6d!G?d5SATVOpP#hENXv#Zc@R~$*C(|BHEFQ*r8V3xH{QM3;-x=0BB#T zi9BDx_WM+gQ`K?HJpHmid6EuRczYRj8GEJ#pW=-r%qdd@#3mYB80is16HuzVp3ah$sucfY!mtIfvNAUy8ZvmF7}iUc6C+`6+jaV{JyP&N zexfTM5W~n@t%xl?oNom4@4@XC>+hwDa^{`63X4|>^GK5PHfO8Ruq3GPabHL^xnq)? zICCr^?eadAC{f}i|NSz6nAK)|g{gzP?^CZF0ujm+sV(>16*Sl=t+Wfnj4-v+U)~P~ zrc@zQl+Myg(>aSa``4M&WNYcl;zJSoD_S83HQRa_%J;48&`Th!?WZ(Bvi$+~dVi_s zHfaRc(;0id{^tU-vpv>$5W_J!bR;=%wxiX{SI6aB_rxSH|Ap$eIjpsHJu! z+PHnpiu|OEtWjUBxYB%5a33O;>$5COyDs5omK}$F+OlqpTjb~M;WZBY?v^W6ohaC9 z7WA6*;HUYnp0Hwp&8F(vNhl9WPhh62QyfaO%D*~uC9Ms>H*agaj-L{-Asr0K`0 z8Ft@r-5+@)pDWugssR>Qp(}}qS*tz3IcT#&tHgO1HqG!3eJBq#Ud?NWKij-t_)&Dz zUNI~_I~p94%>r%VPoU2`x_2*$_46Li)4SPYUmPfCKHp_~>4!sqr)C7(l$!_$sp5!8 ziL31Z(h`$w;eQoWU!>)}W;n1_`){WsnX}*)BxnTxfFSlyU3G)%2QC?sLyhDmrdPh;ZwRGEJo!U|2a}bXlt_ zBj1zcAaG+cojg5HMvy6Eiyj+Pa-L*#-|q>kc0mQNL4d9D!)`qh{{ECR_97$Mufc48 zuw)PW{ImC95j)JEKRypcF@jg_`1VHx*2nA5+>U`gU|HMzCb*!+=33cCj(^E{(z-_u zrUZ=s6J{RP86n3VNL;s`WB3<(cnJjjuyHD--2COZ>G3#rZQA^}`dMpE!D+z&e0}+w zTuH|l_DQOmWFPN?qAoW~5;m0+0~ zWZ!x&sQsZS(}wrpj^;u={nMx1dTd!v@4RKjHQvumEJ~w3FNp?9Aipzm&#)6a2kDbj9hGJ^bo;KFQSjZ$()bxc~U>ETH~|Z-fuYmJ=r2F@8B6?&J*i z=Ar~)exY^a$;B^#qc=v_%v_7~5y3O-rPbGfE1+sisk|N>4c-}pRq)k_z>~?IT>k{g z5;N~8a}zTyS=r|&0^3UtZ5o5!!kU~9kbO@9!-Y5AW5{DswG7{6%f#t`&?QlKwQWUa zee*OVK*{TnhR+*pA~$D-B33Q>)_PgP$G>eQ*D3RD&CRIZZ=K+&uOep=zTpn;Zz97M z*z@)z>Hg$!K0=*jYU!Re)$zP1fplh#k`wCXw%cQKLU`aSt9+&6^diGq<(nl zxPZ`@V}t}<~`$dF#QOb~}lzkaV4|u#h9PrmLE}$NPqIzKlg~>i%l6l;XrelXyW6Be|@_Nf} z6?D&^w^(n63$rP2XDw6$S3J3r%t0I=ocYpK`GHUPi&(5s(x8{)!aDY?dUwuGHY#Xf z+tuT9@KRc9-at&Y3MwaY#*B?YybLZuXSF}fpVE@u2PCl<0n^)CC8{ML{=4a#tQz96 z{pMO%@W>O_bO;tl@5_868THejMU(o>@YX8mAkAJNFGMc)c?_@lqV-Jd8JIAS7kvzh zjnd3rVpIqvOiygC{PNMto`pC%fpK*e! zj4U0B)bie^y*F#cyQlaT+Pv(EtaiS5RgiHbj=nc##L@cx%G-N>bB{{BS#{m&K9oZ3 z6Y$GDqAN$R8E46qchD~Z`hSK;`s~mVWpXpx{)re!tf`>c3 z_+se5ZjLB{+9JoNGWV`$%E;ANHOF6xJ!W+3j;*Mlt?0S8Zh?2po%_pcc59gSzH04P zzGVwxRal_|ao@26cLx|@&r?fZGJcQe>Y#n!5PX%0}gBtm!oB<45WyR-+C2o z{E=4=*2LxL*|E|mSl*|cKEFzFF0rhvcFr5D@^hWKuh5bDJtIN7mE{ z^k!!3T0U2HAChe#1U1C0{v5LyI=$u_A~l~XfK9c1+BQEi>RgweRw{|LHR9CuNNs?* z21^n#>G40^`_$Z1R^G3k<~S%lY1UaL_Xr}q<4G7G?5sG6C@JO1)|U-~&SHAjO9f{j3O0 zzbLo_pze=O?RFu?oF!~Ssm2}o#iIC39|@t)axCtH0uRR8*XSPY5^j9fWG;L8feP){ zgi)z;-sM9!p6FcP;W|D3^j)*$_%<8fr{uzusY59#n6cE zmd*|o4%bh`h75GLS{rpyECT|g=L{`FlwBCBJ%RL9nVr-A8yLq@)Tk<>&rIGbxS4>8 z?pL)>^Dit=A}W&wWjueQmXo*W4v2)J8z0~O$tNYue`Xlyy+ZE>^Mjf(#W}YpYVf1l zlffgp&fPho1k5YWZUx3X&e8|b8_`7Wv6D$uKZB^PaM5^NkGE5EM*{KXU-HI04Jm*! zbh~sgy{7h<^&$1{BUN2CNSm~GS4#xoZ-DYBz!^|0bjhQKMHfXikJYSg%U;udxIAs{ zc6iaoZWioRNq`zoC@X&4T%C4=F!u~ssN+n(d?n=Rd8fmi58F$???Pa#E-=qA@rx`I z#0M!JV{6aH#r7?_q8GSfs9cag{^|JZDvH=-wTPj09By%*QeN&&Jb4)&4~_Me+oYNu zB5KLWun>M8c-R8jDU&~(*&TXV{2Cu$Yc|4jcNi)Xe}f3`>0O4G<6lw z=v-!V^V?hVOh^!%9j>Z*r6q!aWOYvYq9Y-+NAB1SkgSA$=e#DfdH`Zan~d>R@ugos zSC&wfu66<}132L__5;o+!$Db0FKppj|1gs`AF#zc=^OH+z(W5td8>~m9Bdl*9;|oL z(H{XAjxKNHhn?AGSi|zU0hB~+DTF8P!1tS7=rbq3e1z;Z=+raV{RN*pk|bc!VSrK0 z4}#wtstqas3X;Wh$_U$h82wi|b`-4d^}Xin6jjTB-mQo<3AKYHFt+)n8@5*ry$O~T zK;soqF({VHn*R)FRqBp#9xd$;+*9uzDL@mb5kHM90t8b1idzu;zPqCsAWTx)+(2v- z^tKsYu;Ea=0iqV*x5564#KK0D@*DM0r+2_(sg6P6c2LvuE{OodFF zofEvUXxhK9F6Y#oRxo)Qog>1jd@m>Zy5 z$pHlE^S^{8KrDeij|U1nUjJgWI^u$LS|Ao%M&p`-9Ktaju`YS~5$m(V8^`ikU_App zD<^vd)JV<{U)|l44%vs;C$9E_NE!y@OgSSvw6i7xyYvx2D03w$(Q#7`!USMvJpKin zL3%16KSLp##&AfsgNh8k&hFRsPak5q`C1?o&>{qf1OR&xs3s&QJrHzfl#^l7qCbEo zzBmE8@UNYj4!QtdDt92P4S1Nq&bDF_#5KT(qIioRbXU$RK`G{qn#X%#nWJDFY$*K2 zU8?*8dKSjgAO@5S4w9c#{4!0$GAn5sS6fL2*=tL8uQ_ zk?@~O$Zj$OrSD+Hvpdbp24pmee*B*n#OYlj=caPqW1$p0jy@$ZKP&z1I{(6v7j5whj(^zs(`1Db}>OMfoYf6LR?z|-M6Q| zw|;wTgJ`=5zr&sYVPYzQ4~9zl^|g~_JjOy25uhzq?fUR@4YV0z)G$Q>)0hW2&>-J> z9{g{%M`J;lg%&6_0`NFHbqxLjWBmUENujrX+sU-Ea}_o6>kiPRom?>gy~L@3|JKHT i+s1$T&Hrsr*^1KV_y6?steY*=6itmR&QlCgxBeIZ$AAw2 literal 191695 zcmeFZ2UwHMwlEq+K}A7DKtMnNk&g5ZDgpumAE6V9Ql%vH-uy&Bx|9&<3Wgq#KoUYI zN{0YS3ndh#3Lzl9!;O2NefD;r|D1E5d;a}D_uj_|@4RcuyJluhTeH?0PDW0?0xoN* zYpMfIodN((QGS4v=~E+`s;bt10QJ;0AFKT}p#wm1XYK+3V2HOTP~*W(Q#13MG++Pv z#m}^-wqEW(-~WlD;N2$t#0~%q2>&N}{;TNocJ^Mjlng&85059Ma7tybQQ&Kie}(ye z!Zv?}6@S8h-tOL%Jb(OzJq>}X6xfae^Ev(lZ1WGWt-I&X{Be{#ieOitpJn}YKWlu^ z9%5ubd7q;^HvnD$AV34~;Ai_OVTyCh0{~>V0064je~+_C2LLb;0KoOJzsK>s1pw$@ z0s!dGe~2GZg={g98AtUI+kOG6Mh@z5oChKz~W2c>hV=Zc>tNQ0nDI z`8fhy0rr5K08Ib{U<(kTK;nSA05O2{2@#-5>CXR|`-$X#hJo_+$w$DIb5ti6;;2q> z0Zv~zMRn!WNeh6JLT;+R;r?rb&t0HBPeXn74AtqM87waYPEnmXb?Wqan)B3DXV09W z2T+|kd+Ibb&AIazu3lld#=>fvMi^$4GxPI*gJEL6B_nGBL)TpA;;u#wN&EQnh`oXi zh)dibxh-j6>{K-P{ekLJ8!35B{ixIq3VII895{XYXAS;ZIK@AAo|@*&*$aOy-sTEr zf}A;Z`t)h)E0?KhXwFapPE+c0mYU|uxvLDf&R@GOX8e?qiA7r9#KR{E3ads94iSDZ zb8$<_$lf>d_Vr6mtE?K3H1tY&j}e#f{4*Je=D98R0BF;%YHI)Pp@Hqm7~momr4Co9 zt^ia3HJ>vD3l{n|n&@jIBX`aOAZsBZoQgLOik`W-Yc>F0a=4af=4yOCr@X|~nj4e};x^A?X?8 z0uM<$oXX*Z)AVaJ(bvuA?o`tyhG%j3ho*9Ba-LJ($OxP4TZme`znSzSd{wUSh)k9L zb%ExW4+6gJ{}I9yeDk2!=>!mT@B6OYr(eGK1=$yQWjgm?ztH#v+yD0BRo+#YtfW>j z>mZdh{l#oz{BRigIptKF2!%8Yc(83DHs80{AB?WXv>_Zr_s6oc=R}dj@Vu|#ua9*1 zUF>8)b~Jx9(CJ!x#&CmQA0odb`&pD|>iYoE1hf6ElIVBlLhw{Lq*9KDvOorl>YlT-LPYarBNmyHRFv$Pk|yr!?!RaAl(fP*U~Q;@_8eeD%AUI>k`GzUV9 zkKlsl{i$#lb3!VdUv~@PvIOVX3M82iPrh3XTHQMH1iy!ERy^f1ws50*Mwe>~wWaC6 z!n8pUyDBZ{7F3tmxr+3*8b>L>cJ)!T(Brui!1(dpBz>etv4)=7P$ipObIXAnaD|Kl zg*k6`0vmTu03m)X;*V~-eLn$=>1zxBI8LOGtd@8T$=G6+Zt?uEOCyDE%?kd~**1+M zZr|n@tjLf-JkcG`AFHki)T$<%n>0Gt;xc<~7ZWBX+R7)oOi)j~o<^VVuRriDvk!#A zgZAsacLtttSUz867ev*mWRM$6(RwekmyYi(U(^%(0h%sp3~Wq6_A6iT&oFYMKbjPP z8k_)Zau^05gA}ZDOFMHo@H7&ZFgMKm*50=I=oGic`i06QT0h$Jd6SP!w@gqUYR9y9{o5w_L`B>+0M@Bhng3Wiu$qE;FpnRPn~&F+z-d@M9G^!kYZ@cKGr7*!VRA|pD5wVF71 zFjUk#OB#b#JZnQOAL`But5;!VK`%o*v*sSDOYYzZKm@;@bzkZ7pY*))k?vflKt%k8 zb@XypK}EDt_F=@}l9Hz0*<)6r&Wn{MkBC}@AsDMX=~3_|9A-%w9b?C$S(w7a3K~Y0 zgni;9wcoTabSdTN7h-|1t2u|kI8aEs%$Cjm?0Mu9s z;uFrc0zsQ;?R3z>A-Ef`xb~ddg7D%D=~JuTOapr7RRBKFfB%Pv!R{)eep)%VSi`U( zt?mSHz~0e#C)oQy^fR6=e0M`{`!ns~2_PZYUgrL}y19DA5P#X?LqEdg8oivG9;X$Z zKp6WLrcQN>Zwb~voO=SW#WK&XCgj@fJ^d2K;dDIuEi89!V7cJ?E#`l0&aGdn5(;Mr z?wfC%0G9o@4znrQbA{yTXL?kKilQ6G@*$}cXSg=fHrKYkP8Y6Cfr45Tf2qe{eu=fV zRoGF&taVC*d`QY%)cwPN!UM;M^9KWd1F z#w5F@bnNl%AS(26ZK4@Mu>}dQ^&eW?EOCa15UjyNM+~R4?l^m4Jg;Ir$KkYT3#?7< zS%Rj$3~|Mp>iSSY7Ij0ot+_E2E~w$A)o=Jd-`rRVE~wRCjS$otsHZ$b=Ejsx$tn2) z;Dl}dFGuA6pCeDbk}(#|mML~5o&eH02aVfqo>JMb3|_3)R-QWSILAqitj5Vr=NwJ_N6O8h#2FgQFHxB z>P`U=n6O(yShBX4e9mh53^F!Fy&*UvJ}b(+$C%HFL$R?x`gYY+eXYf2xB;#2lA~F) zVQlXrrVa(7YZ5+LD;lSPwA4q91Roxt+Inl*dF3pFgM@W@g$0IhrC=q^z7c^>R}#C~ zl^aSyrX$N?9(S8KdK-{Mg68lTx zABg*+uLfzY?C!cxXbdcYjlh@iE@njd)@DDn0S|$%@8XGN2*`@@r52oVft!UOJYMRPYVLtT z6G-7&?j9O1l-~Bjf{$zI*@qR5RSe0Ujn`u2HC{(o4W;k*3&5Sz+U_a8P6~SND9%iX z?c+OSkm}@F%CtAJ47WRg;!q#|<52qkc32%u9_;p*q2 zqhl4TO_;T}pR+ETi1YShDi)X{w!6Has69VfQetM0La01@hp|p5sTvO-+moS_3bzS7LQtFZLZKO=o z2_TBjXxgpVzmD^M(5!JLb*uDYd#Yun;BKAYeaIG00>@^U$_+#H;^b|dwi?pLk?SrB z!;X$r^eUok7Z$RJ)}JK(q_IicZ91a|AhU|NMcQ<%PrUsjf+ZZ}ziq2E-q|<@6>dt6 zb5a;~C1NG_D4V{G8x6long6BO*3U!Pz1$Y-+)p*+thjcJtx5!H3Hxh9@uEo(*q>7Q z>lPE*W>}N9(a?))@obGg#+Kx64*hkAPD}a0RFJCU`1FA&T9Bt}cCdAX&;?8zrH`yK zE4Y0oF%k-#|M8{la(&Q{N1&fbNr^Svr^S@K9LD`yUz8j~L{EQ{?^tmi!`x`FuWfH1 z_hN^BZ%f679&hmWxIJy#TwJ{UXHg`^2oj3BE?x5CFh6&_+&Xlwr-5+f0T&=lRxx?C z5k}%&mR^d1@p#Y5!V(zLC5)?jOKbW0saqqq`>Z(c)-HM$Irp9!dsnlnf>OQ-yiW>3 zM8}R3vz_+=JfN=y{# z#Up-H#b!W5AmCWu3~yk3Qk(dzngd3IHf%x=xIv&UpPH?w*6-tfN--{PVO%9)I!&s2hIjbwmJr-Vg%I5wFmteUkQx7ab>CtD*@UcRWNL(E9*mh{~|wam_$~TYtw=! ztD8W&^E&nd7*sN?u4-lnlZn%oc4)1otq)^!^@iv$wcer6Zko6@WM;l~*uKiri4>CH z3ddKnbXqv_x7nNO&S0-G?1*{&;iV5T2w)RrapoNA#&K zelMy%2^ZRrT7_JQrd)&6!U7a{qk%DsP*_+9>Ve4B+f|Fvjy{!Hw#<-J9k$}2L6hn* zly6lu+3eGMp6W>yu1y0s0qh}byU%Hkz`SUYQu|(Q8gRL)ZXQ=V`Efc!PbL|N3VNz7194pDhqz-esh6}t zx;|uyTDDh3o6R;~m{&Xj#Q%shw;Z?ML#{L-TZg9i++4+Re(>A0QmM8Mp977d9Ua-F z?2D#tp4c%))GDRN+)n^6lhCrW$_wotW;@`z5^n2ObCSR)gQlhZ@XgX2+#Ko;z*RIR zL_=Wq8#m?zQ`ED}^hnB{S6!BPN5O!lE-(3V@WYv9WqwX;j#$fxx~bwMz4Bz3nX_(& zOSjr#Fb`YXwmG7cpeotTDje8pQAI1{nx|Xh@p{&b8YIJ?V((QYC#@;F=EPDKek&zN zN=EeN^vXe0e{#PzHtgnF&*o{z#Ym1hE2P?v% zms^*T9E@7A>UdOZNm@v_RuQ2#q2Lu*enSWzrn6K6-1ufxuJ7bzQzWp`n(5+Nxd&9d<ss-};JQMdO&?iUm|f$R4{oV~Y0PqC+XlF{ZRe&z^4n>1nw3O)#b#@0i1bNIVhn+D+N8d*{PsfgqxbA!&N#`)f zsV%EkFXbSMxUA_&s?nExmKVc6T~xx5UlK_Cey= z_QgFf99xwQULgBfPpNm6nF8>Bl^th6%(+4^zq8YA`!q8!zcv`o|CQ?5T@RJ2+&i}= z`fUGP_qr+yx6A%2ceL`JC%YziuV)+5lL3Xv`Yx)On3I6IEo7{k2%>n@J{3Xf-9p#D zw491Fv@f!C0SFnM(6mXP@avyEcru91Z09-#V=Wu1}zAc_Zp zk|blb`di3oypy(*)*nKqe~8}Kvc1R$*xC;{Z^z$Sv5AMM5J zBr{r7Ui{IWC&A{r<}8=eR*;08clNk;3Ss@YeBcD2cl?%ajqFrucZ0G^Bvo-d>TJLg zQ(SM5mTR55USUuPsPSl-nCI#4IG8XI`LcWZ$HfB8{t}s&FEZ3hR<(KGP^GCaijG;% z!*m(flnknJA9(B`!ok{&h=1)5jX@i+>|_dL60M;KBgh->zW%Kv)nN1 zolc{{p4EbFnDhs@=y?B{Yj=)l$;Yz#J=2!aC+ipSN=M$<>c-W!O7gkGdn@J3a@9V{ zfd?SfiqFWc4oyC7X`6w+;dgk4SIgMJ45Ivf#i$(V$-fFYAv0D zi!;s=?4Og(+5%OdNv(#BWZAMYsl^#ul#(Doyn)8m?&5g+Sb5zccAh5o13Mw(;aC}K zsN<%j_grkjXS4EB$GCB>#nM!_5*HcI32o#1-l^#7+E$=3-cXmqSrbF5YoR~BG}lHo zq1YdN&1!tUTGq6k4_lkSaQ3|a#W5UPATt+F21|TWu89Rn!U7AEkcfyDUO9OjtlyMV>O=W7#3UGXYTKcIbI4n zHN)mu(K+afW-S{DRP1r^wQ3ZZs@PV{65J81Xdc!w*}{Nzm1}_w6_$KevCN3sk*Xx; z1{V`Dh$4DapBMqUR~)TD1psukOMf4Kmj|-4zW3Ep_4g};dq1}H?1gx-Jb!XU@RsjW zB=S9RclNDVoAZPg@5BYGn2muIGWrht6L^BPpchQ6+xm!8LN27(U+HZX`yS?fnXVuw z*!T@WWRz$p&K{nwsA>J$Jq|5kA-&Bv#oKj_(id&QKbV*mEYo{qHWko z)bN8O_jsSEOhV?c5n}3-C_|p;_}X|`)Gi|IF>hd6w#O@M)hu`p~wE{t?bFH za1V)3RD$Gdyf&M>Lj`MAc!nU1mpbYPP@Sh1VJr4Yv&a*87w9(uM%9iH#*A~_0q??` z6sk~d*?w)V8@X9m36=YfJOd@-9hjWtrYqQ->dvkFMe4N_vj+O;Li^!8=9jv zuh{a|m}^vB|6CVyEx1KA6Z@k#L9wR8UvseP6_LPYwCPF}QXlhGL8(e@MTb9Iv&x?J z>%mne5~v(i4Mm787D}vvDW}QNfsSvtc!cyz(ZW{J1W*~;C(@-nWkPLTXbaEug>KiU z@oo!07vdEc31(RgNY_kuwCtv41g(s21ZSLL9#caGp5g%W{;t>653Pl#an@wp|H+;V2hOS05uu}i{ z$e2gw-2%_aXik{4b~ndXj^#YC_E^3Y6>r~|{aDLI=vgX6Fu`^`sge5~eVPqM!Xv2I zgu}Qrf}_uHS-oXh9X;jwr~MZLg{qX{!Ym~tx$qiqH{R(@+lzu{X6%lKUpAUNk)!69 zEHGfNma@7z*1kHok@b#odXeFcHeowj0)kjMgu3y=Us)u>!~my&y8V8j1iQ`bF-1_b zERGRw-LfJ?V{5nPoM~__7tb-u9Xo3}YiOTDt2$qUe4Mc-nOU0k9!WQ7WMX{Y)70FP4pjs!#J1m`dxJ|?Vh$)ww2O^^GXPn40ntr zNU<9m-^>(z-LN3TAGm3ZH$cnYaGLYGCjt!~ox-l(3E*zmj9c|y{@m$?S45OAPx{AP zZA^M&C09{7#hmz*$V77^as7tDwHNWY+3y7nCP50hifgOmwVu_LUn_L!ZhDV3Nm%+c z*V`SKOtaAoxMj>Gxm8X4nQMJJS?U$|Ozh^=vQQ>pk}bG0jwn+qEs`l-TkOAtDcLE4 zZ^@Dx$E26e4uluwgtX=;HLZP{ucoltWu3NJt~y?+cIgk2&C+-XzSdoETVGzq9`$WS zkC3>2U18R|Num&ND;3@~L4@@B0Vmab{`kh*Kg-FR?&8Zb&)yY0rSmYqbS?d{@#$p^ zj=>C7=_mOed%-Dup4F`b3Ur~v^scYf&a#+sM3fEa*OtoC9bC`%LF|po$A?txL~T^R zLv_Z;m``Ol7Y&b{0Q|!gv`!UHIqb}9ebpE`{&eU*M&p)nbK5P{uKY}h^)hGsrMJGj z0J*+FSc%5feKV(0>0#a~=5t+koP5ZkeHg?2^C9`@!hC%-6A%BFS9{G+Zk%i`1+dR^ z=~TPH2Aq{d9-iCe)YOQ$l((EBI9tm{+uNE##F&xE!fBEE$|k~033qhU3IgG!H3?+^ z@pMVMTIAGwPm}aZqNO8;^v@&nF_&O|VxBrV1+3BGH^)^`Y=jfQwM-Ww(i`#y?=yb; zUXSrcRga-To@8Ik$x=U)X6GWgw$7;&fX~uH9d!i+>W6+$WBw^8V$@5m8wFcJ2VooB`!ORe07}FDNyqYEV!6r?dssTldC`waqG1}=A5M>p4L`Pydt;a5}IJ^4oY{0L*&y)2U@d__6=NAy8zwF)JY zcFOVNMYj8*Sy$E|%zn>#aFJsNUQ)5zZH*n(Em>6<2qIMtfqA4uVu7t7{R^CM$uC0E z?yKu)HqX|Cwkf^-JZd%VsD?Dj_}zUL@8-u5|4MQM3)aPeK-eyNg;kayp?}`mFS59QCt4}9ULKe>ru{>b3Z?8zj8%jH9~3M5Wt{T_Sd$t!o0EGxZbSs+h(npF?g2K zAOf71stz-vmW^}a#Q(5VnTyMtA}GpoIlkz=K^F-9 z{^;IX=A$a8nwB;AJAR_vWBBT#@sJ>BdCk*B9+_tO6_E-Tf}??BAWdKYmZbt+WjBNU z#Pn!BJcDS?#>Ff%)uHpYQ?m+eX!{F(70(FcGp75bB{Hd`S9S|K_T?Gb0`BBFnQI1_ z6I|_gZ>nVJuRjVo9IOb716*gQ`CBj1_rhSY?bG&m3Z|-KB3>p0iHhkiJ86ZE z_F;4g&HMa`$V5ud#^j=!3SLg)&qjBFmZe|#1Yjv&+`Rv^ zJ(z&R^7b-Sp_LObU?*??$~3DDpHQV477)QjXm+-#yAXmkyHXhe@vcvSK-&Wxxtphh z!|WS+$b$aTf`Z1ulV86I-ev{R%%Vot{`?AJ<_IkIm#!xir>|k@#uFS|LgGcc64?m#0(R+5MKbzqd>K-&T1{tuV7Sewk)LPx zcutW@3xla*H>+N*_2&NQ@u|0Fo8BkNPe6o%jAv)u>eeO1SNg@ub(^0;6-J3Sumk<} z^Rw0+*!=^j2`|Ybw{gs2MH)s*%FYcnSskw^kwgTkGa=gRgVS7zaYCaQt!<3+~ z?{=4aDm2$U`a2%+#)X62YIB6UoJO5fk0jBNp>@ck;yuyr=i`m9uq@-cqP}LeZ`NlL z&7)FSkzR2+K7{2a8?R-lix_A7Bo+a81I_V#fyZrP1R(i(10rn)3VE4{XZX>dBfqrk zbPwAIpXsAWfEsWofL#WKsyp*`o$=k16j?7`xzi&}`s?A1N-<-0&bzz|psG!#FkOAhCkLst|{_Q9% z?NDVtyI|tCOWrgLPmm6BLZKxIZyN9P_%dlTxS)Te9H@%wq=Ev@*d8KGKKLtM)RsLL zEmao4vmu-o8=jFs#@G|z8c((P&a(_wrSD!tSHJq$=fmIHBazi4%${zy$J4Fa(c-83 zG@;8pcHD%$ZD!Wkt8Avgb|9xioH4@@@g*X%ea%Uzp(rydR!J*Rel<=|V$2-l?BU`( zV{q*?;Xq2-eUF-1RG^m#`|<7I0@Gos$vlLWH#?ENMFIx%V*Bb`m*u5~cz~Y3yyJ9s z(10FrpZ|u@ryj|X!bzl4j}UTZt;9G6t~PJ`>MlxufB2Os%H$UE`c2x$PCLbS4N^%q z(GQ=dOXhcU>nrN9z)ES%Zd$(0=Y-I1QGIkPztndKi)7B?YbU-qy7z;P6z&~%``dFb zs>=HtN$V!d_tTCQzcc7miR-q~^lEfkS5kv^H{R+}0 z5b|SAwIu|!7H!w%zg;`{h69B3DjsqzLAOksK4v&fyczZoj1YCte^#@gl{YGSq1OoA zas@qSh#XsIt_s???asY)0(f#P3qH;3i`hGS`+cqQ{F`8jq3a#%AYbQ@%OyRq5>b8{ zHOC$5huPUJslc+?JQUZLtD$VfyiyjpSy}y{pNq-;%tnLIw?UxdC^6{@zHP6?Gp2S1 zbl-f`>E>Fm*_KuxDlGUuF}>k=t~iKiTtiLJWjXsm0a(S=ZRdF-$#B`qOj{U}`>?l@ zfEme1JU=I6NX_CM9v{KlE-+m8$BRy$A8CCxt{IG8mn3lF7WTd@(3DYHM)U6Wue*_I zv9)d7Lk?+{0xKS~<#!T{wcjRQ_2_llvYF=1?0wpoJ$waQokJYy6J>k9A^>6>xIWm{ zRiGr_=WW5WLUT8_{pN?x$e26YmUlZ~V52|s8kCf>KF*NJpNFtc%s^YAi>q&!f~h9nR#g3ed+IuIDyVUtpK#2;tw z%rgX%tTa096~f{JB=eV>xm!M@G4XAiNDawpPjSAsbN7#>H3~|e4gf{FCZUykmJe>^ z*c-dFOyu7`Tp5!L9m3O)GwW@(YI`^&d}AQyxMy<;N$#U~A4EMPu{?g~JyKva(7EuZ zQ50dq-0-eauh;T`gPqnKk>*ZDw`at(#~T?I>IZvHa(?=zz7kg=XD+d;c=@cbgXrV# zsWwX)HN}V;dyv1El<;e(;!JxxPXOJ}cF{7rhoQ0#^G@u>{sZTmSMuxdGYJB%vUL*X zt8sk|R9B%USlM{LWQ8j6;XbCxdD5`SY@I0Ai^P(yA~=40vSfG2DdZYXJMwiUY`q>(&&{{^Wo*wpjO(CAh7O46} zT;F{!E|RWGX&^XM*QvCy^6`6-Ag(SFQ}9pv6kO)2joB&eqhc(jQRWTFICx)E><}O>W zwtc?&{~!nFJmy{l@*Gde@aEPmJAo^EY0CMo`wAnXmAd z!!#Q<Zc%{XVUPu)OjPTlZ=oZARD=ly1x*bFfuZD#mS2^dJvv`w@!fTu;^r2%RiHQ}`S zYcj@Zr89zVu@qy?;|qky#kUvF%yPfyvZ{%P+N_JKWkA)4=c(~kfkPB4+Y((?Y^Br} zU@Afo#HdRwrkL~6pM4g_QeV(CD4z*YC$PCJ9r;z|31ojK2tIvkN^qi>-Q=ZBLQQ9r zl69Q3w@VP1(PAK93Dh^KR2OwS4u`D`*jn>2Ohsi4@t}%-bGryG{FCXPL&2_zk6y3WOs6 zGo$M=P-Q!__vvGbS~Y=^t@^32*2Dpvl=sM;Fke0QHyr1macX=o!Q_lyKt#1 zzD@VTtM$D*Nk++@LtUEXFQO@jI0IlqPp(y3y((&kgKIs74+4|%fk@+Cxh6k|uBu87 zT;RMfXoxqT0tNzl6 zr~5*Xd#Gz0K7Uz6F-Mp7W~7&G0VEkgm<&%_GA3p|(oj-s@h$kUNc5dUzN>#O z$I$0i($KJkZ{8K|WV5r9!F`b8{s4L8!Yg3h^_0lkcFn4@L*1p;_XMyJmWx(vIljEt z-z#Y2^w?I!3YXF9P*GL>@5J~kQQX<8|9rJcgG^nPKgLZZMcHltS zsCy{rOML;jR53grypDx2x7B;$y=>bsVy#u!%BTzbg~Gfm6B6Q-spE<1aP7UECANMC z!sQ=5y|WDV8ArAjYtt*RPF5+;+D=M)Rl0bZR$Eh?!+ZCH4!asoS1p0Wm z$bKhTJ$c=`Y$VAkEcgZ%nZv7Tay*Rj1Q`SeC#6_fB4Ol_<&_(~j1;dTaG!8r z3rMku_%|=RSj%9D0&f9R3+wLzLaizPitjIuX04+`>{VijAnziRSY_PAqen^}AqI9@`I+JxH&GeXW2FebSo4~jE90&`k49<8ly zhNzQ#+nL0BM;68w7p^yYifzo0P}?7Nf^)Vq2v8_^S;Dj0v>L}Lnid#bYFJh**Dt2f z;>Vq}UY%v!)73u|D?Yx^Bvs)wSua>qa~bAllF?Hd;3vqsv-&vGbIZ5T9Z>u53B~NQ zKO!onqkzj}S6y54aj&LW)T;!rmc_Tjtjzc6ckLoD#zg0;LYMb13~NbFpg}Yg!N0H9 zqZFG!G5)2F+n**__kM`7#B}H245~-Wy;SZ1a}m=*T!D7v~}zOn4Nums%Z117I` zLCNOZ2tiUvB?5S?tCVhB9Z%XuP;O96reZzsBAlk`75bs8x|;}vf;;IO#TCUl(HFQFdgKkunXts)w=m7ym9++1T;uE}O$`ICdpUCi3*%>((>mWGN&J*y`Tj=cp9 z1wpYV0O9BpfFH-NLCJlp`T8x)qGNyKPB3O8W$8F|tG?$Dw7c^I1 zfwh5B>j@yOg;G}HufdFvQyW(M#LcQj>w^Z+UguVrR^a6PvG8F~hjK4&O|*31VEB#b zFO~o2raa%P2m$Y#ZB`sj2_MexH^PMHXn9sczOtFwY652O8UQIjp46*Ld!C)I>!#LLItI7|xe>Q2Jc0PE?Wy!IoaeX9r z$KxLZUV3kpU8X>xX(56X^5DbdlqqBbjZd{UCI--q4u{_U*`7nMgbxzeD>%K4#a_49 zM0bew$dx^D`8J$?Y7!1O^(o}uU#IoI%>N!eitM9kJSG`t+| zyDCe2a$kt=l}c-i@M5{yIpZyo{AcQ#KtJ2vmRTcBqsl`(Q*fT=$oPTCOwH&ShK8}C z2+~_Lx5e8&Gq=lF&H5UWlBnli82gTKNEuUS9>R;oHJ|JQ>%1?9zf~tQMo}BSpJAQ7 zyfdV2kvBABq(CUuyCh?imKO6oCM7#NB{AY#uv2BbPIX1M!4fdhMqU1#P zsJ&qL8?P;Wvr?(oX2;@jpw`UtQJHfB_1`N%1mu?JrRVYw)*Bo%CnWtnI_uu8b_EnP z4XjlXFMtlJ_Z%HuqRqf&6kVCoD;XUOUT&TtNh^R=bBNc=0(%LkfL0&i1O~s{5pR5ZldzU3WP!+jTS0M$93XIuzTRd8x>Qk_@O8F?m_)=8a42MGrW(jKC z_PJ9=Iqb|({tSx@?|O4YLcS+|83KJfbdWqQ<3B+IW>y<8dG!8URoc=B&hOGNa!B9u zY;x@^LCtOj^$@B{Dg)vX4fs;9lX=tuAXHiP_5JBT7}AaPggH;e+umIdpjK57qA(AnGr+u!2qG~XVyW;u#MXucoJ0-X95d0of#OaFEwAN zgmvtCgqyOAUs0OKn0=$f(~qr7mj6~3|HXW$EIeo#Ilr3mL|=Bs1iXTV-ft4PZGFl0 z$FR=i|JURSRBFA=t4lPwDi*BPd!iDevigZ14)*_C_&iJvJJ#;T_x|JNaf$_dzga|J zL+`9txtzf}sq!DKOwNn>l3u|+RoL#pL}#c4uGxz^N8`w3Gc7Y-@$^g_1FlP~hN`SI++AKydF)ARI?o#WY+6?_$M0fK-i?Ra(&&Q< z6mEXd7$yac3Ul^DFw0&2wkcWVtUqYG1Fon9)ddq5iew(=aBfKs(JrkRxSTtTCQ`c% zeDr;JyXXNq#Ef^Fav{l;K|=E^N2P8#&~-`UMlw?Ga);ZqQK5v>MrVM!-IcX%-ompd z08OFoWbA1d@QTnpgB}ZsPSsY7J|$iYBN}4+p{I)7<^pDz$T}Y~lqsga(H$gs|Bjv* ze=%eJ!t@iWsIUJ)t9(|Z>vXIeNTdsf^Ct2977u)U5$jkaK68K?{kq%?Zv{r@S zPyT;v&0iZumBFr4>*x&09}9fbrsn0U>hTTy-g0rGI##P}gC*6^K1TG^>%2stfHLNr zmN@VEY21)|C+JFg6qcU}RXMEv3Hl?{qpQ}fYcp%=o zR1tF=#2cDbrdY~=i_H#CfvoWQvXgexecfy)u@KA9VWWx((mD!c0Wy2Tf-ZI8<{&Gy zc_5rjyTsv6f_~uMB&;(Tt3Fp+8G-kVn_)5FYp`%HP0G48N{1@E`GaP+nUof=Cj*tJ z?2e?#X{)v>I>fxRmvWwXKX6vKL|b6!;b^bg*tauxY`CnZjMG{rtk^S4P&wb5-*0V? ze-yg}3-2a^c`6^zaKv^#$>z%z&7*T+7aAS#o{~iD@U`ws^KQL3rnxwNvmRaSc6N_y zH1BIdzyH3Az8+Xvu3LUR;%ZEr!OH=gK`oWwHuR06fT+)mk)Ho1nt0g!}ZT z&q-<#4G^&0K0W))LR>tAsiHjC5!}3YsnnNj?+Fo%@(OB|SUA@YlYlJE1lEOXD?ls4 zo=wlJvw0(K%R7MHzq7fblDg|ZK4XnbYT2mVHU&cM-Kd`W%4X6^B0`MwlS~Kb1t=Es zN0>WNB13ScYo%Jf)zoLJJSV#xMxNDHf0B!+5T~Bl_m4$Rm?S?X4ax5)Doe=*SIOxT z$#6rsLX0K&2roi0aci4!RsrCV*e*Cz?Wl<6+E#v`K(>%k3VZxbR0rXps04PiL5#&d zX4GC7D_JysVAMb=!`c;tMH{Sab`O7q!xA5tTDtT#P1tG%h*t+U(^3Q*kil|ZNA^mn zra@!5a#X!i$fE8Lxm?6MyqIZW7RH1xt6$k(-@-gSUMIMcPXJT%Wb3a+EzGwFN~s;G zS#L>RUUX^)S`?#K=RXPZ@M`T$1RQe2XP<6tDG7yHx_Qz&o@UsK5& z)|D7r!6Vx9q>)E$R^O~_(z8EEQL;A&K7g`W@S%Hv{cLhHav1zku&Da|6)Thgp1g_ zY#ju&LElp@q!$JnaQAOAWG|N!_ij7Vs>Yl5V?^fdNAQi0{0aldT8GCwHpa>31nE_) zHn@vczsaqoRYT=8_6stccX>S~>N`f_5_LQxsvm&si?Z220Z=`UZ*?ydI4#KBDpusMjf#2E z{&kd_ossENeFG=cXbSK5x7i}Kq!l>j-!Rl`mp7~yo#otpvt~4y%W#;>>4sk!8T6=^ zL~^(K>lVz7Emlh-6)SN`p5!t0?O`t{WTIxCy#DU|`1=q#4(Y}VubW?%e63gMcUL?= z<6^6rwVd$)cd7iL?c*%0YQ62bHl>-wSO^LMwgn-aw1#_p{mOT+e*ogj{(fWs)E{(1 zkwhcD9Io$?E+#DvNh1etx7;S-!g-ZCZ$B){IfEt;5QOWS-m==I39t~Haz&)E&A52L zzY^v|6g3Vil4gt_t2L4QR5SFax_+(Ek0F-xWdnAz2}2&{quK~}9RH)zU`x?@0qOgc z?X1rFWIo(vU!VXq>#|ek=oeMJPF?~93Gt<=g z$)UAq2f;mwS{qv4bCr+nEH^eP^H^X03M%~ZKvwq6E;3#@LeO`pbNB>M>sj&R`ROO9 z4*)!iJod%X6O=HMEOb^h`@YQUb#7~K3A+IQd~?2k z+hqRFy#7gDj&s!QCBSHRJw7#vc(}>Tgxy)s!l^kOnpn=7(Fsf$9_!${K1pGj^`5$j zR@%NNaUga~I#+@}mV=0% z)w=j{-CI4bx(7pW*m!nPX;0V#w$#Zwd8%E%%CN(?N9TH5=y{+%_9-{W{TEqGa5Fd5 zowL@>h8h?d;74tc!l(L;wedy`lN+-KU8WgEE9P)K&yC3??AXhB8Q?{#TVGgL=_01} z*gOd(Z41{S?5Qf<+q(D&D`LN2{6f(EZgr8|S=X45XM4X(5$r>gJhn@N`IT%4Yl zTwvCug9hPC7Wh2s<;tfEZb|Vf@Y6_igbcYbx+E@TdzS78=LZ4>r97_h*x0$UX^Ihc zU!Y3kUO1h##G!ITX;XAnCsgB(Vw7)vH4sY)hR+#)oz)Vu;93h-v7WwLOTz$Nb9sxh zDVQ+1HD;DlgaTGJ%edH%s&flWw<0`yHnlqYyNIy~ice;yE^h^^znPJK_h}Q2i~#Nk zdpu@kx&D8#_uf%WcJ03>KB%aOJctyjD!oe$9UHw#3nlapNeG1A6;L`z4PB-89w1bu zhbk?UP=rvV_t5d=dEc||t-$~ZJcXnHQU)T5gT-`&3@D z8XB#QC92lbEJtZ|)tc;RRyO!uNq|E2Lk%aDz8R*_vSU13k^B6u;!4`YpU&zwAC_h= z?gh*1C9BXp?07PTOIcP?6>rg-(2!&jO_!oFk*C%kckroS@ONr#mfGgt1MsiatNN!# zZCv`}bsB2f^M%Xss^)nKKCXwWts2KhbTa#9b)c~YOY`S?E5PPC1`jN!;r2>Z{kw*eYWh%ap2VE>YDw0^-fz&@riYP?B(E;q1=-Om&&iF zpBoOj?(&98eamy{89Nt{?PIO**k3*r6C{6|KA}S?uq3Hoz#G@>|D0C_)@_P6wCeGWUt$a`j;FsGk2Gpdg6SjGzw=>l% z=z!rBV|vN760j66xNpeX@`+p4Hpn@8#Q<$JQg2W$dlgAK>`YPxWsX{L_TJT+e_@3> z5_z3##`nc*#Y1C^2`wORjgZeWL&3wEH90k?thZWn$ElfvU#d1_PnuCyS`~jNMy*$X zoQ6tIv?niBB5?fZ(F`MuF=*nN{n3(a%)rJ{p5+a^KmWYLL~uoLKKM%MUD8)IK(rb|S9Cw%tx^OQ7u z+?{T^Z;c)`zic2@J_5FJM0-#2AyZkEEiEutwT>~ zm#2W9QNtFf015WYzV2n(R?SB{lutrFsbEV{3PM!^nUx-y_0#U#vs?tI5;D)>UZaWg z3F}YhKfKmE8QKJcryjE>bRQ^5sCCerUkg2i8V(SQ*8|!n!H-qT1Lj2Nt?h-iCuxA2 z2uN`<0hQn%azRYZUnRpYhEQ=bx24nMj7>=#OH~WOfVu~B_qmHZEH|1Ku)sSK@$xl5 zPrH;?Faw+~-BS@d?ml=YJ6%n_XdF&+*>`0Gg!JU-S=&bcPGnFTbxa*P0A55t0fN&B0mJK2HPKGdaxf` z$R3x5JK;V~G(}GyI8i1=w&-Pa*@y$*PEd_R4OH@$i@srZG0qn1A}}vn-VSu@Q$tRa?>&J722B zbaP$=@FGJ+{QblhOd)eQrxb|Q(&c@3h_o5GRW6i!z>wdcYXebOfwrga_kY4$_boVj znklR|eZoMTXZZXvBRo#}eCjQaT4&~4!nKcii|jzMnC# z<%$4n$VDqJ_;lehl)ASX+7Do3b~M>+I>{csH;FRtEB55u%Z-mK+ipFktdftb@19R8 zl2pbUh&PSxN_I<-^-5#rjo^wDei+xAdU(+@!Te+3i^*OHJgO4R$@+d7XjjwlIk{r8BRDF=~1KUuEOi&}1dzC;_t&%Z4z;yyaTPl^R> zSUwqFu2noc^ACe}Xn-$a9@Z(d%cjin7KXJMv(Mgo-DV5r4nElj8+~t&MD@^kWe#rV z)qPoE-T6)Qu)m+_$n__`*_iNqqmD3%p}=T@q2mv@|y%W8&e@Hk@JlU!ebYqcY8c!tEQX-o$@)sI+%)+#j3Q&fZivx`*OCUZ6l?9gM%X(9$1**w|>WSr~D;rDquix z0$UMn=R&rn(*5|p&#*yGfD9ke)hT@MpAC~J&AsB$`E!^1$c_hTbOK6QS3Bq6Rgf&# zwV&j8pQjf+;G8NyT6e7TVaSE#FePdb4bm%%N^|$2s`AxoooXhUkCb;ZW{xw`*MSW> zJax-&ojqS8n?ALg;7lsl>k;<}f%3*m>oPSQ?8irHAU&AY#NaW$4NcR;P~?=~qqplXWdVepNaY*k19S2>@QV}_4GlQxFsDDaukrTG2v4!K?!xDjQhu>=|DsX6&S=HvT|2>~~H=Y>~B+LJw zIfDJ$#QA^0?f<-0fshqYW^;SAk+N9xHhe(!@rT%^1RwS0)?u8+n{43B736CI>jYZV zr-E;XM@a^Gm)S|e*FCvfBFlK;K6=3bnKDsWH2dWCjhz`O^KF8VF+<1?-Tn_DV{~S% z$F%A{g^Yyo_9sZhUqVL9a4s8ykg*HP~u)<*|&$Jo~_} z+=9?+q$<;@&~$X?zEI>D`=^eiE-vonITxmDXpJlU=mzM@Bi^ZflhF0?aa2Yp!>|AZ4>piQwPCj z-gY0t%fP^EZ-VY0=tnofm}zL3=K9ZQb_mQwjb$9&Wrb-?v@Kpie$(Le6kl%I7I55a zRAW0>2wcHVmd}=2I88Jt=%C>6sHb=S!YHatYL} zq=6NQ4$PqHN!Sr6e73%D&d~$G+l~+bO+qRU3mhL-BnzYNgC}k7sF)V3=;ra5>G8}R ze@r{V+i6)Cr)}DSih`KTTQ`3%=@fP<>`Q?cohDCa*C%{jxfF=#WV{1WP_5hB? z4LpC*8|k*bgnd)0Pa22<&?Wu9j-zxltshP(IGOE(-Pa>oB|Ar0HLzjYU~5i+Uz<(o z)Z%<>+7R$T08Ec2p)VA(@!E zt_y&bDFGZdIGZwC4;MXFp^UOoIWJL>oPVI?jrY;=!FB|a0xH_~*QVFyH(_L#yz_0O zLp`$#ODyZ8m{=4{{4(U!uW7IC!=>5wb;*7k0so2FgTMaQYRCTTjFK6=ekS{=#r`7s zFt%5*zm4?Ntm}GVqmBM_9d807Vg1{T``e8ATbch>q)yBz^AI@kN@GLw2(flcqv^PL zIX`|SP|6O!%UqLuL1`k5Nn5>Dr!+&u_D9+{~6}b{jt%b<|7Th2BfT}^ug3A zudB_^gfYz>W%q3>N{YlTC8cK6?IQklFi6;`dP2FHls}Jm=UF_#2a~jpqAEc&T;m7R zU7LJ%(j6QpO5P^*Bb z`d?CH0JT7TyKTylR0>X@EV28bmoo~%oPZ=zhsK3u#=^KBo@0hTL5^F>rzD-0!lWQ` zqar$84KLZCEmDDQW1gjRJx!j>&nEh5wv02=F|b#_^64f=y$}8R1m9<7Q}Wv5SI!v@ zoGM9C1W|6S^+=)}?a;%hMb1&JMDSmTTB&ZrYm@*7{tHU$V5Hw7^Fo?3AtP&~88C|S zeCExP5db|Jx}7hZWtP4WZF2@Ppj(k(&|vu^XDWl98C^}z5g4!0`DSEPkh{)l@xnK= zI0&Ez@|IW^ag#XS>G@3*WSn}+>$gzg`M#Z5Ip^zeg@@eb!{y=c7JvT*$^U@wr*=k% z-#k{nEf_7a5IHg%**!q7H=nf(aWxY6Xo(&_`#=isu~JbE(nvy(OrX@a)y^jwO&idng zZ=ZOVFJIRp>nkRI90rrowL{Su4o0{41*UijV%k5cg$h>RMt}|biD442i0?5;xl$(kN%XaGd5!eSbbVdJ%Bnlu ze13l>kUlCA-nYV=V_ZQcKPNbwS*&kb7AbGNcM!#22CMQw1f1Y?r0yQHoD|{jvtFH$%cK?8 zg~rINY-dbO?8cu7M`9}+Rm^k~^V+ORJZ;nF1s^`Xorv}6aenbe8Au0o1+F!ee+!F3 zs^+bwb2V6rY4nbS+|C#gOz?0We%y5d$18A z6HWE6DrN)jJO1SN`FmyWF&@9#1|8A|Gn_B?P>L=voQ@x@tTo#dQkuhwDVmuRfi_~t^EMXjnWFvHrZSZpy&d>1plXVw-)sE8 zav@VyaCFz`&V7UBN=9Tlcxp3eJ+6O9bwaUfs!4y?aaA7VhwP7dYN@0pvi;!eJEE{5 z$#;9A=^tjKRwpUa8Xc?4X6szgjAFqbcm8Z$PJZd8H?XZm6{n3&&|~APJKC+=6sSXJ z*XAa53iG};$336UCr+~RZe@%Uu2W?07rfg$fOwo4+j=opbtH1cQ&0tu+HDc#MfP*3 zes#y2K?Mj9{KW4SkZYuv8RoXlWe+RF{Rhh`bbT8i82X#YIE%RA+uz&jzi~&hw4BN= z6>4>u>SQ8A8nM(cpcaBxPOy%TGT*-pxW`spk!M@LZaoR6r=+Cqx;3w))CQkRXD=*X z>_cZ~o{ZsRoZUmu3MJ#N!XzY4<&xiT$IYJpENu>^P04cD7t#i5;p~6)_U9YKRiMVn_Y#getJ|Z4u zOYpmkYQeKz(~uZLAHlFQbyoPDW{9a2uJg!Z-h zMm?E19zUB_fZ-NIOtB+wgE&0`q`DDf8sA&w;|Ov`0Fyevg#cO;=eG3O{H0NR^8q7Q zlW-}3*7d1lvP{yqxKxwt#w-`ddegi|z0QECPGcpNveKaKW6-~FThx6@i8 zV^k(B_gMU0^0i-0>KKC;%o3lMZscX2Zo9TTX$5_RzTs}!IB}X1cN+DFAIaZ70lw-i z-H&8#IUXpiAGTr+EGj=wfcif=TZjl*HlsgKH4x5&<_#TgE5^ODeS1&8#a#-G{64pR zZ_XxpHc!`tXC0)iZZt0Pn@|CXr;~Q8*(+_A>A(QVjl;4vR>`f)abrTLeS+g~kBO;M zEEyr{p5;s1n4)Unad&u>D=cHkeBwmiGdz8kal zMNPOC1P=Zt`W{=g@Hp_6b9`1yirnl%PTR2m$xnI*j>Q`MVf>Y2=fzG@7!jwSzzd?s z`yNTx@DHtDMa|BdYbzGbWk=hF-qw$^m;5FIBqv?UxOz!nZTjE7jymm`f36SEZVg0v zPR{-&+Ak)$%i8|WQSo2D(~Y@+@MgHcasJPjGCi6;(lr0hVpu9bT;dO6*Qj^*vb293 zw;`vPd#eJ!)x|J;$HfTt&+{CRM?H>>)FAQMkj8;{ug|sX+m);<1s=b&9t6W5K&xL< zXSE8mL<3jLOI2R+RoB@b_p)tfmMI8DhIziBqj@5zAqUL z@;4M40%{FCMlKa1@gVmD4fvK5YAeVMW&K0dyb>u2t=naX#-NG^-~FQ{=(XJBS9~%? z)@?fc7r4YS@D3gYQ5Y=WaUa$#0?VR_axaBX-_3O}JyceUyXI4CJStazH0kiECSZ`q zYF?zw#YBX6OZfoJ#Xz58%$z~+FxKNFZi}J^VGnB}^$fj?lX5+3uz*^H^AX#Tz4#w# zsi1g;ViDA}@w!j`MBhf?r9jHRbjcqnnYB(vV<6{}S1u19sEpg40qQbrr@c2Y3Ah4 z(npqXmjwpoElLz}Y@aWA(8DTyOr;(KNig}4GHeOc)!sz3+*b}#(UN>>$pswvXJsA? zdjy=C7l@yix&(sIog94S$Z{>%Q;!3uPsni?!CIy!Zj%6_;l~u`!1ulLp=XcF=Vme| z^(1Yn%dA@?j&usDOIo zwb6klZ4d1mn|sS!BPR9@e8o#oo9yVxiIZLZ4OB$4&J>JpH6d53^j!{=7p$7!t$h_9 zH7gEzfNuWWM8e^OWM&bnT|@&nhS?IlHS4qc}@L!>){3hI1dW~M9R0`S0}r2_A> z_1MrIFA@tfUrOaJr3blVz6>2{=lM}*67y_ML&6YYzg#;&HX~YXeF74s0<=LG!bzx8 z32x`ijdq;&pY)>i{Rw=D#2aNs)OrMAu%1!H#Xz}Qbxd&4B-wfK&NfMRjG@g;iI(N5 zQ&Rl<_G}kMGv00qyk@bWT9A7?tlEzbZlRiM%gQ9HJ^7G_JuPxJjhZ#Oj>F#m=$_h9 z`}W;I`_@Qbo(vPEx^BJR%)Ykg9!^Q4;;um2k8&B1$saQb8Y|DI)NC4-Z}Q&1%|o=| z1NY<~V$C)wh*eaYgBA`C_gc|5cR8McX7V<+duOw>Ft-|?o>o*Od##>A{j*(o-AnR$ zoyyQv)%sQ|Bi`)oNd)ecA9-fakk#I<>3j)hY8jenJMjtZcFzR=616s93VAwCQP1m5 zUJaP>DY@XhhB-~euMbp21{g1(-Gzn|GQEZPOAc11@+A}lv}|#fF)H4kV zt0@{=O2`I&UfvT2l6`p5w;~Z>me^Mv)V&|&bT@ufN4Ta&NEHwp86Fwy)YP*(&mU=9 z7N=#CSl!QKL^4J!HDB0L<)6ueu%1EW;ROil@u~;87P~9Ahr7pm4|%`qIyR}YA1Bvl zsvfvfyx&hgbJI=ki0aGFPb-|zpGtmKty=90EQ`i47nfr)UE*wIUZaBbhk$y%sY~g* zmf!*RTfo-M@sj(BrbA0pxi+ozCD8DmS4ef%B1bMmOEH7YuXPQAUO(o%e46($4k4|S z!vQZ=_@*W0-38~yQY9C>`d2YLHk;i76ezyI1pBTfkJ&lU*ThJ_IZ<6B(L#J#*5cAV&BU&_LxR_?{< z3rg@ZWJFzG{i{(ku+!2dMb%i;1z{VXSVzwYSxL zy5=h8Sf!6UL#!D$NEwQL3;t=szGGJ}g?EIYCHg@XaHkBM_LPQXBj(wD9sBpuJDe$C zxBGJ0=f%fT9}y}(a)kxrV@b+gL(N42Kp0?N20XqWF|k2jQ^5B*zvY@f_m zWPToh8IQ$K%2frE*)P8yrf-eGbDrEsJZtJx%3DgafsF5#`I~Kw*dyR^<%wa!?JuL>TWN+cop_G>R1Ia@N8YflMYnJL88dv7y*o6D(SNkkvM z$djphnL~Sk!_y(7o7voP@M}km?5en!#m$y}!ZJ6&liuJ+5T9FWq)NZqC>+vee|fvQ zax^;YO}~&0I~w~2GV!kue24mVTO;Fe`w#aI6VfZI9_5p>vqORzBp`TOoWTR~=-Tfh zV;>u?pN2v&I~cpa(i24ApeDa2uM~q-H44Mc0{#j0mU@(P^`vRu_fi81?5L^>r2OU4 z=%U8ftT$+2%>lAa@?qs{-s}lRG+{QBB?*!}ygVxsYQso9t$5FiN0;`w{RDx7mL24w zojjKPf=KXWswL+^?H-myDl}>>z;9HHM+l9xnwfs*AhiYQio&jz(PMoY(eh5nM-SDd zoU|{-8K{-n7}iM;J}$YF?4y$c|5#ixln zvzKks>3b89laGz}3Ok`?J5i%sVYEqKFHaBAnC2#8pxS&oxf|KSy7cxtlR{CegjxLS z9KlEe%>8kO!ej9}cJo3Q38$I<%a0bk^vv#W#xbU1W<+(*BvB6Vrp8g9qG9U(+g)!j49oz0pS5tckHkCzt45 zYr+_&`v`yJHZgf`OtFcE`O4e0s13HEN37{(xGfV6y}q`nr`$d#5K9T^H5*ybhV7NBtayk9q>_#9=9n!kE3U% z3w2;lXnZL;=}{Zd*K?pa#tpEe#tL+$VNQ~7=YNSb7F~rgdGCL5VN%(Tl~9z#NVpF_ zHmWWIM)V41#Bd*tW_Hx@`Y*i4OC`N5Ef(>o+d!~4De|&gW-7nv>+F*q#u3sZf%pqA z6*d<8Wu^Cr5fUFO$BZrx$>k$+!9Jaap0SgNinR54163uSS0tk) z_EnWQBLg3fh|uRUXl*~ZJo0}b+wkl+(F5n;*F$|h1FoI0^Wb>7am=hx;C#~<1a1Xg zqsMx$YSvNDObcN&Y|k@#JY4L@loqS~vcY&Mb znX}+cy$O=ga~KuwgdPP(I1b8$lzkHK+6&8&0;&jxgSdKE;M2#Hf$`=EIvV_Oy#;Kn zi3ezoN6LzxPv*mr$~Pa9mEOn-v<)m});5K?^SxF+_?$RxJ3oQ^;R_Nde<~QUX#mvg z{mHOo->E#iOWFQ~jbAGsu4$h}BUb|2XmK*UC7{kz`s3RJciDB)(Q5TgG}nWSW_GXZ zmt&#BS>iacFM1sXF_;!letqDUNkTI6c;3*{OyMQG1&AweGfmQma!}&Oq$*eq_0XRGc~r zOg#~0rI)*6Ch3vmg;$&U3P&oJ=>AzHQg*lfM(2DLBV4>^l;vi$LkiNR9>taQx+!5I z!@DbQTA@$cGHFv(QFxkiBmcVb!UQ9KD+jB#Kp`v(qC_-&GruRe zWL0CiwlQz%{c%Ye_wzM&^jh$n#vs=KOS=dIvB(9`YdemVrSlo;yTQh!W=exS%ABo? z?>m=a%k*u^j~30dY1~=lgqrSnO`#QYA->}a1fJv5K*bs}&TUUim0{43F zke9!4C0j_%!?5NukUiMlNIK+mR9vJdw49Dh#rL0wL3nIA`!)6ww5~V6c=qMcZf4DC zGXp>t-(F*Yd48bz#Y`FMN5bs4*u)SdY0B6&^mJv}U=I9iO?f80WL%@4x=NJy3yIoJ zE2GIts1_61dP~uuF-`!Ju9|dCd1JUr-%I0*LITGljwBrFjY4 z>XF?vsVdR(;s+0PzWWpFRcZ;8F@qHXm9~wskuoCLm8i>LlOa`0DV{`$S}>%CwuU>@ z#g)t6ge}ql|H+Wpi;I;e*0&PA$?k&oj9#ek-d7g)wGuj`%hANYZyZ>z>06GMdFh$% za4XRSg#9KEZs{z7>e4MvRHf>w@`9M$+}=lcg=mGUvyW%HK`nKa`F}!YZgd4y%(;ly zj0)V2fn`l?LJp+F!(BK^PB+8)M66^u7}6qxp)KW4niKSH8qTcL< z*6)-;s_l7eGVoF3T8&QWib6(6iG7L*)uI80VoI&dH>mcI1ovkD$fCZM<%2GPvbl?G zx9J`DV#%@|0mK+Ew?ta+qJo)-|0~Xz?A!O`9?gJBO}U2kL|nt9;AHHoLUeW%Zgozj z!|F8utmoMl@SKY+((WzUm#+uwHz|#dXM45^QHUCZs6CpugRX9ke0LNa-ds7tF#Dr{ zhV^tYLvYyLwj=tW7sY;b;H~7%ew-~Vp07vle8`6#Y5KU)5O1Bgsi)?LNkS z` zoo3l1Vx|@3<*O5Vg&vFhc{~H^rKo+fF&qxS3~N8ouzpyXVUZZQD3)Fvlhz||&bN4Q z)h59V|NK+3udY?|cH_R#qr7C^2%jO!TPQ@7USyHP`1S$PE1hdXuBdq+zWTuA4XS%d z-i8mjMt#uaQ@`(NXnNr0lmOX&M3`p zSD8oOt7n$nLdr?n#*zfF;=Py0x!X1h`wJ5N4Q*p4G2}IMVx0Q}x7(LjHIjtN>>@O4 zQ5}ZHo;p_18Gx86%1`CnRUF=({gYnwc7qv}(LIL>eV2tp2&`+F0FVzWz4>g&Cj?$r zQviG9?`GSIJu}TF5F+w)3D_cWkAlDtOQn=5U5Uy!(_ZITa+&Ck<<((o3!y%)!KHG8 zFUMoIUZRJ;U)J`;#xnX!HtiQYjm_xro@jCX2*KnH3Cipvzk?%yiAbS$#eu ziIPaZVs|q}-@BZni1|{tNDdmW4bXzIN0hsq?sZ~|5+MUdqu^!Wn?qTa_YB$bcHxSB zy!ph<+1a)E_6KlRof1C(1DENW^z41;WuF_W6&m+>hLZw12JkaBdg zKj^^wGzhg1lB_8wMP~ zeD-Zt&avgEo;ja5wqRYgHthK_!`>qA;+J&YDJFvz}5_q>X`(vi`Fi0Brr zuZ{`KTfc)z#lDr(+1E>U3?(a3W(MUu{$%x|!B*j1e#f5!5`bzqEl9x`-V-E!@d76_ z?b`I1J-%A)u-RKLv98<0pr8b=d*(lWSg2ryhMo*O(6+_u$DYSilL9Tg{JJ?Rem>(k zp*CJD*W?k;$YKey+LcwU%1Xe!Sq0KB?N|MSZJI0-W1vFPUfA2Nv12D|?~JrE@! z;S|kK`gjfD=sXa+!DIF3e||0*jTCuXA(P*gvqRu=^FZ^Wg6!Y90RKDX_ea(#_Pjyb z>68k1b@j~5up~4GbgZqIPQ`~$UC2#T81(`FY07AI|X>M5wc8ZIwcjO$q5 znFlDhV2sbediUpF{G&b(8jq-EMMqjJB?D|HJY89BP{gqt2=EAqLL(Yb3#p5DQRL<# z#6lo7%`xTNc@EGb?ag&}1PF>h;5_0}{@gl24>H*1?Bg@!16NiyU&}`CsspI}L#^Xb zlqyes>gJOo^}LoZ^)?KBJQn84@N-Zem5=yu*R*IcO0B9f3XPFK8s4tl4DjlSC$`1> zs?jO6>D6zdkp=ka}%OM%YB@r)h;j{KnCL};WPuDn8W9{##~cu>AS z@Jy`q(tTp}*WSX#k|gPsh#SG*U~He#BX%~hvM?C^o9Jh0ox?ZcsUYOLzx4sJ-cTmN z3Gkcfwrr1|%_UX4TH&C+r03{K4MS*F4yLC@YUeC)aA)Sz>T{20D3u?gaD>g zvvnio-5is54?^n5|Cf??`Tp{&;<=A%U?O@h12?!pZum1u2 zH5}!SyXET6vvz^-_aFXEME8X#UhT=>>hoW2Zj=H3Eq~m1ud)k){k9Dn&1)uHK?@VN zci=FL^V-GfH__b{K6227$@|MlnoCj(ZT_iPGTj9k$>-PVxVU>twizPRVH)W256Oj% zlAg|H6d`u_{OGXxMHuS|;|E zAW)i{9~~0xQv~srQ~$GvSr)t^bWc35cOsE8WK);FwTIK?(e*F11g#G6ZfFV8mnXh_R!&8 z>2*~|M{RQ15I$cSYVvGQQIM8Nm@U59e7gMzC1s{mKo6wA1G`{`hsnQkBl?Og^)K!M zH6{ajOMjT~m^mngPKph9zpt%gGzfwsvdKSvdtP4%<$KSbWM39x+GQi?2fCM}(NBf6 z_3j$P)P?i8kKeBs``F!A|6pJ$9VY1u$%56^l-66>3IPq_9!fTb&6gk5KeZsG#(@`n zo&xOqzK|9@r@9DjY+`F|*~IBTdVq)-6=kI}ouwRWaayW_p3ULS2=A_H5MOR~3mG`t z#P2)rt7+|xbvMU-a+^yhWDeMmj?6#O3nw*AUXQ*GKF2~_+dihXSuXjMt4MKw^WfkViJ?lf61r!H z3ht*}rma)WP|$cS-I9|(R0@6ez4}DVIq0amh&yxAX0cyj<}+byW>40C8X1?>UwUrk zPCI+b4yYBSxpih$LHMeO!xYyH@PcVU=6;xvKb$lU`g!r_~C4sM?=>uc7DR7^9N7?tY22V`I3bU(7`6 zRwf2yx($Vq1}#rB^dp<)P5_SI?Pzo^FnORr<@mr_{7sl`) zn{sr&59!O!I|mb5FB{!BaWFThPxvHrd+nqsp;l)w>?t3j1GZ0EBZ%Yf32rzRJ4qT zx#z{xgFm67j08cXac5h)V72O3{4@0?-5l?}*Pw9f;EGwLIt}$9DP|5qVUc|?z{<3( z7KBL(^1@9&jKQVrG`Oo=&Lxmci$?r9e7r#c`AVI-Ug+{mVDw92E2+ftQ9_=uIlQ5i zb+YQSOk=*eK<>3m)uMB7G6GDzhFpeC{3aq042buFK{>3Z7?NRd??TD7)HTT>5gh!% zScd@8(Q274Y|#0l?H=FPmWDo{HJC0EH87{^@m(;!9TL&ITIu~tdF%jqJ% z5)u%8(U>W>hjcm0;OloSG|k{gv^CI0j!?S+N=!o&GtMihgAaH?3lBc zCy`RK-HU1Uce?6sCfH+}&7vG9S$f5Cr1H5OCk?LrWa(RzlA<+^VK;C7u}GpiPj<8v zSoe9aLm8CUCVOY|s7{9+OKi8g#R0>}B`0>w0$|myYTcD!!5TTVjyr^Gzihd=h|26s z(?@iv_F)~@grtwB1&)#54X=Fw`1AWT?gEhH*rDVV+gIA{66`Q&QoK@Oxzm*t+_^tl z=E45*7HfRf!QnI$$x~Oi&x_FHrZ>d*ANcR42E29kOg0Wm4cL%}*7Wj~Ai4{;!8BUQ zLIf~;at(Qi{F_}pxu-oYA{OQ>5OqBr6-m)zAU7c!$3<|>Ri^1jzViyGT0L|()RiDV zB&!uUQd6+cl3lbf+8(T&6H$SZsS;!x!1 zM&890kcsDX`n_Vgjnij3mQC%kFw94nrD)JoFbZvp>|`goN+r(E`4zAyniy%>B}lLi zls2vWChGAzb8l;X=@cXl4qBR2n*Y;$UrVV+%}!FEcEPZH0&L|V!8_((t?GzgknpEe zw-b5`fqON}@XUE)z!cH)-JrmP%2!xlA`%hQ;3!MM$=haXU#ewh#!9Mc6z$~T7k~oQ z4XO`LrQGTrHVIb3#FbwRYg2q7R$UJh@`V$MYLb@isk_L1(kX8 zRX>Yq*mf^2P1G)ZgE8%ULRiE0=VCk4-lvhPe2;m2>bq2*>~L!`_AQpW9dq)w--b(s zWOQ0m9BNBJ6Z5vq_gl%i78htx+CyCQHGHpIGaMgFyrIX$?0y5>rvEg&013`r*QD#_ zd5-yXN0D`{1+3`(dZV^jU(Q?chV%vhl@46?tKXBr5>E}4I%Tx!Gufa?RPsxnC+P#h zmK$fu8c7rM)RxCQ;K_t>2MR#YiV#2N@jz)76P6p1=)#8(mnY>|mbl5pc0(e``e0;_ z79&i~@yS}adMM6{ij?F&+MCP40Z`6gb=zgXlwREHc=Ti{(xbPq8UeCF08@eZSDv-4 zWg8cf8>_p)=7!_kT)U{&Qe!0xuLcGHS zFDg;XNF=S7b8!`x2J8~3@*CsGLX~MnSIZ=*!tg##LTNC*F#{U$^BD8+EpNuJG*oH7 zvP7)`D#9IKoRuGel&yS8oVv1e;VA1>^USHw1s?r}#Jt|M5pG!hr zFjC3Qmy2r*97e_$)Yyy<6Q^}fm@X=1dF1xlYm$q5;haH*WtNyl-hv8+%#ic6H~dMn9yQ zAhu2X;&qee&(q9ET$shwFs=A*%o1PvL=FTjvgGm%|VmX`lRJ~=Suj0)krQ?%oShD zy|Ry~rZ3lFh=P%mx9!Q;^wJyDDvZ;c0FvHFbqXs>L8|-tTDo6mnj4DAv(}&Nm;wzC z`ponMS`%vtvA7fUF0N29&{Dwo_N`;f+Q|kioic;5x6T@lv00G`~9pJdpM>VIXcy z1mj8-UYV6zMFov%ow+*EgiX-|FIYbixxQ~9|5^&>yQ;wQb`KOavP)eDkeULSdD^Ir zq27$5pcb)K1`1R@*Y{@9@MR_?>2sEP*;#A~VkQ2(jmtdFT9PWN@r*A}221kiD6zfe z6Rx@mWNT55BBgh7`><`Pe3Na|3~K^ySaQFFa>*UM((@h{X_tum(}2i#;qufoVMp0p zKlWfjMq?Cc)ZLQf1zriu1DT9`aHUv)I97Dh!1w@N@0KrBB)X#=JYW(PAac;h!uVPB z8(-Fdl)QIh77k)s*Zs^fb!rU9$~3bfRpWgt{yFj@fXBYbLG4w2eQ~lPq^yO_sVzJa z1#Utc!X?ks<Aam^0e}j?zc;^Uan6|yKTr&bh8#ESR1`g5fl>7w}_Ij4t(D6a4Mx)XMN5a zDg7Z0Z1NbamoEFQTG@=$YvaYL7$D8jx-k?0c7;eGHn^Mtf*_ofeb~t@* zmi&49mb!OmCRy+XErJZoA}8^%OIi9vSgVXncWzDUfr|t0n1G9A<}nB>CAOdIuiHXf zH()f{jaPFP1aIb)igu@Lj13N&6;`S#8iM_#92B}Oy_iU1<(C==2^j-!g-N$WG0EJS zNHn$r%}c^JjvgcBrDhvUe9MX>7ThOuwy;MF>leNJvC`%<7h@l!jc28#STh#Fmgz}s ze$Bb2tqLo@JRrCG*1^UVXDxnsreD$`UGx<#2Je=GT)dSC+&+e?ffD$?1BAq3J?%ccBYX zNf1!?489z5-Fn$A_x)0-hbFzPE|mXz?BZk}#49HGYya@A+w8@kF0p%o4~el7ll?8e z-{x6%V~n;o$8;W#o-!mW%+Rd?g~@YRMva3rj3(mF7@60})TE|_M$KWLrp)t2X> zab=EqnHNjThsfWhouCau7?g1*#lMaBRoLpL@GV0r{3q_-GOF!%UH9d!QH8b?x3;*u z+gkyO7l(vEaR~_yK>}?l(BkgW;t&Ewk^n(kC>o?lf&?c(ad%30=GtrQGv_>GtTV=5 zV|_WF2!lcX^8Dp_?)$oamuG#g^Nksy5lItmVVz;hTKjhBwRIK_B@%aS+bK_)6dEv+zo>_D1eE+ooc5?up5A>P~s^ z2kkdQEsWZES3hWqP{KcSw5E4)%z56Fd2osXoiY2U_csE)LavOD4Qhlq#^eaysXUeY z09e4h=wEM;!AQQ4DxeiIX0ivA9fVD!%KmOflG0oVUNi+qKPOx#q0&aFpQ3XklgPbo zN1=NXY)9Od{Hl$*i=;(PAaDOg;Bpi(Kscj`th>*~0C2Yg1a0a$H>QM}((=$b?@P zTB6BmMIAeloc&Fs&sb|BV)O`hMU?Qj;vfbBs-lUz~!2sA#T6Tf6MQc-o<@&l@T64&E=^S zWzZ1JlNmRcRta5TXp;8<7;^WDXN12VUshq8ZMJ7oPRsKBp2R8Km|D7N#5a#_xl>je zqAA?ldb_?1`SpTah)lmBfrA=ozwFRn5^Y#x7j}vP@*jpFUd^3UHZ}~8w{)OcaVZ>} zwL0O0PoBenc#5A86vAcPYiU=%PdE@@oxSVE9cN1({Y&QSzm&RapQdBckgu6&zGjQPYWmWKu|*ye-P%PsaXvEA52Vo#G%oiY&T#V~uKvAVh>=Je zJ*lf(HvTM~D|6cSlM$*K=tpzdk(}s5BNw_Pk|Ft~kVRaibi*{`P+-hAQNyv~eYiIZDc{VO%Qvo>nTOiN zdYoGsaslbut9-n3m`_3d>(^#t1VTRM4+OTz&&4Sfiew1n{-H2EJKt?LP2XQIhw8ku z`{|#{D907Ol-`%*obC5C+_QiWCg-cE?;u{Smw-f6C)zZlG~yitZ>{;A6`ij@GqPQE*?H=ZRBkyN;<7au&t8ByIBhlB~6U2yGIW!{ltlZ``*I*LVCe`Riq;;8y+j&0|PG%so{d8c3^r_SNOcr zQeh(3VKJ=@km`|!8L@)Hja<`L-uOMAz33F5tY>fPf$CB^T1?9Q`x!xA_up)S6*CHY z>R99(J68qZ^=%tL8M)VD-zE3aj-GW39SIb89LYeA(Co2FoV>HaSkg-+OZs)F9nk7cby2Lf6zr2e# zX54K{W*R$c?$uW8DC_T`NEYCWMOTQTgNj5)wMFa^n)irCd){9{#@(!fMFM3}XPqWQ zV{z%vU%ICWP)bUzC#idj|Gu7&rV>U|=dOaZvgUP}@KMn(M8+6=wW2C}*Ap9;!M6LN zgEq!sV1n8jrn#ucgp`PM-6?Tksi$yM+bmO4PI7%50mHe(_H? zH$wQo|1{S2rC#cG79p=1eB+HHpITZ90FwXn2ORK!lQlQ)E z;}Y#Rak^JCNJATRAY1=HT{=P!MRQGMGEO3UaV5B++qd@|AotJrU%s(g_Os=~Yz+H) z=1s2*%CyDbqgDQs!w^=_5ZVvvq*89OGTd!!n8Eb9UkLC$yJlvnG$~7(8s+qsHa+de zO(=TZiSKOmjcQ`!xoKjGic>8^1{&r?^l&Rum7+zzpM_;&A{=ZqFIH!;g!KFJEFi)7_p-*-Zsah?W{0 zGLuA4E0rFfQ|Wj+Yaw-BK?-h#>J}e;IvHga4I)Gq;?H}h$rufI1+)BH4?{oqMSy$n zrt{k^v-FRALMb^=CcBJK-`D=RGU*bYUANzUx3RXF=0{5>sq@L=_8yfd;e|ENh20>y zosj&rf%%cN&5#1JPc;AABne^a(pA>-?EApIj$cYIlK5iXDjn;)ceo8bL>@sB<^fjh z5N6+r9vf5FBE-vHyPE^g*T*vyE`Z4Za;qczN_YR?oR`wW#fI>z<|Urm#%4RF4##5w z>dsA?5})>)#2~uQz3LM0O?`W@jW{=K%h@yZV1!0{)Mm9RV^`-Fpi7ofRg>(}2z3dR zxb_a$9MXX_s_NER4l3ZBz|!Jw_q<%|2i+(LSYoV8;+MDGi~68gmaV3A3rz4Ew14<> zND9`0EqQqNFnDg|1vJy*R@>(}DL*EkOaGj&4xJOz^H{NZ)*|$te|XukY36AlSF{g% z_$eUwy2$%6ec5p799PYA8>km#%XrSpCL;Q)S?j}74V+`e1FQs*h{{d-7>sl^Z&##z zZR76GkFKtY+7?p_6rS+Z=q+)XUcIx+YfrC6WT@2#*1o%iSD_ZZzD~vuc6}rk5L_Z=?`P1t8 zDKr7dLo>w^(k*efAR@5wx!a?TWk<`CjaGvdy?lH6l#$aQk{9ft4F-_FC!V z%Ru(n-s-*_MB3NediDW>Eb#T!u3x(92k%yV*@YNpRJR`F@4+{tg#x5Y&m_2h@)9u< zjUP=?EwKKSeLJT&uPdgaE6&6n)SjNrOlq|8hd)!)`CWE0bo6$_>SO&p_WQqkOKGL~d@$`cV9^7ILFOeKuK~Sq$hYdg$gG68iX@i$F7sD;~QXl1hFPMM@t`miGrD z!}0Mk%oz0OBZEH_nHy$aT6^pXERl1$`T`4|G308nuM45lbSpmXZ=u@u0|dU zJxK#+NbgtCn@?&;TbDz0B#n*nQ1tS&qpXJI=NxH&l`e*l2zy__@Y{>ZBR7d!ylb01 z72Cy*Z#yMGs>H&9jYeJmjR*U9w`T5YMx#VbKH2VXYIpU!@M}^K$Fdc3gMOU9PgR}6 z%#VG>^kv@?a8I97T}(BR`oUI}r|$&R>U~R(n7T48b9v^&bz`1;spyW8dp`&JGt#5F z^0^#W*in%mLSx`!V!Wro*Y?cx40zKT>KPHZvyG5>-7`lj+Lk($WKzpHLKA4Xx6{U?Fj3?HR6Yz|SiyDF~Sf+Z_Qlpu}$}N>LI7lRm<2Z|B20%e#`F*_$+6 z7yIpXC;BDXaSwzHWC~-VbeEttnFMc%x#?-;0K45<)qXV~bFIT}-XPt2&A}w#sJ|Mu z;g&?YKP*tA`-83VyDeQPp+~7`OED;zp%dNcnNo)EwPVeLQt=JymSuaB%UegdwM}Cw z-IrU*Qxe7-kjZ`hSE{%#w*(_yp3KJ#)OMMfVEuds9_|Y5;Vp>Z$S)+E>*xQwKJ)*i zi~c>@JrR` z6ih5Nn4E^2RIAWsmirktiM{&IbMaS0oNnK55b_%pwds=QO zig?n7tZgb<1L@I1RbHMaH-0La+I2R|G%?G`c~9}LTA&r+z$>4L)6xdFKNQ^fyx7L3 zX*T9R6hZ3GLW*O_cGLA38iwUrxoqrdb?XIfYQHQ+;C+*?|2mE?4dezgd4AY6z8&5A zzFs4Rv3y0Byte*B5z21cB7S*$o=YiEG^C+bJtX&h`FOaT+^FMXc2j8@3~#o@P}g2 zh~lBfhkxCn|K~TU;swk`Yv&(|Qjp3?U~KLIOyu9>LLGV{r<+Uc146U&ibY@6L%?5E z638I;cdir+>zTa>)L6UxF0KrJzIH{JAWJXtm0>#l{YNuYMbf0K_=9@xkvDPh8mFNV z1kk#nDd(a0)_I{s(Ubs*P_KI1*c~5-!nI@l8HXod_&rnecL;2UeEJIw5DcT$KRX){ z0<|fhwjyA#6kOs>LIB^3nVF25@^ner^aU@5by*ZqGV$UTX3`iq*26?ZGYU2&+n zx;Z7?@%08}0k(NE;l=m*Sj#K|9lnWl`MgXDj z59GC|)r$d<9hj7IB6|)GaDwc@i==)QD?rzXP*9S^ifL7VB=(?JWkB@T1~*qnSy_P0 z@<2Sc1MRzNcbNNs&Zzwokz*;wk4JxsGJ5wRy=*7!U5TwNDPyQV>NC-K``Uspx8?k! zk;}!vv~NJ?BVEH5jL4Fh<`94gDJyZvIn~ws>%$@=n`25Z>U~Ew17lDgR~FodARh7d zZJdXI1pnT-P|G=>t4q>zvx8s`CltmUqKODigL;scK!QJ|aD!EMr4FfAzR}PhDtK&b zHnDUxQ}I$+J-)?1W)W@DYcMI*=|Rk|Ugjgt=W7=*m&{-rW=hpQ|HlDb)xYd2x%23! zl6+099;rHJa(U|Kc&xZ=gp?YQ^WjK9hJQ5kRfjEa?RPwN9OOJnYMQpQ{zYFDJ|+6VkwJIm9&Uzx-~NNEBNp4)#DeQQtj(m zg^^ksiV#-Y1{ot=V(u&9rQsG@la{pzd%2(Jp+sq5zL-TQ#15Ne zGK>S0sHC7vNq%2FIBn?~jH}$oSTrF7x_SFY<)bN`oC%i6=3Klt8grD27sM^E?nFH4 z+HwnGS$QB!6nJ2gGss;C{w)(OZ}0MmoOOdzU@p%|ov=?e=mkz~nNM#GTcsp!6z#XO zFGXl;bEs=-6<03K8%8?2c5?aM!tXaj=SQ&E0tO7bS+n+`y{iS2Q!BY`%!Z?fEMtes~@_NE|oEb(xKs0_0L!zyw++za1(Nsh5^}w!{!l;!=u*UI z=Shrv6soztYL5W|!9o)B_Pi)py;h%;BD~2bbGWSnJ&SJgxZ>EXMRQ8*_I$SYhzocM zet4)OKL&>ONdJ=5FLE>Nl+_ghF8H6D8znrigBI)EEFXx^FOvxj6h>SC5|&GJg>5`? zujkarIrI)Uu)6b}Z)l#gZ&XOB1zrV+W4ky}Ws}=V9UedU7e?1=UHDAzZRxaW@V&9O zpP0^ydAqLjjdB=MVrzUv8hvVP`hB`0RjXpZ!jq{N25HO{)9^Qu<~UPSmR?uD0R7&t zxmLUx`Fu=e)rmYWKAvGpDRY$PfV@!vvbbF0AE-45qjC8)rOV#(>>g?HC+v7|Is}q>xLbe_V$CsBK?BG3@vog)|X=D{F;N9 z96~_Xu#3cA2;~YTh1}T+L}vbNHx*DAc%i!y>2F6bbUN8!MFo;EiMEO+MS5;hcT= zr-!rjg1X0J4jU&8Fcv-MGLMicjWcUU!x+C+AM|lSwaEFEz1Gd(#T{GIM^)snkot*^g>^mID8)HKiVtj zu60?@8fkw}XS$^-*#a`iYKohBV&P6TX_*@fk%r71^N?qHdJ?XSC6)mk@t z^y7!(!lBljlPyUE*-E{l+8a0PH5^|vvZ4EV766?ZoSEe_LnD0}+CUE1=@2^PF3nIJ zZ^}n`HPqJUtJmPQXQLA#W{he3ED7~0^8U8(cHBM^w^M+0&!TwmDO5Rla_i(Z=pY;D zESpn*%)z|0ws#1pf;g%8cby!Gx*bN%q;k;c_Krz7L!vmfeIdJMkE}i^I;+codKw8Z zHr2Bf2~5+aC9`3TRmb#I-N+wI))8|>d}kC>m=es!o$uQ{k)W zmTng33x>AHxuf3t6p@Crz_m)PMX>?SqLWYc!d^+^q4x9$R(-5_r}e~3jQ_P1z~Ixt z&nTrzg-gCnk4jE9viz`G4SzK~CqiyK{9K_FEnd?@R1$f&TjML$?35Q{!ok`tBN#ME zFu~|s(S`OEuvY`jA%@5S{atch>S56t3*)sTXL2QEhrawqOU|TW-)@Xlx3(@gR)L2r z#XIzsg=XKd6?^XYnpHpHbzA8N9tt$H9peN2^6o=7SpbetQrs6}nB`O<@V69R1qf<+ z>qbv^`uM3b?=`lJ@Fx-V??!%(m`u$V>oo4OQz=z^Nr}-g0^!uZBs96j+f|sCCmz|{ z_oq3}mV}c1VD0&HC64Ns_$~SY){=5qMz5|i2{4bD6{=HI?(_rRIyE~I{dc{P@EWM`^^g_vi_y`eb z=V-Qz+yF(E>FprR)y6lPyUjyRBF(a*5--7m;pgiXJ*u;9*^^{tqN7 zax1OQ**>zl5}g#6J{EXS!LV6NfF#zl6wjDl^Kml&mpWOcJ6+&CHTp&68I{*}@3XbA zu9&f$ZXj)$c**dYp69DFa$e@gv2$}|yh;l7l-11c16g5f2aY@)yd)k(_TrtZ9U1%G zns#y>px#%t%jQh{#h>|0I49~U8GVtj#aUF>EM;KOJ8ko+%zpGA)ll>-#MuW$hDGN6 zqUac@V!_^OsaATCt5yY0{k5kq`cYpiWD9k1MQq2Q)b#QX#kN#(42((EN z`f)O8E*pO#u^7C#bYm%1mj|>O&1NN@g)~*APogp}dil8e6+i^Qy~x?;N%;U)DcOtK zY0gQH1M5-7?2J%BvT_|2<7NWyI1WB8B;aNhYy}6of$ChIkvK;MbSfgTEEy>A5S2B7$l^SQH#w1cIWocUx6*lsv>k?eU-{T`kN6Ih z{j!%cu(7_I-Ans|wH&>vaq7C(l@DzyUx@C3)SN|v-{h=w6OPlK*kDTRdx7>MjbuI{ zq@?GJy7A7~ihIcQoz@_7G$?P^FXdk%Ryp4<=-Ynp3(*XZl~l?MpMR7sc~GsCB8Rs2 z(5{v1jqlqNgojJ;ZdxqBf1HPdRn@vR>U5-U*Eci@n5|(1bI`N!RM31;05`<~wey{2 z&6QUCWy^&-e%T_2P1)x%D-Uuhb}4G3RsjE(R$hjuH|%um3<}4bbe()Bui?5WZc`mC zSr?ji1vrv!nW1cKxf^`$9%{K7AU6|oY&$X4zWhZhZ76$Y+nVDi_T=IZpAA%4>%6So zWaZ1Y8pBpi!}8@f+NFC10B?=J=v_%m|Cl8TUE^r6jHlf_Eg1qIxM z`RJ^P=MQ|LeSZSteBh|QK1Hs3CME7DnEmFOt-yjBq4PL!onxpV;(#E$P$H_7GN(q) zoaL~dB<*DM5kFMut9h6iruhvMJ~p-_2W`yyTY6_?egTzxu9ctunsNiw;SX*mLkeZRg=Y?zcTiSQ@U`@`vDacE%_VZmh#7 zwmq;+L*S>bIFnQjxFt0W)+w%3TQWezm{eL&ZU*mQll@~UOXaQ?p;s8u1K)bYDg>Cl zuDr(*&2;U}C6?Vi9&~Hjyxvt{k|K?s6Dk(Rrz?ugrBTDd!1&=$yr}Px^i+1P4mCB{ zEc>2!AUB&z!$Ol^0$QgvW*2m*XcfH+zg&rKZJ6IW-rDg)JX8KT=8S-X`&(=3=|>Pk zdRI&Vn#$jg2YH{APS4n(eb{I1Q^6qV)c|24jazstqRCBU{E?#t`l3x)xzEG`8~i66Wh%-Ci*oUfd&AJNmU905SQh@>^^|1f8pt!U z3=Ty?ua+oEu6m5NBA}pz6@Y@-3J{(@LA!}&e=0p-ZKwQj5+0D>6y}Kb{gzzN*Z$Nc ztFy{}Zr82%q6ce=%&QMWINn0~WW!F!!zK*q)L-otWT)#2008e3(^KCQyfX0XCTm^*(Tdx}><9y~TFC@Gi8NfXj)P^=6YX=1c%rRs92?95`rb02?(co5Z5 zZRX_ABkktp@bRQ8iE2v|V{r+7nrKYAxM)wYP|nOs3=w#|iW`fnEW!Nse5RB&V<79d z;+c`>Z2UO&3eII4V*g=59OfH>Ix^1HyDB1q5@qpxws`ji=`di! zQt)1-cJR60u7McH)4z2u3X<7zJ1AzmR^dpneDLOp& z=u{P|ss-GzZ(c|WPpZ?dh$=&VK>_M7k=o4hOvWwE8R;7QmNWeY#mk(6z7u;r#5vcx z*w)^!<*H_D=$XszTbXs`Xf^r#zpS^qvNsf4)emeJh1IG&ps%;a1tUM1Yq`}}x8J4V zem+K9lxnPsh$Fd^l{`>`IyX-`zf~>XOy4|-iVX{|LB?-KdXLY792@Sy!eq3<6-Hi4 z1uYyfYr6^>TqOx1qN>s7TdEfC{r>vYA#QSWJW~ciQ|ZZ6TGPwfbK9wy;{EjN8C1;b zmrz_&>YkY7>wSsWVI;3J%Kz*`& zg`2NKHr=2WjW&}%B~+~BTkO6va`F@>N z4oB1QZBF(V&mAY%u1BdsN$eyAswW7rB7GU;!ZJMsD<8ifbMaotr2BYKvf*iY3`Y{k zlsDJ{+H{Goj)r0p8>m!;8*4svLN2IH+b?3{BqY*Yo%}AjCV6ec!5!L|7D4pOmh)CB zU1~z~wLV4FcJUDdHtW#YiOCT`XUd0+`AaDc!J}4!V9~JH9H|6Im4^&kUtkz1(l9a4 z0$Nbhr zGWHzRh%-VGhFkN=fkjq7Z`}U;p9e2>veBqbu>DeUYZWe>KP2P%lo5=uPc?@#dgl3B z5X$`AS};KH^-1oc;~|AD4;%cJUm#}o%@s5#NoK}$NbjZ0uB8a6`AfI75_t|D(sk00 zN$i2mYyGE)>BCyy&I4D6#i>rLfI+tKa~VD;32{qvYEi==C7DapL~6ix zXkqQkd18BO3{%O>mtVU+uYdPg&vwQbZtEAw3OX*=on)NI4Q+3<@?V@e>v^?a|3d*H zwT>PBp-2qTJv_lWiv9Edygq<6mIjl)ZZE2p9hVMeQ)iAWIL@u8x7J)B_J18K>{uPn zX#EbcI^{Xowu(Y~vBl9FQ%|L`UO3^E52@9o$|JV4s4gX4@&aNQznvnLxr3>^1J6Zz zTb1MfM>l=nD}7qoU-erT(`gCUPPeO4=TCKBj|d)`44a9Nw1S9PitNoTkxqNg4QtXd zX-K*Vj7w-3j5R1F!ta})*OCo^sCR!T?(aBW>U8JrPrgnJegKajs6QnBpuWE9CO&*H zOB3q}3@Q)28>{kVb{_HtBtkX2y{m*s%IQ6V5mptaEfq!1v<2|Fij~AL$bdffE=TD! z(?Ygh1myUv4>{d8#k9Waz?cE^^H|?R=F+haYCs7s9X-CN;3exNEZfK)p~Yzg2lzm7 zr&x98AD92XA#;=XCH~#_t{HIj<`0EeaO@unrQ_MRkixw1LE?bcA%bSRvG_5?=O4`! z6h=HqKMh?EFO|&lEvU-6MF`*6;phn+pKC>K=XWtb8J%Y$$`y%^qPls^UJY?V;vrfV z7Ly;5z{*`yD=h)TgqeRHU?z$8ZD(E^X+D1_idkqPz4v2(HG)Fa+v{NR_l!5I*hu~g z0`-7%qgoq(%x*(Co6=NoF<$1h=n^>Ce>yA~1 zJ`z;hT=g%}dYy~jSiEg|8N=o2StPKY0yqAzuTI92e&=0@9j}7fiD2W?gSFoS7xcgV z$X;Emu}h8L3S$eK*FchIVNOaB0yd{4c#3XnfO6nIpDeJ3G5MtPO=*@YJ$myxTw7P} z%P$IhD4UHhx2JQkzF+tVk|mkdn45%_1LtjutFF~QDJXvTIkKkU_Ot_O-aqe>@PA3t zRk%B$I_#SD$sDt}XdAEJdt4kENb9@wXd49H&=7!@M_NHrrvbr?8y0@QKTA4}(bZh~ ze2||W08IxMfb(qPQ#M$kzM%A}B6}#tqZ&Gc(MpK^!uPy3j89(818F;xoD5zsnWYDkT`swi0~j z65A6}wf={K^D<|B*Z04E=CVtb{22T`5Eo*9YPvG?|CcTA#oUY3d6OxlJ$K6Cok=o- zmX5&+_l+!`ZqkjIZ9SoWz&p*Ov)znMP6+Dv9&rylCNr#uURE{YkVv@2m0D@JF?8VF z%~VyPE)MIAl$>Dc0^Z4@4eWk5R?8!~gqK*jr5PVxB4?mfX0BPc4O(>=bu~xX&?2oJ z@YvTC2+iJV%8-2sv!1hZ8 zgPG^i#5|jfEjg|zyL+yU>u%@{`p6XDOcxVk6Ce!;C+qLS>6=o97~|+0?qFd{Umv0r z2h}$P7ftQ>IkP@YwuFR{^yVs4WA_S~g)6j&wUbgmdKP#GRSK=En@m8WfG=PL!dv6B z)MbV8(bg<^@1jt-Px{~nod)K^8Z(Ikkl*~#%B}qdIs%YAqfOH*UZwUX#tKR18+C>4 z7+&MRQQsYY(f^p`Tpg04`iYZ_9U}ms_Uy}T z-SZ?(T`|;}IpoOXVAUsQ&98}j9Y90QY4|wQ=oUOTHdV}sm4aSHivdcD%8_XR+3AZWwhteO;6N)x;!xi95X65u=wApk>fAwul@Qu#fpi#XW|vlF1(q6^$V`y?02 zESfRBLT?8va{I>;;}VKy9%bZ*TPb~bG3%mHTp&nQc4F*4-HQp!|NB37pMt`jzHSI7 zc~UTZC*d$A>y%lZM9$f<`7Wf<-=EXXhoZOa|2WayGmJ27D7HpFALDcMBpOv;+cK6% z+Job7!73+@g%WZ8S34|e7x$#g+jZZ_-PjwCnXES;RrMWVNxOZ*!urk5FOw2G-8aMy z>~jbv>+7sFWyS(?EjUO)>Hppfx}^;t_ul@^Q7wrjI<0$4C#Ls&vNB+fy3C*F!?0=Y z1tOty-4#)p1pFi059rB^J2E%(YlaNtf9h2TPmrz6KxTtpJi1*EJfVC;D1c#Mo z%oQ*F+v&EEHaef-TvgB%-SCc z@}%S4$us1T=~i)0`aSB=!L-w3k=n}FoBj5Qj$dJ%%uOmZ?K=~&R|j9U<=Ls6u#O|Q zCx08Wqa$<{vy9QHg^gpyX<-VGQa1JRw$oDS2x15{n0j{uy^6a`Y!($&PMmV+KIyMK ziUpzPYLq&Xp|pEJ!)#0s&FfP2Bj@T7AMG-qO$REBquk!t`9>a(^@^CEM~RepNXCAq z;NPBf{DqPmt$l8rbsqckX3su#jC)PYXAi=GOX%iwX^y;s1K6~1GcaM>-fSwrYOmie zaf^58M{+JO`Xo=VFY{F=;PGD(^tVqjy_M-fnX%Jnl#i3gfa*8nLn0kv(x!}0peku? zsKja(gb5^@Z#ZGVs=YptoiwapDIDD`)>AB0{KoE$c&Ss51AW)xF(foxj*4?QJ7;0` zDatoE^pP`%Lk`wzbBal0?_PWXYLO(K-EKrmplkJCx-Is}k5V}6#w8!6KXZprLvK#S zn(U zg7)Gy-ud3mO9?wBLcpp<0+=|yBX!``{GV^`x$JvRyv}um;w6+=i}*6pT#WN zk@Ebb0g!!^!D!2iHK~Uy0HF}!lkAwVD`+9rL*?c5I{9zR|0q4sWk|<)o`9=%|wh-V_kv;zLHQLcdd*VqzuX>k| z_w=ptckjT`16oN_+sRp73HnSe?R6#vkQkh@ZW>3x!3!J51~qMhN;`z!%^m}mIR?#Y z;Ei?5riRZ0chrlk?lwZ!yX3b&F}cuNaY=xIwSp6STQ#7pub{eZi>Qh*ad|DyQt==V-$w={fCA5gmRyjwJ#R=I6gSKL|qEKR%RbA?|5V#5aPYt_gO~znXg9g<==}`j6JrDM~g8Z zAmMMk`i!69ETSx3(tk|1x0CL%$!Kg?TpCMd4=!#SXe`SJ+}*;ZHHoh+Sy3N{+}X?V zc+{YP5?10NG+cRAFlVzSNP=~sNL=ltN!POT3})l$_%&WfoPNiJka(X@2Njm% z#v1=MEz0)u*gwMocw~%q`*q{iN_xZO^uNl9pim}%7dNyY;yo-B9{%9A{)M9k*gFhM z21*xI1(hv180W9T6b-ypRp_nmjnQBV=`|pJ!_*qC{anbS8Rz<^Xot~O=a#Jg`Nwl4 z4o;CD)qR@PI?Uf@1MM@0(6XV8f+(lfI8S6QHNE{?pVB@<)b5Cv_)t*4Rvyejx*&(X zclqSP0U}0+2h_A-wm4!W--eI@Agh7Cw>Dh$*kgL|L_hyG9?|4{HW{_W79Bh{qHHuICnwP-Gs_1Q~9gTcvJgCLaTFeusP z2_tc`%=s_4-ha+Y!awokb?)XIvW>SDAF%PrxEnk^cV`}LyX!ySQ2janwVUH^^WNCq&0U+?C8r4H4 zP7KBVYQEHEd9mzf&{7C0pdznxBo<|}VA-C5vKp}|3tL|SfD5)8piMgHu{}gEV=1k< zr%=q*Lw!(OCn7;Qs$(a}9Q};zYV3k*p{Gjn2~B*PLTAZ}Sl612@sWxc&&^$vw9*1& zP=^vIG>qA@=9N%jAmRCWLbK5W1G2N!O+x_z6q-ycUU_Bt!24^v-5c5OWZHiwse&jz za2(tbG$=6TKjK>sBbjTFx>egd+rE!C3wme+?~s+3KCJoCp&xxn8;ErTD(Q%~9-BnU zpnA2KrLbFmdY4qU(4R0ixp$C-5d7VKcwh6JusxM2Q{SK<+9PbolWK5T)05ts6VN-_J~Q%P~v5UrI>IaAWi0`$U-?IBU`|ldF_# z>r*pEu=Ua<4ynC9mw4#fT-^YbZr)6FM*v9i$SGU#gsMmDj+&Q*JgyV#w>I)uQyVv- zz}0B6=wOBL<&SnfHs_1iIyeA;{tpjpJs<3e2C8whHr|_TL-iuACt@Hzo70YN9Fv{u z62q9l-9HqXo`GhhOxt)s7d5dFE96GnuXljlpRwfXCjjBdS?T;b)TmMWon$gEwma>v zgN+3twt$^{mqij^QyCxe%YON~#~76MiRH`06cqflHUfD#@cqZcb_G8da9#5Ybxc1Z z8N6{OOsn9mC1l0lEzOBfn+@pgRz#=D)F~tQHfs*foj+CV-&$xD+q=6)fFFqU6)R}W zk7p*b_L}yj_CNLA@)8YIVVtkUReZA$nJRj8s?cmjtNG^8Yh9;TxeCb6o=()U_9;#q zqb|-~W4#`x7;wz|V>C1W-ZY6(`+Rh=Wn6AQeL2Q9+g@aG!8RRTKoV25rFE7NANu`a z{+xR1y`?oL&*^6IqS#q|5kjMVwQ17RS~^!5=Th@p&vU~+ae#NSTu5z<^oqxLv${2f zTub)O^9u+*(mV5VALQ@yO`Y^g^HOkK)RK(F|;J#O5zFl3(W zvgM(f+ z#lwkBmg8sllwba8T&D4_yj!U3o7y~ZSP`yo0u?ta29u4TFxM*j#3a`Rk0!&>xx&Tb zWo}l(C%)f(hlC_Qt<35|xetX$yoa;Dj+rd&cvPijwAGB8j@c6F0VG}Kq4L@jI*SRe zoR{*RVINEQ=yq=@IzHB@l@x!)7rsG!n(|s=pnWDfA)KyvvAdp2!iZND`)c#& zY>;aRpF?yP&B+nGe7wC;+K>lOh98aIz!&CuEU1gq-86q}SmI`iph zR#Er0NzO!JMiV05VCIdrPX8?NMOujS@ADI6Dl;otyiqC-g@2UoI#KyV`)i6dx0fEU!rswTn~tO>O))~_&iQx*ua8mb#TUXHm6*W=g+0~$nX53417}sCsxg1m)cyT#%1$< z-$)^)OFL{wz}%L_X~_Ybz9hoFlL~o!SgAvshzXl+tfB*ZNix~!un#<4zGG_*JMa`{ zwvpZXdz&Tm?K6`dq?e0L!fV)pPFM|eGDN$Wp{+)?IAS?hBZaqLiqTdaCY;&1xCw^~ zBm23@c}3CN@fdY1(8!_@Xp%7hx6@$MF%>fG+CUNUz5+e{C=t|iZ4+Xn029}52E<&IEuME4Qa zl3Rj)*p-w?ggBi|i`?A;-HpW4mg5cBZI>A@-Xs&A zRZ1+4>3+-E;>U_o|H6;N)ax;$rAPau?sC&Ki8arRakW{0apbbnT!gXpe)}2fKJ2^B z#KX2!+#B@U^wwCfs~5wN0N+9?@EhaA(Z5R4B0IgmZFD(RrYmAP{N3K|1wtTqqi8$4 zqK#WP(rg&K6$Q|zO;k{C1k%JZHk;44*&8Y@x-l95`-gQ)&qv&;pb9bv!wySV= zXdJ0Yv}wMREEr^#$}K=H4r=scqjMWi8jTA+S`W7nR=)J9*wf8>fp0m$+xc7fw z?tGY;$-|i6F+X$8G4dPV(pR%lCU(ehl9((#8TDPAyy8stlC(0FJ`+KVh}4!K8Z~xxWtZcz%Z}fPqyF^?z0zSw zsBEXlGx=f(OMZW<=6IDI_s;0Z<(aH6%A!iUF?AP8)W(^e9*-GrPuBfi=AplT_+R}V zAicv&t2S%c!}%rdrK`NIVncIWOF`$0S2q^vZ88U;=k7l0-@f%ZvJkJ{@p>Jlz9PS3 zm%Ot90ieUenZ4p44(M8ebz$m#WuQyG^cNKZ_mu7jxi6ORdtc-CxP0lhQQ`Zio0xBd z?M_rK`xVIDL~UqOnYb{7j+c^*_8X6l`A|2l2u2An@S3*lmx3sDw4pRKCrx;U&U&r~ zX)js(f3r3Xcaoo3;m)+%i@Os_n`}2FZU4pU-LK&2F)VYn*KKid_cLiAG1+stt*2CH zD)^QC#Kg5iP2!qTuTYn(t)yt-haCQZ$65*OahjXFtVTlP_?B8Tr<$Mj`nhg{?ZoL? znIx}e!piH|r2YrDtL5(OJW}G|a%%AILM(G(d(rf}gk$UJ@skuKmgQrvuif^Nv38K( ztax`2n+1p*EKDnDh*+N-m88?L{E!bQl^W|gqF(4Ebk#9WFwo*WZ2|Cm%+6kVCi@`h z=Zs2@pfaga9)e=6~n1c+{|6ltS2k&Sfg> zcVL%<`}%e#sAa=yn`){uq)oDVx~$o=I9<5;p2c}gh}T&0sJ%_F z{yT;Vi1$ikZ?G2#0#o)PK|HC(v+s#B#ZkD>L33U`rbx6bdka)fZBqdRg-CM7PYde^4eU(1 zCL1+(|0r=#DhXwlwBU^D(fcUB(iNDXtZ^PB+a4YLO&XGrg^Y<1&vRiBNU2d^`+&?l zZ2U0j!*3I*jcFn5vXt%DkjK$M4i2uouHMqpUMwH{BE)ze+S#+o*|M{;eQSiVbZbex z%GS4(a2V&!k$yGBn^W(W{Hh=?Y6@1zu$uqwL*M=4>e55OK6M8}$w$V9R5JW?p4)(d zS10@;$G8pS4xHnH<2v=IwE;0pq~N{*?KH-(ykVCrE|ID~Vy?gCfPSzuwVD!;Ad&dE z&Z(wunb-|@%|VdaeOA!9J6Hg#5$D&A^q1G`6MxukWhj>fxKrycN$1sCa*ZJ%d42i7 zTnx@=5Yz@(b$i|48(p?Ul(N!Mu2wQn{p!`zdQahZuWS z`Rl`_8Z8goUk6(jKH{VsvoWQ(mJ1LjH_SMH@5xh)S4e;6`2A6wSVFi*-Rq$2PzFCd zzb(^np|)!)M_#WPer|#!H#zoXx`pa&^C_^sK54X)uKOdnVBbl=*V@@k81+Z{{jhHb zvJ8RiAoy)3nY8A9$&GwfV&&#c*>Luzhmz4YfmmVuHH%(O+;;1(uQYB>W$)}^_s%#| zOI$SyU&P~eqd4vy?2?CHIo9+IlSl-08xh6PD#5G}W1})+FhKJ|_!W6R zw`%?(q zvj=qZ*e(P4+!vt z+B*cA>pJ0dKTI*%%J8TpEwZrd`O1uO3+Xg8laDM2HxhX`#XbN@yMxR{%3QA^1URD8 zAHD7RoQctB{yY+rm*=;x#MtT@2$mRCo>yTT73{=OBsD}sh8{ZThi97S9sdy zYZe{XIQL%b-*?T7uMXPnRj7HICrphBNS8ZRe3G1z5_{u4XU^Tb6Z=l6Qw&_TUwGr( zT;xs2im;1Nf6$m{SkG!)Zn<~JEuXPs3D<`VH`f`udI#c?ztturc;JqI?y*PZ;*(t7 z!OT+oWVo#VqT%i&lIYK$_uWaBukcD(@)Vy1pn= zitVrCntt)CU-M+zk7Vx;>sGWNAJ!`I6uG8^qd{X{j#3#K%MIuR@n%X@sz2NurBQ!m zyC+cNKbl87)W9s?Mm9TqK@8M+CXpZQ4%$MXW*w+i03L2tbkwj?*^a0&d$sOnOP_=e zQLOpTl-xHI%#NElW1#!JWB=A#B&pqWINmt%&gqtkU6QzoCK=e%`~(ak=G#AT8R&Ec z`C2YQ<%-| zk-`tBLvPO7OG-E%$zQ6taITde%2xLYc<@gQIabVVe9ihXp*qBmGOCwOJb+8q*e zRQe#+!~Ff3?b79fW_pvdx1bFExot^F%b*qUAjk0qAdxp*-XbWhR?US-at}Y+G zp9%7ray3r;dSs2kA5<8`{hV(6pb4o;c(9cWflZW{ARgZHOs#uTl%~-xq^UP23%+)p z7o9&C8WTx3g#i=M!!F}x!VR9Jw)*?CIB!v`X$BbLhL$$eP;xUgHEAT?tsz#ntb=q2 zZ^H~ON2W?RUE=;*{3lhw=$ND~d-RgTsc7 zLl%H3A|&0dMrYnRzuW1W_&mqSfP!z@(-X~K+Fdy{s?oD`)`tm4K17H_^kNjo&_slT zyz6w;y~=6MXo6WSBHR3Cu)T}nG$3h#+ngTw%`2mn8Gu!fl8kc-cZDH$ihmEXr{zgb zpE&;QsR8<%q&W(uuXG9Q+Ywde=Fc|_IbGM+RmJLVqFyL)5GiNwqJi*gyZ(sg6Z@y@PKE zDzk1+axa|a?7!v8{P>@zzcO?R)6E>n0?;Idt@=(UQEfpAD z33Vt-4dDS$bc9#V+^ZYm)oe!-Eku;g{7tIjO$;T>BKGEA4HMjJp6`jR&7&@;T8IQ* zOK()G!=h?eFr)O9oL86#5X6&-n$W`jW7BN^h|Bz*4rbdD67<|tvJbp~)j82%zPu3n zp|8aIUtkKMroU1RApE4LZVb!MPbh*`Ma3BCmCFY|hR$=6)K=?0D+I^Q&!>#o9CE7v zMUx)%tt^0emgT8-26S(q?tDyhtyKE=zuTkx@9`{>h5wchUsCDFa3uG0Yi~aI`0BLw zvFkVUX#_o#CJ?y2a) z{M((h-m%b+b6_gh5pvY=^_Pc}FKB22nW?_~ujp70I+3k;whWvqG1Qgpx~s9Liv*j& zsz|NgOCKx)K?O6%N^gDb)Q5Su$$jS&;h4s+5cr=tA^${U){eGp7v8NzT?Oyn2KgHy?*Y;b7-ooFXJvvW!V(ohf{xb>m} z_L`i&!H$O2w&6t)D~-)WH{d}~Ha--q`_trlsxsfQ_s2i{gS#r8YXDCm(vIsBI5v$~ zmsIWLL529`SWc4Py17+>nb}MVOHtFWy7J9L{WYIZQSbZXy)zH;*k$gtY>}qy=KVQmFGXSTYNO7VD^?vO3 z6(-G3ll3)3Z{?q2Jx4ZQAMlcn_}E%bo-hyi{jJJ};Qp=hs&rs_+jGGl*o9D$6)1Dk3 zqxhn#h8sVVyUNEgtL#n!+Q>`C2eV>mT1N>n|9hMXDw<`bAUmJ&BkQ>g;aTVv4Ly6d zx9t*5fef8?S>NoAb2$uT;=XnBE!r=9dYLH~6B%un5YZx(>m6veLF8MgjL~2h+n)Hg zp!WKo@c)0gS129hDSv+R)VgIzCX{7xO!oxDCwx4{dU`^nARmR|8vl zJDg01a>w0s3MMcWZl2XXxnFJ%6M_6FHIB5iF|o%YKdgEArOa2!+IDQ^1N&&obP zCDJdTm9@5+`Ge|GcJAAAlx<^;9+|ZsPKR?$MT$-0nxw2Y7$-c~imOuU4R53_Bo%my znz&GHLn_kJhDX3rbMDU_kj_1BCLSr43nqT^Bmu?Z94R+P^J+9yc$e^E3cye*v2P_j z-vu0i)c7*-qhQVLy2$!cYYxGm;iJ)kr6n)KR1losbd*LcQrlHN_Hu;Rn#;t7kNL$E zEf-ZE)x0Y4^r*>^RL7}!#g_u-7KAvW*8Ga6v&=G*0GS^=p<;=(G_ITB_tq@?3Q?CCXv}C(0*9`;T@>WsUm=bI{qBXO3WB4b;~&c12l z8T=C+MriCr{4P|i@c>r;IePnFDCpiOi6`E)zo2W-%wC(x$#U@jMHLO$sYCSQcO&5n z2}XEReGwG;S$6llH3h>Le1J7EG366AyFBNJ`ziL}^69?U+A7^*iWc5)HHI|Xf!ZY6 z;E!su=HpOG^?oKcyD8n}zVDh3Oj<4vE;cV_J^xGZ|DE5C)z)_-TswdA?c<5u!_Xn; z$DU{Gt*rt@ts9DA)NJ{aFp~q!k>qmdVcmQr)ryRU!RXb^znf#L@ZLIUJ(f6BS`zQc6+&is2r(00K`CLJ`4EE%8v6Vt8Tz-OTf6~+fj zrD`KKx%ZUDr=Oe*cz!IM)xJzV|4~ZDv=sG5=r^;7mZpIj(myVihA>_ zMisT8Lx9rdcQz?Xa5RziTe<6HV{#?l?O_w@B3kiUg3qnZ+^o$ha9)2_d$jhi#U($>C^Ph+&m1_0zCMVDlhS}m7}76BD;sZfKp0+`D~17g<#@8+V- zIcJ+{7DV?^gpQ4VCfdYCN?6XQez_~gN?W?kht^{2X1ZWqyOKddd+~|^Jz_a%K z77Xgd2ru~lH;!K;g6jt@N;tz_AHDy>)5UP}d7?v8&g4$1kcmsGtmKFm&?UNIlQ`!$ zXlLdHt@MJ9p?~9AL~>e3lZVTYTbcG@(?X<-!NMJ|-pY)Wc<6v%docd4p? zTKh)O{N3W?nHw|Xwa&sDuwMHhbcYEANYv%%w`D0fF2;Zd`mm^h?fO!FlGz#$5Hc`q z9PXyDH`W~k{AC$DV!=$>Q^{z+jkUy%i~%d^SEkAZ@FjK_8H@;&asEoYhskFB)Jz4R z(ph?`sp$0o_WJlIu>Y6)>l3}!FLx>k@v zmZ{-M36@iPI4vxcoqQ(~B-q8I5JG8`Q%Uk04gOnWm=Vtxca z`B`~=)#@*rO!zhR0sjQ9l7|(Q`hf|Sqhtx$gXWB`1>d{&-L*dE0KC30D5Dmwe0H~3 z|2!7tUH!9E8c3QAB06I=CyYx7YkM*Q{qxR&_WTOTy{%sCz3z(DSSQKxXw58%R=18) zM{gt#ZQ9-uik0n#AG9!J1-O=OGHu5@9Qm&JG8&->F;p{wM(OtfN587RSJic{CkAI#Q zYLn$Alg4BDSb^n8h2zX9B{JRnKqpG7VyJ8SrLAk#>Y9WsOXf+#gxV?6VLF^_ic$$K zw=l?C+Zt(JvjNlml7M~lzuh<@v=f!3k>Y*=7-=qnu9-uIKi&_?4C*F1pb)S1)BuLd zNH@J)sK$s_%9GxT!H#LL$)-ALjTbS#lQBhA;uIakziC$Y*BX+~H&$M5f!)qTA$h%t zuCmqgjzS-(Dd^TjClM75O4mbs)fd>=;~6#2%BIj1L-ZJIu_@qE(s;zd*m7 zvd<8#=w)TSXCM38f=0T#i(8xXyz4zt;-iw7GjlGP*w91k1KjEKNa966$DhB7N*$?T zd`&ig*=cuLKDGDpRAc*E@58<;PLdUP5a-Hz&BG>De&4{HZYf`VQ8EeY@1v%0kN0D8 zi%n;X^Tw(>3F5YK8>f#)q-6EfKDOaOu>BJ z8`l>gZg!0TB}x*Q-PMTbW<(^27iz1Tq~@nx$gMdxAS&#U{tKaZQjuQmiI=b?~6L2ha>b;Yv0hrUad zakD65RjgQ`%5G zH;uVunk|;PWRQsESQY957mdh`mjAp#7j%j-Kh5nHJgW5KO7^2%|I<6gYtv(lMR@5?acP@*Brc z&N^o3N*C%66LSdFxEJk7vl@Pu9YG`F;9Zdrd&MEytZsJ5@Z@r=0a)sOz5Jo30bg!e z{SwQ@v>vfBF*4P4ZXz0E3C$7@aJEXTYiU&|X#D(IAo)y~`T46$t;MX(z8<13?p?>U z`pWheDd}yruY$y~h=X5~(luI%$d{-3X7x(t*)ZizH;)Y1{d+)yr94?W+eq=ZLhIdl>zJAHj#bGuBszOH{l!WXs30$W1c#tZqP26^sGq}x+emMBZV&$4q$k_rPgVphr2Ob)M~2Uw;-?oUt(HV|MQ31K1}bwZVkme+4k#U- zHKD2t>c!ETZ%yM8EZdkCY7tqlv;%>dW}`E#(!dcvLMrX_bAX(lJm*=yUM%Q51zoSPlqesb+WFE-NM z^3yn}>!PXIKqco+b5_8NM}>79-WNb%hW)I_f$RGApExc(ss1)@Z%3|@s@a%r!(?zo z4KYK)!>`Yzo#PM5H$c6)B_#Asf_J#HMORvE;~b5MQ1hSkh6j=BCFQ5OJCdLPQWDAw z{5h?~{O0%U^pl(2( zAv~>-DDoB6IsNr#cNrh}Nt@e18V+0yJRD*gZM2sTWXySA0qhioDp1318nnHgO;D(W zAIrQ;R=`F%bU<^y;&QFfin|gve5}=Ps`f;rdN-~%Pw$x#^Od%Tphn%>?d*E0+Jff- zCgh$*WZR9LNIA4ijs>B@sYY&Jd;dIke=mHQ11=Xkn{Hiop3==Dd8GK{_EMLE^teE^ zfxl3`>{lmq*BTQu{5*LhL`r5Vb8)^nY6dUde*Yse*cpi4vA!qBtkMzqGMLL#Tk%m= z#&QJ>q5h}^h_4Gx?=otkCC`*uaDj95e4AuP=II_nox*k1nr90#ET`Rg4itVcD;d`e z3hdSzQPyzNUoJN|-;c)Hzmk2KY>q5Hn*JyciFIu_q?%@7%61L~XKTE*fz~Lr!6_&g zFtibR5H@jqhHY{YiOc!Cq`59%EQORajz0s-&^Qw)xuMQR;8?4H(uUE*5josuGoOYzf2qeZdT-LEbz$u`$pyQ}#~wH9u;;;G zx!p|B)_ckRjE7N^hg_y@)?zLZoA-xGK54l~+3GlDZ-sN}?y-9wH&u_B-7fKmssu+t zlKgoPY#R3R5$8z=qbGb(*=AofC5Z`liwy>04q(@ksk;i!%6N|N7XKU!ei<+@JqTMv zw{ez&`T9bP?E)g5joBmdIf3x!ozpqvJl4a~fKV+n!<#cT zHzo_rZANN^4JK`deqG)8Runq8df!>UgC4%4lz*(LUws^fwe>Z-7eO+&RS@0h1%R&3 zi3rs{l=b-;p0mPosdxe@y(`Xf)GgzJvtB9}-A&-ME1eS4vGt)+j$c}vUsTS7+;Ul5 z{q(PKOY-ybH5X&#;O{MRj+}MR{_OZ(~9q*7_Yj=N8R@ro0+&k4e$F zR6czMzSA5`Ego;5`2~qn`)ekijW2bYypqK|TG1tO^-72!YY#I2CNtkVU)T5iPwsLm z+`8<@(443?>K_i}*c`Jeib+LLIVU2(l=ntpMH}6F+A>1$)C>C_`J>Y?DpTErZS(}d zc<>(ug8v-*|DPX7TwU+nD?SiC3*F;6jh^hF5%?Qso?NLDHSQ>jd(5os{=XVy1Nd-< zzED0n{V_s~V)xJb+d}KV7f=4@S$_e=6ZnUVaX+h@cg{GhrU=VZKyGvD*tyksAe=rJ zHZ???Mqs9%ih0qY9KKuXQj9v;ztmcj03I$;QyhghuvKs3?e2W~orP;u&#Be36o*Z1 z!DWJ3hG9!_+fr3ShQV;{IX6=cG2K$uw_)2RAkNxf22cQizI2lPCz@Z@v&BKmGen$$ z^D&vx>z0{I6yAmaj=#KpzANUi+s6F`L%g*&{vLZ{P8UZMcrLnRJKr(a=<&K4lnPH5Q zJ1%|V*dM)`ijltSIzWxBly;4)tLCTVl(-1qPfNXEDo&<)SErBegQ&$awN158|MzwK z<==`bBt99O1@{ia)zIacD5^*waRa~Mc6pwk{1;8tI>UMG3Y{@weg;n+?50OUxCp|3 z(5PsC`o}86zn4}1Ph7131D90qzlq7W%$*rlW%@!&-~y+*d+$mr=C6Nj4*m3T*opp} zA-2Mx@ceRjtMW{|g`egIQ`;MwE0YadXMesLst|*mldt=&{dax0>hnExPe?VTWxL07 zTWz$eHEk(0?keW%l}@(dKmP;6zdpGW62CryB;L)e*W#G7;p(+r{vl8R9wBGEVbG17<4kWjBU2N7#V1~=x92E z_evFfno zCY)CDQuqIH5xJ*0IUk7M@8ai+>D9K>iYqMS;mH9UKGsxUKa_RsjFBY>lZf4JrusO? zfl)*Fgi%Lw&4GCRbi{kkXfCGDtl^#+AF;|ncnXzq=#gNgVk!M&~Fa zPPdmQyKsSU$9m9BFJb+6gLk0Xy>)-=TG_B^KnQMmm6EY}W15O>)5yHOmQ=T7cv;4j zPS3AjF22ND7~dOf^oo=xlaZOz$@@eiC!@P(a@0#dQ1|Uj6ZJ$6vw}4gz(dO zgvvmt#Nkb+1vkzwEqhV}eupdM?u{9NthEC$_7)?Z zy)KV-t9RUV42xUu^*-jXd>MGqJw%M| za!T-l$+lVg+#Ex#LV1FuaGX;mm%80h3GouQvH+XlQiRmHDdnn1vPOOOC+|$Svg6m~u{$ zozLI%Q^|Xjm#@KGjfz}Fc9XfHFD-i3Ot*KVha#*cH@~D6)Nv?nSeh zAE0B+7CGfV-d~FpTV_V~9TJ5PJohsMbO!eq3Q>4$GFo_|iByv}(=R-N-U3Z-r$P7q z+^1fXf|C>swinVhFVkiEh{PszvWOsBtU{fBT8?#cSf7Ft*Id2x`i7mc*hmd(v${n} zx_QjQRP8IaH|2=d|A9n@fyvJ~%_ozjPMMIvd*Ui=LmmEBD|arVzpYM^p}8Ypcvl~^ zAp{n(v1Vz7a^gw8?r)!pHfE#k_qM03Zt)w&YfEQ!*F5_$b<7%ZbJ$0+YV_7{QcSV~ zx^>pFa0T$fn`#IWe%lPcb}Ibm5Jb1IRdCjua3BJT0s#T2wbHB%&)XEo`A17HI>$FD zBN8R)^B5UlX?Ddv)7afw&w5{HR1;2!~j-C~<-c83SVk9LX zKteO=XAMu5Jz8jbw%mtjF5kuQ^vAE2#N}%|jb(6$@maYlenXT+w`+)|li}bDG|$}p z=;5GDc)ko;YBsu7>F22k-K*1SjNP|pildkI)y>71xstA3f%m(=z9XYc@ ztzIRQ=V56XT2_mD;#CxTrsy=p9mk<--7~e3aC;LN`ZpVWJ~8P6@0O$F_zX*UTD~=F zaKaqQFK!gL;x!3v^dH^G!HAMi_|F(b)YJk&a(qq?|DxgiZa>qJAHpQ_(_mmA!mZmp z+~*u%(bk&b*=6E!z^zDT=3KBeA{Y%&2rD!8B9rnNV$-!zD{*|+9ajwGcpNvDfCjQK z%N`tynIw+ZFt2gQsH14f+$)fm*8;htl#14jQgSOy-PBp1MqxhNrJ3QA8`ID_V2B80 z3|Qvaf9S{Jx-y5uDuX&K8a+dk--|M*qMY<*lGI)6AbILV=^`UXyC!T>$zEW2P8 z!)^n>0G%vAaoexS7v8YQx-xhX)nfU!V9Hg|m|!3=Hak~5-b?a`9pI2J z22+dh!e`HERlE6?j}0enkOH@Yf>CgbaolshnKP3u`Gx6^f1Hx1d_;HYr9KvP^rSkw zs}jIIvJ0$csrwk$p=-ZVT-%#P6qe5HNIcd%wZex8Z}?T83!BFGt4hY1ZnEY~3UXLG z>3okb2mIIyO6?CP6})12Q}?b-_ABr4C!R36E;20D$im$^a8mmwNVlHDASpK{`t?0fklw^viP#~XST2@)@vWC zv;*0S*7mxY_*^P7T8>x8L$4^~ibA;RR05VcF!X7}_Ly$Dck@eUoIXcdIXz2f0Gw@7 zTAIZZal;TfiCUQMoiTF2Od$yaG)o<5I5It$Nyu{a;81}NPL2ObsR@or zl+qTNK+Bj`cUJioiGig+BKSOMPr|+0V$^hP#>eFM@6sZ3pG5Au1^3p?C`GJ!VHx~dz_gYA9C^3WdDd0TZSvN>VN}hCNk?*_5{vfeJy}=V~Q)z{L zovT_fDs`6yz)Mhp;07`W8zKVc?+ly*7{`a&t{-7(G6?`B94l9DQgtcgA5FfNLZswTwf9T^lpZ2=Wuv zQqAb1Z$s;?;Nwt(!>OHcv9&61lVu;*d9033W`^t;K)=J)!O^m)ko~YqqGsnpJX`+6 zmtw?Rp*6Gh$8ilF=ak?fmQLmxfLc1t;wu>J zYWWh9)@|@PcX7eZATGPy*EQA_$F>GBI8rRfx#aNf^X)Q!J+*FjutjRzx}$&{mh$ zGl)j<0L>Lixe*ihmWTlLK(-YK3RZ3|5a+TCv%=|OWj)rO&E!H@2x-mQX6U%U5t))! zYqH!GRY3f#!D_>5BbKhg->Kz@sq?D@7ypXL;xUtD&2DbzoNY}$<${|#=BsU$rPW!( zfh1hkB?9z>g9`cmr+2c;(=#&%K^SdjfRei0lLNxwSJd! z4yKzTUXCZJMRwIS#D~^rf^(Hz4$?&$Lx#JV%I@6S&v|O#wt-BXc9U>jk2Z-DgZK%$ zrDMh1yb@z6c-CYi@dT#_bkx=TJbdQ3>9*<=X6I9}Vovk}ZUp+mMg?r`E@pxPPxH5I zj>q$^$`brojiy$>J>hZpkTK@sZ3LAeEj?sYD=*C60;i}W+iHgmfX9~8O1*cIUxT^k z!UoUZO)<%-A|^m)W;l0q??${Z;_yU++%UVLzamM&$1tn=W$x`Ri4lcPPg-SsoTAXO!8V4RaE%x8roCuyNu{9!7p>6xqO zj|H6rTMksbSeC&>MTX&`nt4)=J2$*r5-new_dv8C7A|YrT~w~)zh!TJ?J5b@-kMEv z?W0UTX)UbwG%120J}5&I0?8oHKt1T}-j|gY@WdJaFG7<786}HcvMu8owqmrR1dNTD zappc=;kr;4_&{(3sIU}x57HPr!ItI++V8e*e3UB~xTH`|nXk{p*k zgOtf<&2IW9YqiG}g|NEGAFE_rUuJ$*L+qECL$g!A=adOtxu?rhlHs@#)RD|v;_g^94LnlrtEyWu ziAN7quj@?3^>q<>0@|g~6{2W^^XJ zrwoOwbkmP+D#X`jKF@^8;zXDa8j&cgN1x04Iy&>ptNQx-P;1udrUI;wtBRa&j?|%` z1|vfyc~835>1Brm*eydr`mpoBJK^Us{`YWJF}?sJLln+ujZJHqkcJ2(G@qR|udc|4 zNBE#OIXk)(UYGcG0T#kc)2Ug<^_KIdPb*qBDd|K=Gr4@0syZ)uN0pmj(6LC^!VV=I zhSQ?Kx~bKL4j@qQSjoE#8y8ti-3o-7h6r2)XuR(i^!p9P%iSkQZ1bi3ZJqGB(^(cK zz$kd#SInbH+}Vkx?urwG$JKZ)z#JDnJl0c0$s?GIHI+S_Yn-qu!%*V_8k>hV2(Gh9 zXg_iEF6ZYeuizwAe7fbBh(iCFNPa&*+Y`*_Dc_!7`iMX62{Sca>rhjDQS^>=SE*vq zPG=Ob9I@5Pzb+mI(4wY?-66}ESXANDDYYowolgHMUH0; z=Zg^WM8_%lZdQHy?U!}VG2>_ZYoq8%oC0)Lbd+cJm||RNJ1+-I*|2(>t!>?0T1I`5 zgmpe}S<9ZXwpuGpuR`U?xQ*vFou~3LA~hE|)Ka`Q^@|r~GVdg;9FSs$oeZehXVQywV5 zzu#0(X-h*0Z5h?1t_pWCQQDHRKFgooW+0v(aZWquw0I4)b(~eN7RJlht8vyhZrFN@ z)#lfb-%O0gMdmAi)_!w8DnAw5&idp@Mq*SHe5IsZH446s29f%a(4?3AQ$66qek zj!0PmYQUvBW9&W{Hu4Vji-hpxO#NwMU;UyCl_F-tl47` ztTNOW*Jx~B@g1K$+1=_Ro$Dbf#}3fy@NjbkmH1hs!#Lb(I3xJ`N-5se+hqgpH5iE=nf&L_a}8>yX(OMPbC5rDxCR5vyMr1p z^)pCpc9sBq5;WI?O*Vkt`z%;umhZ$wn+8g7Hq$Z zt2wDUp(ontb=t;-Bhi^dK0q%nyh&Naw66lxra0P-4S_dn8i-xc&z(1APEPrv<~#8? zs!7e%&K?i#y~6=&e6y_k(j;p7#MY5Y#2FrK`t#RjVj>NV{Mm$f8ezw#q-DbBp;k{l z{Z7p+!`tbYRKtMitWg1iW6Cq)64Ea(vGF3`pwRiMbGaAmk;O%{)qLM5aPAx9eTtZ8 z%j~y)QTup{J#ucE$*(Yp8tnyG+ z6WxOEq!_*?XoZ@&-^pb*%!;zAkn5=78#*!NW6-v1tdI!g=ApgIFGFEnqWJ$#0hBzP zh3eqh70*kxjqVfz51`5C$k)O3`sMivO7=IM4-OE4&xx!%xqzAQ;M3>aHx(=&r3qP2 zh&|4)xR@zeLw6ES;s)}re4fFegY573Cw`n%l^hq2lj-_nDyUV?wIE7#Zl5vpO@wbk z6>uDeO0L-uf{8bhjd8=L*qz6loL)pfvfyNn-RoP~(7o!?QFWAM*93S2lBZV{_d%0n z+EP8~eSl?ZsRot1$9sKfG{cJ-kOeKIYKWbnLWy8Xe{M8jdDdZbcsRS4_+B_W?QNu) zobZ&S>!#8C&SW2z2NR*b$VDp}7-LX_I-0j52&{ukE(K?Ef9f0KgFI!&`S&B#eOf2! zL7#yY71&945Rv9$mmt?d&bGH)*SDXtD1UFLVRdqviq;*!q~KhGUXHR){W0O~yNnPT z{ppb~R>?o^Ze*cW<~ZWx&xa}!VjF3hjSp`2{S{SeWP(of0-zCrBW6o!`8fdwu{O&s z6Mxan%M6{UxtSzvwBAP=Sx8n>rEHfOpHUaB*?Hw456tsIogRaMBN576E|*8)s?GGO z;$nrq4?8?sxMqkGdTZEHIS15>HPkT_fjLq{@z?E!TO$ZbEGFRfi7v&Gh&D%$<|_&| z?yLcCe^{I^omu`+j&=5l>l3a)jkr#GJq#G1Ye;KZWvnNf>dHIayXIL3X1L9s)LRZj zfne(T9MLt08UaeDDSqiBg-n+Gi6yWehU@d1$o0$iTg{zh!igpagw1Hi4Y*t{Ynj*; z&ZFl_7vI8VBl=m$eJ;k+CLKWc*5zPG)hF6?gydUEE_Y}9BmMWm@hxDNg;cdmEeg8Y zYQKkA9`IX*uGTsv{FO~Lqi_~=UFT4a{Hnjm_N)BPJM)ZQ?P-^!w#jZYPn$b%;TId z?Sk!yiRaz^YVn%_iA(fmC$u6lJJlm!e9x1X0AX(=X@_TXT0ZAge!2SN$D&1nd=N9Y zS8<|JWQx~tvRc;UEm?4ia>8A_N9UR+y_CYn-nHbY-ffES9g4zPsqj=iaOsbiBW^A{ z7MqIe=F@i{uMO}CRPTv3{Y7I8wUV<|m^!;*<|gI3rl)fwl}}PHtNph>H2j@KET?PJ z#&%2k2z5pVIzXLOBe#oWqG&y4mTKl>3P7xzQ4KBTgGJGb;9%$HbQ4BY@@xip0VP^P z3{s_a)(QPi6M8E*mnt=X-`^khx6~Z4OoBMfV*V*LkN^1WnW}feKcwb-RyZG$$=_1* z7*Ryj6nt#R3F!pQu~q4poKx|x3}lY6Jw6XRz7?5K`=~mLOFc&c2H%*Q(6&3d%RG6h z7B#LTuV3t+qMU552T@l;coNLS@Z`M!yxizL%p422Ir09^AY@`w&cM8XmB|XFHP-aBemgNdV+Oe8( zvFfK=)G)vdBPw^aZX2vKK__IDk)Nl&H&NExx7(V{&S}GPG3p{x?|b-SmK{2`Rmd`( zrTg>vkl<_VI$Lhm&w{Czr5LF0}s-V=DpfL48 z{-bXiu8(B{fFgH8?JIv5M3l<-Y6LMN_A&c@f6j8Sx_MVq?7Re`vy`tkuX4;8_FF4r z{cTu7xbQquj2^)RA80_f{EZ zLGR7h`9^N}$jRv(rzJ{_l@#E;p-x>=f`{h^WLWy(UG;sF4&!Ccr$g#GvQ83Lq&v@* zar~Zod9u$`-t?u^Gw*%Ho;`Br-oy5mO{EFAim$%)I(#{zw(6BAsxp~z@z&qwu+`&O zqC6%)wQWQ9@@rG&nN7nv%XsI|pI6xCz9|1(A@GdhzC;K`doiaTcZtP%eN3M`l!>#S z2_wF)V%h%ig^0!&t9ZKQsL@R|YaQH6OXord-wk?lS&cqP8?;6#A*yIdj80 z(TaJXp58LW$ueJ~48rZe>H|>`N?G2kbguH^72LH8fQ_zM%9*G$8Wu)YOqh4_W(QxA z(usfmU+leSR8w2~_RH37#j>S$lq!VY5s=3MNNCat2_U_M1ZkneRz#$CB7F;l9*~xh zKwwLU07@qb0qIf#(mS5)=l!2|ywCrf5APV~oNp(C@M(?3%v_6=Iq&aPNs_EHMhT$pN%nc>jd2?;@@%(fqva^;$k(!P|CNv_6e|XDa2IvP zfaw1TKvR2oZhaa^*$95!DNCrO6 zXYujxS|V57UB3zoJM%qCNlQ!dX(rv^yjD50w6OA(XQmSA#fvl7xs%&utTkz)H*Rq{ z*1BH7{eG|i82c#n{DaD;quYO`g|_$K&1?DIZSNr4;+3Ud*|R+zF7^1%rAv2{PZ1() zci4mrB?RUL1g4y6Q{7$NweZM?M+YzN59d5L_9U?k8xkd(+Z;txTB2qs@&%I0;fK?d zlxpR-+sECHC?2$1v-a3G-onW+r((+}9jRQTp``ooqaa-tUAZ)Zk-j0s+TWFcaq26EaGTwD<1EQ`Z1 z-Qq2bD>1UJO3SP^G4NwCFr(o=Rvfnn?XPzb>1_ne%Hi1U!Hv4kjYb6;hHgWmNYL?S zuuKr$j6~mTHlQRiWRvuN-v2Pp@^A9*FY0;xlB{1t-t*7Px98Pyg*!YkC z`|w8m+64V@y72YpKY^m;;>39bEZdvAG!A0<%BxDw+K80`-hKEgU#Hq%TZb1+0?u<> z)m+%HFYtWG$t5uoZVO-f849#j4HTa9RRy5HjtvUr(1NuaV|w;89nQ2Ny`7H7y*#R+ zJy(IIDpN|7i$_CjI?NLs)WlEs3&|DxUiVnz9A$@%`8k{z46+(_t z>s^nzuCF7vy?(o^0vM`R^JiYb@b|$LU2ZHk*#Zkv4B9nz5V%C#fHfB0u-P{C$BQQ4 z&^w^ZA?cx?+7i9_#rWnm@AoVUZ}$aexsKjqc$h?ayl8p9L0fN>aGReHAd>w|ByvfB zB=D%x)rGt*@x}^y7Evb&YS`xN4G}g=sC#mra@f|`H9!m9nHV=rv_PF@{xFz)s`qv( zS_9jrpj@)O=#*pQU$&k9*gbUJYZ+8E5zrvKQeTSfcI;m^!3lawUE%mq$?u9AukEJ; zh&;w`D&+J!$;NwlDn8IuXUmnooFrlDf}MEHNYJ94d4u78AQDU%Y3521Zb-7CYB!#3_r~w9i%T zKHm4+e7_jtfKbO5OQVg({v%Gb9AVQ}NKqE;2-ZyPC6kZcD}rNBC?V;!8|w9}?%&Pp z)1m&{EuJV%d?6!fy7^*#wE1G~O&f!p`KmJ6a(VU=;xQo3i<?%8cy<-Re~uA{GP zrM_+7w+^K@k3z74vRBjkgVXnwhL&r5PCuS44pq$e=~{h6GzWyLMm3J~Lng`=WF&V) zRH_~+TNfI~J1z)(DgqZR4iAzWy*zX;;NI;+Me0v~uo!cqMcUg&Y@u(=;V#usTkTS~F<(OyiHFgfKZ~ zo!q!~8*Bv4qp9z>zN(F`I~i_oy0aCWp6`2CCRIU~O>&pDep;*EEHtB4YSAwVq+%9=Bo zjE5HFIIitXO%JFyfB$&@e^Z$9|FL8+BifTaC1#VaignDprX^IFt>H0dqMBUX*xyjE zFgThroyEKk(5nU}vA&(+Z~~l~{0u~SFH)rSAXsaPzihux;4M*(-d*)-^C~vU4}TLE zk0mJU)wSvBzyBT*1In^Ql3<7AiN~r%NVrrTakEj#3yq;#n`E?~X7j}7F3m~?@%K%? zKeV-LIaK<4ibhsPi&v@h>87*=#XDx2#7fSS7dWWHD(fsQ!TVb2j(!#R@w$qcdmsPD z>}~Y8V)ZLqPKd8X*@^&pja|xeAzAwul4>JPnIF1W4w*$Or?GDo&~=VC;-6drSc@%H zbCpn9R-a2Y$1nATuyNhiUnrPVT{^C6m2w{(ucFo_D9shB z9WolJ<3i|72)aTtBBCj3wT#RC&;RA}u}sijMRcmuw@hh1qGt!`QYQc1znR6*s> z##uwJ{0ThY>&&(#=@ZIzF{K|>kTjnV(*2K=9c zVNmfR%g&*LN=F>iI2zOq>YAB#+l%`09AO_MmiDvmWO-iria4^26UIP`7`;k}=PjUQ zMy8~1me?|)_-(c1)AY?JXPab&@NwcbS`K|Hk00cdN5sOdo86YwQeKmY{%8U#r8?oT zxK5lD>`@vvVF#O_TARt-{e6Ees#7PFY3;ZER=~`-%V+Cmn2~VJT3g0+CBZbq_|~Lp ztGQ(+xo#*Anj6Py@&x#BN&Ln5ysmnXgP%{;zb&*3ZeddyOHsRzh{!XM+yH;#canvC z?|E=)wRq!N`|<`DN=qO7T^gU^*3IO~Hd!;$c=KUeG$}r{Vi=DZsowpfmO<)TN(hV_ zUx_90#FzFpS*-N(dzZd4Ss&BA_WBep zgS{O`zcbX6BH*5SiP`erTN~S6_o7w}?DA-kk~K+(n_^e~Twn@%o`&cbgB^@7U}|F( zo$@41_VcP@#e^zit@6HPY&V2Z_58lgii|kq!HZTMzA7cRFNrt0E%eyO+(%(br~r3s(_y89p}s>G5}7+k$#)5tKRuOIJw(h8 zgxUf6cBeCq#0z}q&|=m{#~@daBPh^<@QdNL`<(j5KX<@yvxbAxjI7IE7B;`fw;0q} zQxeuymhSZ7h|j5?D{ZV!Wc4qJ^&6%c$5pxN!?1yMdV$f5{|;WY^%>Bc|gB*M3j!ucHQa2plZt zU--&W3v(9omA-%dGmifkUJ*-^U}t)ZV}j(2nOLvJH_+F+n0)SP{MwP2-alq7H1r>; z@8lk*|Gv?Z^K|cu_QACAR!zt|!PMD5J+`(@%?bEfhOLFB-DQ6Rp^9wPrwMk;6@Q^D z3H8%iln61~&W%>=mvznlt&Nn?T|Z}UCXChwZ)W?TXjKi0I_cMU0C24mc3RkM^fTAG zkdMR?eLfyq7(@R^d+)fAVy*~qCT;$VOh`RzSxSssXfc6H={T;W#MikR)DVQ{f2zb; z^dfHc1FR!sh2}+X#^i;MU{PjcfsuYv_*rhfa{r4iJHvg*v)KKJ&RLma8Go)Y+X#*^ z&pOAYM7}hw7-+gLx=KUTt$#AJJB?|`0N+ED;KfL~o~(Z^hHPKP_mVtSW`CcU!1~B} zk-qlT-aJGPv^3ZKJ+?5>uw|{+vQrJ5Zaze9gya#7D-JaZHpNC02#|T&RY#s+~|Q`r-V8ph#+1FZ*I5=6^}uiIr$uijJ_@e2*dK4KrE2L`he@j zbItG4wfxu&yNp4so)sR2bwHDK7F|6ld>3;og&rMT2^&KLtogrOyTP3Bw9_&9-k`w! zzXTTM9ynMDLJ_jwvIh4Yxq12+#q|Dg>&w25fhVM+$_q0x!9Wu-f0C)7w$j4}$r!t?Xb%6~CTrUYZ9r}s@;Hh~0!XZ<7Ow!I;@FErGx8*L7Y z>2$7GB+s92)QwF{9M_30aro%k=6PfvKGW(WC2Cj`O)#r3EDw&}0bs^;8@%~5mNvaZ zmmF1i_A+YZXXNy#r7p~%;Id;+v8$QDwPP)2Li^fO_kgp%%remKxu?0O@Oingo(X!= zccD?ROHJf;80SD$@MEOzT&lPmHFFLiP4M4_M1drVAiXuV=jFi^-1r=SDhP;8gYwgcOJ5c)?P22vFF1uk8hC zgG;J^Vyvp|Rg<~Vc^tfM6Q2VN95_(fSKk~Lt6k$a@V7WTy@A+W{+haX&_%OUikcM` z_Wjx$Nbtq6De<;xmbPV;#TTtU-~z{`Qgr%5N49T8@>rGC4Ap6HU26qe7xaW-x6 z+T!gEFKnPS@mRL1+-juhzz1%Ng6f*&Ctbu7*7~cT)E_f`qQcc1GW>(9&fnQf?H8}a z#39Ze*itJ`j@xApSGP|04oUkvZK+`9kGU}<@Tol>aVj)Ll1HWtD}VxE+bxK$&?M&e zIi}VnDMM-QD1-QCEGDHvA;|>tjN&?$5fJn^MT1@8kl8<+w<+jy&3DXQfLs%XlO@X6 zDw8=SSUV}Taa3RL&WG@vrh_aeUwt2$X~x}q1myvX8YE^~S8)67-<_8t z^kylgzKOtjla9-<7^_M#tCl-GLF-4QW#!gZ&8kXG{Z%%~Z54yoh<~C||F{TD>!k2%5m~O?Ko%+=ElT5^lPztPIJc9M$tl9DB7s z4O=etgoh((Rl>jZ!Ap3Bj!wT02BZTqo!eE{(md@8R3@7Xd81O~q?f+|yQUl|Qa_rR zN*rT>;^Pm;c@31(vpT;ami5>{b+IpGGS^>-<^1W-n6=lO;eo5UyvWU`+`D(Iv?D9O zPhyuiv1(rw^<{iC;H;;1+XZ6(!*?H-Oo+-$5?fd#;XHg2oMsjbs%F=B6hlJQIHy1| z=~AG!>qMgUNJ9d}1g$>dML~xicu}h0XTFsg)++^uJt>t=T&Ht%9Y{yt+} zn1h73;pe4o_O)ikJCt>EZogqiA7~63V~EU#ruYB;p6w*1&SnFysH)asmX=#y5`!H+ zO0iu7o2}(dS}pv?nZo(hgE}7}zh8^pzr*$*W{2imd{(&0i|>Eg-b;1p&;;C+%YcfR z-W55YwgxsLbB2RjMz&^Z?-mD_yWzil9tHa2cEr`AFDW|%O|bTHJ{TAkEw)yQdMD`H zsD{`@(2Rl{gI0Uv9qzaa_X=@8|JTB>iF}$*joG&Tf`9<1>H_$L+R$^4 zOzQ$Re_H8+LW^iMG)3ma8 z=#W;3*X>kQNGEX-U8^i6koSurc$juWLuZr`33{%UVcnYnu+>mPWGG+ha$)U_Wv z$b^p-21^yQ**ru|Qo^Kb6~^ zvFr8hnBIWDE0R!_bU(hiMi;Gwl}V;Me@%kd@4~)ntWc#E zDS0!q;`=|^w*=->zl_ZdZImXIj?5dS8O}7DzJZ(7#>6-_IC7kT>zY%1_SbXRlYB2y zyc2Adij3O+?(XWLhg{aSPnjGTkhRnE4bA&snON3&S?*6t8BNZ#T8+rWJMy9YO;!WS zaK|Wl_A2k|58+0RDhnh@y#_(DTzo-V?t3~{Ooh!^+BwEf@x2pJ^o2D^I;5u0KVMY! z?!1}DT;;~GNq9%T$i+=FmKlSX+<3a0~Gr3J`q${d&~7}~`n$dbML{Z)!{aCfDBR0O-!40~ob zpW>0Hv=vI)bq4L1WEC*2?A$QC6VBe)lYh{yUs>SG$S=yZSegU?JvuwhAS5}+_tqnn zCHuF6s5mZC{Q6QoAGvp)(~K=3X8yQU>ZDKIRj$Wgt_q>625(foF7aNmq2<{ktu?)s z!rRjVpQpf4Qm6`(e<&hu=3g5@r<&V(>v0Mfz!&R>I=6<0LPnY2k0v6``>XsP1=>_d z!eFfvO21woB!b`gwP`e!=jW3 z{3Mzfmv)XMAJYCsH-RpecUqSd$1sS=r&7_dA)jKyRG(S z%8A9LVe(s$(vZjC-G115)>Ey1&z4kM%*0CdM_`jy*O8O(x(%4a;Rs(~pGsEl1#&W*cllM!+!H^;w)kmED0v{MW4 z9~@uz$HPk53-c0FH^3;@z;y(ngD&4wV1J{N%^?#14^cseT>af$xqh zMd`X-lBj-c?as}aFFVR5aqoTfS)c-677$;p@bK+=|IxvfD&4NyE7-_W zBCL9J%+|KPbm)_83=R~Coq9&H^pKj@o#9lwE66cU_mnN!I(j&!`bx2-rrJ0w42h+Y zTN2$eo!fPx_b`)N-U+)rOA&Ojuy%?MK948_{l&2PS3^q_o&I?Wfp|nXtXHUG6OR%` z_9)z51&?{hcy+{T>%T$zntjc^T#FW(`Sjl4dpdI0UHP587A2=|ID_87Z90btF8i)r z;Ju)`wtHL~Gt3z+`HI<|dn6U++AI!jRn}T$j^ou$iOrFG_bDW%2$8vD-K{LCZA`Ph z@bcTJ-r|fo*Xi5T&{H=Lq{6c1xaI%QZC^wCx-`o@3TA$oL{|{ljOVC^le(QR-6I;|+csU_M*SJkoOu+g zK@JpiCK;T+_PxADv5Zh~zA|CpxDmT1)9_d(7|m-_VNdy5!!$jEbv;lSwa3gsA^^_e zib)>FyS{KI7rPATOu&wpj zDC+pGv0jJd<$sXm8%Lay3z>Hb+@5biSKJA;jCaI`y2fqy%}#F}N^Bj5@@_7*^4~?| z-~EGT5&n(G|#$K=lVPs=WhQy+fF()5e%ysllq+t$kxo1Y-W z0aa3eLPo2Pj?rk3lW^X@)>_Q z0hYqBV&|J%2;3GuZD&mObR{R$axBfjGc3s^q{p_tpQGI9c2X@H=VjI0#*BzgAckHY zBRSue0+Cj)eL$cZ>;rGrT<~i$s zcd5-&j?-7ty->*W1__zP``6j(RE;<|n84CXr=a8DCtr$~0qoMk02f9Wh6t#xy4lG;k27_%$|)u&0bL%qGdxCbgd9An!WhDxQz3Gl&6r8(}y zRrtN$p*D^Yq}ILKK z)m`8BN|=#2hzfOA?>F$-+Y2FsHASP| z8&zeC4)C8luHoJO$KL1?zUR_TKWaTkx&{V9^4~Twkv-*BYL2&bU*i&2Oz~d^-bbo& z?V;)iJ9~DWT_{*9NLTaCr-)Db8zG;o?2$gU!QedgJ%Sz^tY=Ne9an|C(NU+K#vb<0 z33OF5Y4Gcu4?OE}YNrUw!CEt;Ymu)Y$HVjP*%)zbpz4$z2z-^lDd( zf52&IDa_F=hZEOM#aWxAS1rBJmMKPA2UWh%mPJABB-{c;J89b2(<=wCTY9k z62p2Ez9L(w&io-D>ua{u(fH=(N*V%UTZ;S6ww_5A1KZZgl6qB zkaIZ-HXdTwQhi4CE`n^E@?_NVq$Pv2aN6)e?q(vY(!X-T1eViGe~=XE$w|H~QE3RS z5UP_-?v%UQ%IJ|Xt^)XWIaZTwTE=%TW2g)Cpr&c+ivcNhW_gxT%O@~EOlEvIlj_5W z3ZXYfqK%J%lIDdLstKJU>WtQf7FP6XAs_j+W%O4@UdXmCQP!UEblRZAC107ocE)9N zH<|hTY_w9rN(BKt=rNMZ0lSUE??S-+nNs;%#Tn}Pi6Z`K@`4|CFI4~bp9EMr*z`1} zSp_e6sX@U3KGKi`wUL*9PiT*y^b^+MNIAzPROk$p`Z_Wph3DCP_&+677!3@0CM@3wRnIyuxZ+(aRPxa|%F7U0GKlzL<3A&gwA>03E|Lw`cav=U%U5bG z@PPccoW2-kOGJ16TjJ8f1dw0Yff_u_Dl8k6t+QyXXhI+gEpUki{P=wE%_Q7aZZWIn zt7w@rCW~e17?OV|i^IQbb~a6!Art*2bA_dqq1&W>+)H#QwGU|79gty9JlFd?-=~cUUfB!4-8Mg8VB4a^AsFAy}^T*Yates?nVqcY6{2^2cD! zFOD^XpwEQ}rP!%&mVm?!l@i(p2uhK6{n({XVatiDdR?qFz*CkQZmj37S3P3H$~?j| z6+83tmZW>MPY!FFnC)U!YbO(FO0qU^Ih?8nC+LMr7unhesbxr3nAz$HrlRka#^JO? zzdF}scu2iJn#qPiDJ}P(hSCc@r-j%C}rVeW+`r7jOO}5N*QmS zucziYLhtJ2AsWs@Q7=kInh%@U%>r@-l@`krYH=Y~2TE;+aVasn%d0!ivyEbq7DlZ7 z^^v?KU#YQ(w{ekw-n^%WF@y}9e^vz_q|Fq>a_atd_|z*qE{1;xR(iDFaO)^2>-3cm zmL^KjWRXjIviWXSk)26%kz%wx>BhdlksrQyCniF(iT&*Ja#`3nFREaAm<#2CZ~V~= z!$Ve?r)`9Ayy#PZ|8GqLiLD=`vd{%!k@Xc}(=R_qvTD8h|C*yPQh><(6 zY2O>_z5=!Bu}ANpQq7Is{S2R^3a$P9uUjHGy^a}!pG7X3W9q08(lT-gRJPSSwJehe z>RKEC)oAw*&8Z3?DifEFsOuJE!@b_Rx`F;IXyc-<{0=c7)uw{U=nA?aW9s<=KH_5z zm(lN0+TEW1L(|2EKiadkMHj?#_0?|Y@Tc}Vq?DJT%ne#O#S-a0a%v+`IJmsL_VMz! z6TOr`Q<_(sKOdILaVsf!B{2M3`}W2HKf$Ba$3h(_FPN0#(*yFo7-B zv0XOvuu3`zosnW^ZRhq{K9PXg)Lz(k%fihYQ*R)k_|kS)->J|Iutt6;{=Iz#qRvk(QZvaepwUdwodXv8p!o z2{A_kr1c|ltjopc+@EQvGi9rIS0%qgd~J!zlS5d+_C4;z3T@{I39Eb}p6g-A zr5=e~4mm~l`V=;wnb8V0rVemdr7)vCO9gh;HD=Ro&=0lvb>muVEar5=u$pIVHD>4{ zKb0nk^X@*|HP`BI0mWYp@G&OYe(vKHD%@)<<{Rj+`#N`LiS^0N*5gaZojytftWQRN zyw8+$eWyh0+2Oc_xg#pt<@#X9(<5A?s)6lUQeRzGWOi(5LN%vuCsDh4>0NqAr*ti% z2Gj(qf9KWs+^m&Y?jP~0hrdV}JoLh^c$%KAAqR*Vb*XW_e_mGDz42-4)y_<0iUXCP7W%| z0q+)vW6iBLBAJN}oL?PB_08~^R9}z^lE(L77DM@wJuz|$Yt6vCd1*MMW71NSsO~nI z`^T7X_5FIdoz9{>cdCozRvNI|dw~l<7MpnBJUIQ}{$T5TKab=!k=4hiVi4)OUuAEw zKx+FSBGHM$LvdFt*u8GhAXL5{GntHRd zZbJyqmy_O^ToDcCjK%We<3;WlS<-XVRW;Iu7L4_p=%pl8s+$Zl_$CTtv0)jue@jc$|cf&8zInGiu++s`4q{l+I`p!H#)v@T{WMg)y z&i?n~wdy><>Vx0)Lz@TSk_$-w)U;%InFK0|Q8@Pb=j6P^nG)5jHH+a|qfjR&Bi^;4 zr}e${1g426{c#E%%J2ujuRQ2~RV3cHr!B|t{7NS6F12XkVbt+^-rwIrHPd%+!nxB) zHMTLai_$Odgv{0HONK76d{ebue~ZRYEMcyDydO5LmaJ^dGFvANBLvvuaS9TJTXc_c zV%@2F9oAHEs37OPQPT8}xy|*M0AFRUuc>6Pgl`yY;SF4L{M5^vPWR|2od(Q={2z*` zG08uvFe!)qu15UKVelK=f2w=a%>Nss%_^i;`t;IH8!dd{r=umihFH{ps(;^nQ2tNO zmm+v$JCvBGc~QEgd9U7A-0(khN<6Tn`D5gxh;yU^S)`A#YrN^_aikZXkv6=*u#c^H2A)qQ=%+IaW& zit1L4tN8VDx}2*)?+Po(TnfEL=aFLZ`z$7>m0^do{_xhdiS6mKPqp{8Cu!v z!|MlUr`nJaF1JsQOfxx5e{1_hsC;C=j#$LNBiPGwgC5XmjWSCeHCx@K;YjvKO@Lkg_bRaz_`EEaeCW> zuH7*5oEmDEmYl74`{an{&oY=G9lt zDsTE!Z70SmSnI+ETmkKN>*>``;+KEBBH~MZBM}8@C|aUd%grBtI5d&n>yzud&g0R? zmbgr*fp<@{t{!&5bIB&`I^IyLj=7f!pq^0);)KWGayJ=tS!rK4w8LdtsnJU?eeo`< zaeE&b`yuKLd|}bkg6|riNB0A?bn{)Zl13+;Z-xWh?^8oL*ze6GbQaT^MZ& zXHvlW_qrIIZ#?wMDdun!zud}wW+PeO{o~=a4kX=v%)SLQFI+?@Kj}lkS@2v%QjiG# zmv9f~#|9@gSLykAoUL0fis3xcIyd5mWXmPIM=QZ-Aj$W=(u*Ld-YVEki{fXY^?bsx zOY#;@h^yty-<=}@b5L$mkmyM$4>ILW*L)rE~S%5+Q)s0pZPtTb8 ziwVQl=l%si)rf3YMx9wvUi5y668YG3MJ?4G`raLX%*V^h-QHF6fZOF+hUTB$Cdn>q z?)V8x4_1u+72r4ai=p(`^`fXpzO$LmpJG8u29wNnfef>iQ(Sew2H+0xaU86!?8Y0D<%O|>?Zsz{JYThoDW4*?IeL)-9?Lc3580&~m zlN6lbTKM`^E>WObT0eprV}>l0vaF~EJHAAA%>B%;aeC!_)X73nTSN*6qIw>`T30=P z(if&i4~=6Kh>KvGmzM9D0;IOG>u%TvO&kVqtEZ+(+}F>tVPvEIiC}lt1`99>M{PvE|yetA?xBzD3@(& zlZfv9NF@UyA5TN)vk(Qr>Bc%_?}poso*`%EW)FC^{IrNb+s4HTwe z_SorlL8^4gmsEq!C+KS>{(|K}oToBa6o9>VE=x zt~N${$V+25zGLxNFUrTJqv1i34-e&mPyHd#(31yU5ArPWUWgKJo@8%BdP|Bd`Fqr7 zh~q27z+3nQL_%$~?0HY6l5HVT!dTzZ1A0b;IAMgJZ(5v1sPs71#^f+HLcM@XQi
    -x7{!;bgb8l{~wysi5c881A5(E$~{rP8) z*F8EpR}k54^=^PAA=4UF4hRv=RLpQV&v`}Ois1nc^9X#K;^$RafAi$~qxIjTUA~BH zOT6?FOdSvz|6RCV0uQ8k910G|={%g!4h{!(;I0j&qor*>2))(Sy^^Ba4*UzRTm5!* zCbk#tOs_it!C)3$vn|xb!|Y5OFui06g&j=LMfM|&pR#o>t2wI|4qIspSK`tNy)Vo= z9fo1|xo87WDX`V1Yf3!#RHP^lO%KtoQhqgbKh{FQTKD?dou}rHK*DOG0Oh3}In3jQ zS!h8q-}13)W;^jvlT#!=FrDq0k4KE|2Ze(2nYzW1J_QQ3cv$765@?-UwGj9_OzZh% zczRfa?bziU>Ziv84TuT6T4tC`!@3ofpqcaJUvOK6?`0j1Q;DwnGvO)av;p3tmEo6G z`VBnOm1-Du6ZW1tqC;%+P&Jkd%+?24;!WSU@B`N*C5*VuhA`WT!W^uAfO4RKG*At!1m&;s8%lH7ps{Yc18Xn71+A1Zy{6v zyJ9+yNPmap(csLmw&Rp@El&pqW_su|`%unUJw5$-AZZQY_{V>p>NdODf5flNX+Q4R zTDw-D3E{FkmX?=R`M>_jE1sujkncJl2mp){zNa)ZmOR~+s+YtOe9H^X502Cm*0q@x zZ)~t39JE)Wb6$M+nie)s2kO~>AzAd_$FF@#&2p$74KPl}?T$qNdSui;sYfPFzobL> zUUQcD*m`YGpV!Q#(3~Z={!fL|P zF$*$oAIrs7t$I~l424k3PMlg=Itqpr%@M)^OG=&~CE)Cq_}nNe^$82@T)7N|8QmM3 zE-n4Vz%lic_^t&!zn}mrUI)6X-@VmmVL!d2t50hI1T-^+iI@8KP41+rhopQhH`{6@ zW~8=asQ!(p@f+d)>tz0Ssq+6HHS@pD4qcNSK)~GL9=Yt|ZJDn!W?tU2!NCbvf?3kC zZDW<3oBW-831-KqmC`u&RFVil_;X=-aQXcu4s-xd6l*Y5VKlK&w?4r3f^&ArHY2ob z>|5S-E$ZBKj%H<6{49raIkVQmINR2Oux)m?c_wco5 z?1o*;cnS^We*FFRw?O8;vP9?C&GG7m(Fp3evMut9uPza6mF?c`lK2VSFvW4+R|LGD z6Yr|(n&Mc!;WYeYY8)Z^4hlbx7jgh*n_lYsK7lZC%-HY1vXU;%uO%na64R24a0 zatWl5B{?2BTB!ynR=?jwZX~1$>Y3q@vuvx00LHh(VQw>%vXwHFwN-Y%cwV9Y-_6eI z4Q*cgcP6Io-eol|i*=&a_;s7cY|PC*(g+k^iCNK#K9C)=!)#9GqZl<99@+E+sD?SE z6*dC@F1!El`MZlBKUt8P4w1K6lei?eTD9fjYL(g4Zoh%Y*hwN?nYWL+l{sO1m=lWc zahA3L3{U?C@jwIg;6DQaxLM|=)hKW7_!B^UHVyCQz@=t6&$TQGWik+^GrkPT;Fjp_798nY5n=ZIck0FHV^H!#ki|nj+nNY$EEyhzko_^C_#Io?5sVIjWUcSR^YplA zgTF+Uc^6WY$EmBNyZ89AEsFCpOI=ZXu9G&?@Be^HK4LhIv*-WI5` zV1}0*EiQ|-)&=VxSJoc?0MX?b++?+z=c?PCPUnr%Bj8Z(IhF?>7O!o6^^RcnwL7dm zFI4^Qba&dWb9(Z8W8C*tZTlC)A!ljCdn?P+`xirJ5$E0RvvZlO7rom2*?ps4^gC$m zV$3Z#qqYm9`CkktrRSvQi)v|J&b3mndF0O$$Ekre|zuxy9$) zD~RsfCb>{&AI9bVnc@YwClDP@soEYW=2G@ShEvd6D6e4hXa{gz^E~f}F8YWcZR?N# zoG1Sk;l}?#U!`LR2Wh+0Gv#>FobqwqASd?od#8~{YydIm=dYC18AT*0jqr(6QLrM> z{0WQ4TeoGHgaMH15hEz7E)jV6ollS+L#SJO;%J+{=(>H5S+L_t7{Wr|w6#U*TW$FB zjdYEytLe%Z>}SRkB_SGaXU0 z8lFxk;4Bb-?Wjm{JpY?!-97H^D_L3d%14LBDQ5mM zsy{TpmV}Wt2h43Xb;+~IPZ9e^)@eIU8LQ5R6rf#zI{gC74ee}ZoV^Fd22`#5kvh(q z6k|k~Ar!kzbWHV&fnjwQ`w0Rleoa#XSh7(GTMqukfYXC7M>`WU?9#bL5kI#CGYRen zA@VLu6IE2{S(9mbfN?rs05iAqW~EuteV@HgrBd)nHCSaf#o+E!{Ze#;4A{t@DAUFts(K`CTIq~``_mZg3mxKW)lbj>zQ z$Sx}oi+rXtmxXyJQk%gwgJ5j8YGToTecnLpU4xnCw6%pBh#HJ^Xbts!n+W1`lyV@4 z?oP=CaP*-KJNBQi#&3et0a+~zStkPpHe%c%*7LSsdeQE5R6ajk8;c&XZF{EXQxj}S z@CB{j$kkdOLY*n~mau6xv#Z5Q|4}g{h5k^o<)B4`{vgkWsk7A6Rg86rbj<1cbNZ8E zy3s8|8m-76r%KMIIC)q4yJWY?Y8{By@67}~Na`)?bCrbjI9XMp-9|j_oj1SXgWsrfQ(G!7JMceFafzy<}0=>=9cd+E&*wg8nMnII8&vk4e|ilTPoWE|u0_ z46c<4zOw$=XSt`%n}BAVPfu}DZyq5aM8Ydoac7>?n}{FSoLKJRk{h$-3%V-xJGMpB zUS)2>wSab}#>tARL~cDD@K)lejX!HV7*dq=H2OA5=zEGqy*TzXY*>5!yWEP7${zNT zpWnqC#TVB4a>q5^e}-YaUo%5ZRCdQ%I-%#4l?6QveQ`#!3!tGK93kpB9Ikje9&({i z8khG@l4@?hajj++Iu{SLn#b)Z85&j7|Z1z6q ztCV`UZ+yYfcJ&mNreAc`Ycx1eVfDN>T)6TP+kp%lCw%P=!UUiYnZ3-6CSm;MbMiC?EXuPJ!Q#qG^S|&wZ1kQgy#Jg_<|7_?08OT_?&xf6-{eZ#2yh_>|!ipsH%a-m8zgY1LmH*(PZbTRvg(k zOGHCf`R$7iE$4*d(Jww(`dTf|&Z2pV55Bb;$tCs7Q->EXuStAaFTRJ%$uJI|)K4`3 zaln(gV7vFt=cclk3wfNB$<(!5VX^4oDq?T*DbdgdXoZIQu|3&wYLA!$TmZN=J4_0u zt3fJN7vJW+9{lB_KvM0*UgO0qa0&|)r~Qn-V!IBdgQ%6t9#VFyU4HEN!h|eA{^m+5=(2YnZf175fg%vNH?h*)S6Z5D z#iQq3kol2JG4$btckx-{xo4$b!-?cMop=~>)_HN?-R{EfTH!1mKzG>6EvDg9;ZbBM;m{7A2 z*Bd5gHQy>Q;c*d0zZhnIG019u;G!cSkY9da_kJ;)Z|yQ4y+K7Bhf2H=6y(3lcVZ3N zRcR{2z>A$07j5qMKYpZ<%8m;@-knq~CKV|qq#AdW8nPH%{UI7$G3$V^@fW|QN`9Bt zuKENkJKpeK5~I@0UC{UUI;7}sc1kA#_%T-wJx34Sl(seZjOHYAIWfvaq_%gK^S{Rz zsb(hd>0@@h*j=W7^q7SuSGN%}z^itqK|KP^9s3_C8Vyo#zw9Q%tgOeAoqu-{NuDsxUyaI+#}sGeGnm<-Mh*6*!&A~g6IQHzh%O~$r9AhvXNuBb+u~oc)L_Y@>zodHGQ#=^% zMELLT79;ip;dBg{ODm$WnkR-$%o1}W(LW_kRl-D{)cFHd)m;}>;U zj>$q4?;HE-#G_Rv7>nb7S(S24b8PbTP@P%3V?Qi-=-k8q1svujt@F=0n887i<-u9E z`|5{uK_HcInEyb!#w4pI>*&|wr4d}^+1qZGitK4e%>Imz@TwgXto1WHVDq>88oc1l z7=I#_Uf}ME+52lYZFtS)WF>JuB>A6n2SM4>+qtS?iIdLn{{vdOG_L7OtyWut%nfdl z9sPf=q02)a;FGOszWG|eb6`{T4<(?c4aY7Zs}MNzdmfm+Zpdn&H#+7&>RSk-nG(xw zhtWYMIUzsRL3{h9hC92XBde}GJcGF+GW%66VTvgSt6N|FhVcjI@`kINA^$H1PrIps z_`Xjy&1moSEz^77ZBYN5%lhY>M@Y5erC(KoEYq73|D0>>&$sLC`+UYy6KT!6AIc1X zkOPdSZ|nE9+;lHiG1-F6@0fX1JdagXY7l>b#gD{)^P}Vr34Wd^Yg@@N6*I;+THML6V6mfXv>q@*hlpU^A&)Zi%+V_fpXTE zej!|@25X+0%fhI7zXDa7!L|Pc!-17Q!#*-XWWKE#_^#d7OY+bCKAIu;ettw(ANZ_F zNRBK10>#d?#2Vv%;ccGJ+5U~O);`Ma`8M~@8sS5VebTF{&`vptWiiW%@bZ#I!8=iw zX!t0nXOQF6^4i@_;jkFdWwF~z z7h7WyQxjDz>3SV9HiXZlGU}p~1@K-$v(nL>yH6j=ppH&5iBI;B7#5Jkf(L_Xqev zH&WcZF<0@9;CqM0l*aOfMou8H#}a4Lip9~^(UFwM@sM2{b)ftjLv}0r;{3&-PLXeP zXAjSo2jc;+C|A%m9|zjKCT`dfGCX}1{&nHoU2a0n68vik<&X#JfA%KTQN|+%lxVzz zc+cDaRe(dKepdBT=Hd>?qN=a>Qpc!mFCIJg-1vH>Q{TeD>@t>n+;-K2D8LryXrETfl|iRFrz={HVZW)HA#s5)MvvCkgf< zIhG|9tE^MI_db=H07O_O&-d>t`c@8%G|WM(qWkP0()l>=^j5XixC@`;byRl0N$)** zr~GoCHNylXMsECcik^6oQ)z5M1yTob;NO1+^|p78E<%4nTTxrzYTRcXX|ZIBe0O0U zI(_ORVmyZoaA8SEaY2rdQN?-z=)Tuz5o_W|r!5w-+du)X(LRxp6;K`ur?9RzHfEVy z8yo9lCz-EnI>k(~1OK|Ds)9zAWvl!OKcIOI0(G2as>^r#DyjmzRS6I`jWgv7cYi3D zm|tS`RIGH|E(F!#+w1J3)%DhP5m?pZHTHHq-*LKU=q(cc7~)6vJIYlq!-Dj_Rr=1U z5HG5&1Le{1Icj}q8d92)3sw|D(pR~jnHS%2sU{A?y!PsowwRrVTaS?R44u?d#);77308{A(#1iqvf*{MTwg&`!~}JcPS<>x90wY7pC= ziLBnfTYa2WdTGj{R8_UAnpE*my_Hh4w}mG&?Rysk-j(cH^D2_RbvRZ$Rc7~8OLFEn zd8&w)@N%4*X5G!{t8hoIT7N1Z(Ko4wy5>lgrBRwAh#}lbNaq9g9^QV%ci_tN((N#|XF9-@##BJ38Xk3d^pIqGg{Q&17 z$$`6w02uQ2#@Q9Z3Xz<+rOSX6r*-j6<(gHm%^$fnF59;F@d^%1lhAecW9X96-N}5! z?q_ZJ_%Lb9Fsg8ZV2gUki9v7J6zRX~CK&U6`Z_ZMH{q0X#%*s$|L`Bei4eO6Ow6F= z6k~d9{Pm>wsHHFjd}(5naNzULxqqc?hMj!r_~+cN(Q%Dm_a2bgf6HlFsejw8w-H%} z76`XUHJpE0Z51SC;FD1(eYy~v3L;BZo8QEdhndFnW^7??;r$4sH3RjTec2t(yNSrM zn+Mtf0^x{E-zl{1YD|vEra^dLEWO%JVD?1C6>7O+&R#ZU&T+%ouoCpury)RjE=+(Q zZqTKo66D8)uWN3tG!TVMkZ0|7nzJM}-wv)<K6OrR-c%jAxX zhS9Cpx7d=rUeH4T@$*-N;K7l4VCv((G_$&kqlF>bi<8QegXZHNQ@YrnQ%#n@2`|ZC z3)c9E8qC$B2MM z-%=~Xs(h!u4qtZ9FZ7twKyj8H4Qz%_WWA+loER`hQ_+mCrVXwk!vd$KbBdk8+ zyhLseU5`!w7^L!%z~{wSDeMDSZ?qr9&j%6ik^^)joGwFej=x}{x`IlOY zw+r07GuAHCC4}~FE=b0L{s2(72M%BGL@(SEf!GNnEi5`@pre=smk>&0WEv#M<)zDg zo^CBQ=er}Al%^+}8rn#~*PG9E-4M^#gS%z^90|np1|>t4|I9~qhP|g!ExYfDa7y40 zE$(;TXMd#5<|1Ca?g#IKSp=`PyLxP8^>{>@ zSkU1s(E0nJ(;Nv&Ws0J)BK&hLqB3F*x?@T1!^lkEL9)wplQS}+H*4I6&A-&?2_YgL zJ72UwF_=~cE|_^<*D z4ZA_n=?aofIugbfi1YHfgFwmSTdS~su6-s#a!{Gr&<&bqx{F+Ukf(wbNZ%-7ABvdP zooaCq0KI3j`k?G;@pSY2S_8g=9_w_g9-wXDaBS2qQx(qN6|lu6fB3ji*@Epp-xDpN2k!IM?yyB`JPQi9QSJqKc&{^IeIAl(DWiFwUY0E9amHXOhjSa0xeRer*YETJcyvpI? zTGQ5l0oTy96J1bDAg`$*uluVVvtpqzrd0j_Cw+rEQjLTL?JcW%qJbaXgGw_xog^;C z|9wt*b0l9X>c^3d&z;UipaBQGj>#t6d0UqIa8wR+g~ zE^Da4)~uNRum;IIHf_P?rvVl+meGa?Oxth^uHbUrwA^CVQ-;Y`0 zwYGA~x#-}&=XT$`=7P}HDxJPH2d^BA)20L0?wx^?s>_Q!wm8Ok|Kv9DZ9Vhn)O5No zeCDUD0G2CYd^*{X4(;L5Iaxzqh#=f%c5%u`oQ)KQ>NBu?!4Euj z^}qjzY~zniKRX&%vA{T`ZD)0A&1&u zEO^~d<^fKv7VMXfl93QBuXgT#?RxCEcEu9E2kZHRn_YF4;Q3fF3L!H5c7(A}N4P>y zB%2G*+kNS5rU02z7G0SaHO>45J!VW9KQMq<227S8ZMxqbJw&BIYbNK1O~Raz;NNQy7Y^P?eQ z_X=?jqj0uKI7=q2RG0GSo5gjy9k~T_@$^KTd`&!-5-(*vO_JmnimKYSRM88ZtXKMQ+3ds2@r?{O{U+}Io?9Zx74sLz2CZO}v&Sc(7Yk#do;SGgQRtKn zF|VF`Hk#b7X$J^AAtcgYvK}_bExu>166Mgyx}rj2qbc(!^~EjV9>&Fy7w2<8LxSzw zN4=Pgp~KqQQLD+P(biF2YS)K96lQh&Ai4#pg+qFwaN&hkGr<>lu4I~z4;^L}aw zc<6>-5|vfMJo|kOslMu1qb2&7OY7^lLDXc^h&k=0Iyh3VG1pU7TfbUI%I71*(86h3 zBF^?*B5PmQ>z+Ns)*c?_h-!^dBFh1%XKW)5?qCLQe`fCV6|R)TA0W{GoV%jM)}(nF zUDdn-Twx3iS#X4W@Lv4I!7lC)scV1oU=}BJFOy=MRN741vAuG8t;5u$eb_*sPo4>n zZjxWGSpm{2mtZ0>pdjF=k-x&CSFZnhxpF+tkkrjvYxHif;#M zt_%(jAjTVX^P+%Y{X2$aH(UxXwL%FHiJX;hM#VD(=NzGT0@d)Qx9c3tXZ zIa4rMG zOu}U1R8^)iH@D;s)*-d5a0q-&%2W@o{mR+Q8SYjUTGuxnGg2dM2ZrvJC4TcWlnGgW zkR_2zDH(ykdBSC+qqj9(ynol)j&I-JQ#dKJ;d$p&QBnq?XuPhKY<$V$TO!;pNdys* z0n;W+|5!1+yQTDLu&ncChbo@rDl^`)s-rn#7ave`saYz*g>0xG7WHi`De*e3oyh3n zy|4W=R|=c{v^bRrKsT?b|Tn;k3JY8?>n#t$q4g0VgDXNQMZ>lQg3 z3UkehP1c@i#O4jRgEq1h;3;)oO;48EM%MNMj+VPqvp< zKj<_0G>g!s|JxbahZrJ05ceU-M+}#3RhK1Z;fvT<&uY$-am=x}dR4U!T*mpEACxPv z#{DLNE!xZSmB{Y?{a6AklfwJCn>06+IJ(D~8B82Fxjg4YUJ$#xC6nMYg^Q@4?4g>M z)r&$B2^)CgH%V!QA(Tsx6So`MIQo;l7pm8!NvmKXm-vIErzb~rF1@4)5Hy(ryEU65 zEL;IWsf~uh=UaK3eb)s?8i=e5Su+jCJi4uEq92rsTqT4Q8D-ZPo!eKelcjw}#MR#I z```Wh|GG5RW8b$2wk>jjwQpIV!fFKTO?2x?{#bVfm2~ZQcv%=%Wpk`8>B>|uiL8n+ z{p0P5dLU*05wNdLR|M^dl^VNTf`=@H6m_YQCBm{7{0=M(V)g)`mn5UXgSzFTip&3Tm;Zl@p3H*(^xRYQ#<@-H z+SmVaZ~pz_(#bS5$_nWUhVL=0$#&4aK_;w#xxSzqXr&+9%|8Q?6NOb zCECjCgoayC!9MofTej>h7(pj0nk}=0sxfI?o259Ge*Q9ftqIJJC;ly`Jr;z2P#%(! zPI9D)yRw3n+~P)yLQioT+|!N`YysOb?czuTclYK8c#B(ahl%!HbdQ^j%IQj{l^2-J z3pQDLb9s9uZ$=+9m)Iq_Api5}->=pF>+ipiMGy_f65{x+9`!oQ-pzU?md0Fgr3U0E zD)mP9pUXbI@5@n}XgFLt)nH7~|7>wNzTR+)qc}%>6J@fRWOLmihHVEoN|x7{8BuNf z!*5=hm_5#=J=iwoS5-q9*Z5Zw5&Bx+4(SIDi>@&gM|YbrC$Z#prthPD5~c&Ma1M2W zYc+0Q;out+RML5BdJRrGtIQ_ejSnOv5%JHtS`m08+DCXHix!Bof=V~10a45t0_Mqa zO!B=C?Xu%1c+q4SOYH1MO$Ntvxuc_^Zc)}-QCrACceX<5GPva<*O{T{W%vm8eigG< z;=_0Q$lfVkgKd_ylhlPjch?@Q2OwhKeH(l1#SYL=PJdcaDoojObO);M$eR&~5on75=OnklQrA*j=JH*8Wm9Fy z)*&$3`WH|BA2B%2ixe(HiK<6+Ft#SxJx{fVSHc0zn)|ZUvM6e+fI=47%mmwfB%)u5 zMxb3ozWvt){C__Dfom!fFb9_&yo>Z~6HKmew(AQ1p<O5>iMquFV zT(LIA$MfE;12E6MfWs^XJY#Y$afjQAdi=tq_z@)VhzF@OMWdL#W%ZR1Si9msr4cG~1oONq|rFt*U7f$avP9I98nZ>y1 zbD^Xra|kBcQe$|1>^edNJkGgnxXA$h^{1TMuU{>1_)q_ z`;@0_lhWiL4tAgnPHZ~D)<$eKXa;n*)P0}Fqs zq?PO>k!L27ZR+ad5w?0utLVuc)mXE_sa$63pcb70JsPa8`bw@}NP$t!ejiFFk?#M5 z*9NFJ3G`pwQxF6B?EuOJ6&1C_S5JL4Ucv1m%4)a4#eo)6P{t5TD%(tpQmr&2F`+^8 zsxwOXGyDP!+?YEF%>i3OWI`7tZdvI7=VIUQJ9TY99E6+CxjCciQPxY~%ao3MB>J}9hPS>OupDJ?2oz2~?YN_*1 zbcUO+YK+QU;MJ+_BH!!f>}WTWiQQsd|1ToHczP)}L^?-I%_Y8#&)JfCb2?|OV>8?A zB_r)fq)+9yG0r?oU-}pDU;tFb_e*BB!>aB;+>{SqKOs1%Lh{msYJUlb#k=#P?sqOU zdXAxX3*;qZ8ukl_H`G4q^(T7?xjC}1sS&Ks@8bA32-TAPkO=7 z&_wV_9pMTCzoZ22KmO$VX_BfDk@~j4Eh1S4>ShzuuG+!XDQ|u$MnL)P40}og^0X*U z!BH)GBs1UEwXxw;3+2WabgLKN+>Wx3z;?_mIr3wM;BEyL6IRjR(#cdV?0I6qJm1F!c+ z*L1K|^O&10!|}jX7d_tcA_4J}7YAaq*3XM_g#(#A-GM=!-iK)i8mz0%AKrEQs7P?0 zR%I2Zc0c!uO|5EW=Ed4#kbLmaQH$t|c-IU?1(VTVZ5-1ctehiKapZv3dj1NjJ`VE| zIAV5;5X98uUvE>p3RUL}Cs$KG!K_at(yG@du*!kT^Cr90MG`ONLm8XdJIL{WvoizigkH4xv`$=QfqE%2h6P)!x!0aW#)wkHopIQ0QP z5r86q;qMH6QErbfF)6{RT~nzxEP9-FiGi8>w5tD^9-+SMkR)4-q|1|CN|t0F{n}Y41nV-cfZl33AUmU zn>3|hTtIfT*elXe)#mf_GcuBznD9pAmWkk>!-2VnucGv5Zb@Xht{!{w3XGV4EWR9O zl)5CSW)lOAmQIm3=X(Bk!|z81U`84zRyG-F$!`2I3^uF!JO($f>uV{82gWt89Lz^JX{VjN6JHD9j4^j(Jk`;5NQ9;#Z}Xu$g(sip?M-tPr_3On9p zvvMzQ=Qob_&Ix){dE=v)(ja|0Z@h0+QNmxkl2sozkr{dhgIK?V{kt~bpJ@7kDA5%u zFHpH%_Z`6F?(cW9>;Uq4*p1?#+R|TU>o+7DlsE3A*Cbil5{F9dX#zH2i==UC__NG^ z&OJ)9jYLD&789LD7KM{K%jZnXYQ{CB6eL1;YX81aAS{kW+-6 zDj(wEG%$2jn zsCaH~Bwtg#_X3yP0htHSyO%BU^kgIHo8I)FHJjs#y3va#;a$Q&gE`nf;#sQ^8)8?J z6YP$^&>`e$jWEw)?tjqu(-gbGz_L>FU?2Fil6_V=IYkcTm>2RvTGAkZ(2q2u^^KV8 zP1%iZXYQ%*_@yRPoW_f!ZAtpk){TtC(S&&i6AsBOTiuNKoQlFM^XlkiWS##gUi@n18bobof5fVF2y^}Jsw znJ#&mZ}V~@@28?flXS=7ObQ5Ukj9iQUH&cg{`){Rp~3&nJ54FPTE=w0IiHD#cshOQ z(&eZWGE%5JPl^kjJunpN^q;8IjxEF;NlJd z6RK6%{xWEE=jNr>QB(gVoxtOv+#=*}x3$eF3C#E+T&RSI&8kVQXx7A2Kzo%6C)uhN z#fbPf-;$&sd3@}5$dS%E4}xFY-XypFjyBXwM;17ZIYMth?PQDN~2XAou(9yXl=ny zcKbC;8gElp`USh^g8fVpyeE7ip~MVOBMesqlXP>NI5xvt8EQm%ccPsQcPk+4D$S@( z_!rh$UvGko16huHy#972|M(Q<_m&tpxKKavsmjbX9Ik2teNrG;3U1!G7&bo< z$s{NgvZN%iWaTI*wG7{)-~&4AD*ALo%%&yakrsEzle3gOlv9yf#iSr;q+ut>XMc9n z*1+d#Ty< zO{Cn0+hpNmod00cVi$hCS|2aGl;VQ0?kn(|jF6Zigs@iDZT9t{nsLxnB(TCHouDR3 zZcdtK^1;I#NIAmP2|_=aXRi=Lj-{tm<<|}Clk3WLcIuF+GWNCNN{Xf5bl}WFX z3rM+F&czuoNTT*~8YJXv!97kp%uTu{&E3#AW?NgW3KufiVK*#tmsWMsQrajwH8sjN zHRBl^ED_YB%EkL&VrVngFkfb9c5Yz_gy&h9u|;Q+M3-_$?3GP!r`ijW43wrwkshar zXqcpn)2vN@4t3ThXO+Lkt40ofh%lTi1=iwy<zB1k(Wg*vOb^W_H6XDDi1evW<8eAlRoe^Q5{>@6gahq81POrT zjNk8C@$yd#9O-_TG<1IRW-Hyw+Fdj}va@=e!kSm~%NdeEZtc-(*4|r(Kpp09c@Up| zAg|yysAo*yl=|!M7S`^cpyB0lSMUvyJYWn;s&R(2bKf4 z_d1|aBY^+fbklS-*VujGd!T!Toi%SJRc~U&Q)ix&>|WNnKNp#qZUBfKzOlT*W6{6Je@4x6l_(;_E>Nkp3+qkFK|V}_UK_=pw|ph7OO5uFdx zurhRMQ%pYylE4xPp}q9zIzKveb}U>H`DjauNV_CT%#vc)~q7WeLhUNCJ`m2XG#6}Dmh zprAZ*!j3n~&q17!{#QgQG7IKj-GI0+oJ9b&OPXsjTcpiEt0q`2^JW^eq}k-S6kPtj zA_$`}6wbcO-=(_+kbagkea-1PW|Ptbn?%FxuBPWU6YsaQ=sBSSNGgq=YjHefG}sGCCj& z^2?!Szteod92RH@XnA_RNb4^G^YagI_UqWe?ZyvVjXOiBr#wmqJ*Q-uru|@!j<9c{ zvv-9A>qv@!hO2XWT|+1<)sanN5Ce|MKw>1bGP7{EZ1lkW-<>;Gp0-KfdE0!^^^ozD zw8$;#%bmDXlXcJFTJb&4PfEYsE4tKx zOp)Pi5?>0v5*ReIts6_W?^kNhFFH__7^enH@LaK-O$To7rFM*gX?9(NE7q|g<>IRh zAU(t@wkjZIvoagWZ)Ap7BRvdUS$VwW=jWzPF&JMoQDwpAY8GH5-#$|)lsFa<53~gh zHkHem!=$gK`TYJpoyMqm@0?|A)XwKvk3mUQP8~&X#+dhMrP=XJ5o7n9ZrvnyuxE8L z;-6yFT6p7N-USP>H#_rRCuEwqk|+azO8f@cD3c=M@?a{FKai5gUOXc`*PHVJgZk#3 zJ60A_MgaX!>Mx$q?rYSD)9-}3s4aAgrrI_di@QLy&32=>k~#y#`9m+!!e z_$(K6thKzhdFfh}aS)GwdsBE<9@>U_3-c<;e^Y_}IA*VMJN#QDpvT`R%E&QAO9Gfm z^ebop~gxhSdk9uAXjvKfy0w`M&1d{c<75)Tl7t zEmF&sz$h+vqyMdzYA`gfD73yl8NKJRW{9m=(_!5m>o(s8 z&KH{p1-|zwSV(()S!FdQjvi+0a+UCOwaKCqk(kG=kJ2w%Q4VIJJySetai%Xvb*T#} zrry6cvup|mwSq(1YlFBiZavQf=w53{BUo_GYp=I`(b`nC)3Z|PTV8}Ix;M$y>9W9~ zpT7fu^Bjq-kohdhO8109*J^}xyy-O;uVN-E-K+G<}WREA!@EBwo!nG+9ucw{_1>%K&PO-l9B%Q zk<_p0_wyUEhGapR8;>*WzK2Iya^N^lFSY@metK2W)Yq1<1To>zusUQlj}8A5P6Wy# z-Dz#)HpaoUPQb;{5Loi=WBBzVtw?G;%!*}%yfetell4}Rv3Pf$KK|0fLdG>E!`_wd z=AeX+l{CWX_dmW2z7*%BeqOLxKr9YrKP43eJ8INpWNn zJ+<|@e!RA|PhH~`Z4$F@zso>gAl((!P+zJ#4qq))Yu2b_MhA{N%D8)ld>-h) z8@FDeklSv3_}utpP)%r9cUkJfC9x|XheWc-lP7q|TKaI&b4_g*jl|?s8~IdUY(+jq zm=`$D*t|7Rkx84uUbV5(@D`9(VcfW-?zNz<^*t^mPC){6dV}@H_%ja7qdLkL!GSsL z(`dgf_}p}Ve^d3u{2Z;biA5Q#x;KL~j+L-=HuTT&x4SdC><;YNE|nM$QpHJFfeAUu znhM=SpN|qs;}~A9Z#nin-BW5JAQSAlKV6)Kj0A?$*bohwG8qT+ACn;O&G_3|X~WiFv&sy)s^`RW$)}CU5)5eR zOe5oB5%{IX^0J zeVC9!CKQg)#i|zk{bfH*WUqnj-W5&f=bW4kZQXk!p07=B0u-ovKL*pW7z2O%t$+QO>t%0lb`vUFjyf z5@v%njB{~9t7rXHM%V3gW`ZQHexCSt7}wmrNgriH8M)~)ak)Ct`tn(yLSCfA*DD00 zw4$L*qx3{(ZB1z@oMPi6L2G;Kzp%=wbF@2BAzkd9M-pix;OEj+)}Zm7yIg^Uw0#~B znZ*V}LW%Z6U&Xt4LzYVsHJ%!H< zm~kzB-*=1P^O%L9FVLk79I0CtT~lfBO^%+d*qI8gE#$@k{UkDiRHgH^QL+t0bX7v= z>^!fP59RMvH$5r^HCSxoKS~O5@3=1V!SmQyA)j5ht7{34!%AYuE9Qn`mucao_pkE6)^{B6iw}g5x8$PAS`Pb+>Og zo|6+WTYEPl@Kk!9zTqWBS*V}0ih<}`?u$PhM54tOm=FnzUL&t_>`v1&&vTnjQQ05f zE^^}^H#pAz#CFRm6v{D%+9_+)C$xllR%_52ME)m06hHk|DS>@o4!=Zkz{ zqO)Ki{LFkAq_mkc;gf6s-uWO)=4#VUVuv3%@5J0k8Tc#tDBYTqqC88L5g0Pa0+A#I=_c*6J^$ESn*I(HyBd6lt@6do z+FiP8I*M8>l`TJ}EatJW*%L)(B_kz@Mib!j4QVj-a0SW8j7&Iw=vQ=m93BtBRURdH z)p{llqr)GA;C0#481pT@F3SmA2}7P2Lc^?b4UMbsw!Z#klgRz{nmM(@A^e z9LlxXQxTW?W zIR0%{8Yq)Fh5;EtkaxxicyaAM^>qlL@9fJ&-Licr4J&xJI?|0(IwGk>bVOC3EqHtD zxqWLZb5eyzmu+in=#`=VUJ#Y3g82+&g!11j2P)rxacv|8hL~OoM+(L?hAb4}l85qkLE?wDBS3qexE#ymms@)roTdGirlgjheCEDLS z`sbVlF}S2dWeOw6d&lq37L_0NhuY4e@ zP;(S@#1HK|uYFZbW45A+C(hqy*X0=YSO&wP7b0g4dQ81d>%nJFTRq%tl~u*JL^U~~ zXl%f5t-F174 zRa}*vWR#zHH(QN*Orx#l1tov;p|FpWYMwe{UvAc&ep=CYqG8=r77U~kJ8{xbhz8@g z6^b5=-kE}_^m1y*uDd;~!rBb1tXP0N5wK z-T$|Y5o-bO?3oS{#!Icqk~;zL|B&{HU|e~nCpdr2Mpwkav_M~5KVJuE_i&w5znQa8 ziu7)v`ial50(_bI-Z!=ur&SZ7*IZ|piZxaIc6w*0^(P@&D;nbMi-}ymKl-J|4sUbg z-fTZkun=t#3@0MY^Y_uTl94Z69ZZ^{BY{M_ngY4VGu;@$*05zK@!Y>|;K(zmz4*nx$|GVzCZUD#m6t zS;KLqcq4)0>>tY^SV!ODJA1srq4b;s4&p~ zcs@HqAccgv{K65M^~})|?Ie;I#GK(?nVpzrv(X)chwl#ujb)^}I2jZ;6y_CxAM`OP zTk5kg`7nXgJhDPy*Wg2(_qh2@8-?2@t1v5R8(%#Yvn_~ZdcV((;qa`XzI<`2TKL@B z)40bYWu23OqtXn1=PZYMM<YDiwBkoH?XF#4 zSn9^6)wWc|5ieIIsjQnkFa4v@`DHWWdU6@{Iee53S7O#5BP*ZqfPz@IhB&v`9aP^+ zF;#ytw~@jvZ6Jc#a#is!5Y@bK=TMAvrT`mV&o~1*RoX+OnpOGU#la!>7vywDO|#A` zc=QW&z%#m*Qb-Q#_^DdD@rJO#7iTfdp=!5!GV4G``E+dSC3U|uNthU1^Zq7Ozo!Z;zw8u(Ct?=nmdj;kt+Q-)PC3>Ob zfco-)#&v~kFOgKI+nsrNOs1&1T?O^!(1~SdWuPoUqV6H15Ia*6Yn{bl+Wl$)=%b zGuvZ$z24xv-$dz2bez?-`|T+v5TdA%=0dGBX-D(#l85vz1{0{}8}TW65jUJGOW!H@4JK!@!R z33qcXDczcSjkL~1cLNC-?4K)JHdL(c{gtvZDRZDjp1QKizdSA#uF?FO{O3o#^cl`| zl2Z+K5>&8=WF{WM&GwP602B1)m*<2&4|XWcpAOw?EC4Y3eMhQqaJr5jJkM*KJdHl| z+j}$J@=4kLlWenM$B(;;sOG+16fmo>8Xg@!e;}%t%*pHvs<>!#`B*T+Ad4*3uoqVq zm*55SQv~~TN`TpMR7$rAwsz-vgCdgzBEhVr;x7)Pox?0IsP^lw+P-v|O#AA`AwMcJ za_oLR=f~q1pUKfex0s7hx`2ki#|O=^?ZL@*+>qZHK)-@BQDcmqc`Q~d->*3x zweL90Tj<)%2%S$M_PPluEDZLI44iznzloKlXNqXC*%XOqRt!jtIe_)J3)NorDKTE$ zO2;-}#<2Y}>;~`FW(nIHnLg~hAJdha0 z?YBM6UBMywENn1=aOaN;CYv~F2jL2FgMKH%)I=(y+^1;y$I@=@^yrxF;HZ_UYW|ir z=bO1lFnzMs`Luo&)n_uM3$Z9^2k1ur34{3^Y%Mu2+ysfd-LCHbG;3JKtXen@GcN8L)Iwp6HV4QJN z^kkj@e^yNj!c$yLke6@e;H`}<__;w}LFF(9Zbr}iVkU9i$3d*e@X-nag){eMFahbD z>qh0skzEj?_sn=O04;%5gB*!c)sS)N`)Yr6{>dvkg|_EF~B6b`9=(5idM`Am2yl{VIUp!*%+N%2%XXLq*~*2WRc z(dphTd;XQvSU{)kojZCW!&_VxFzCr*Ux9_9;!L+fq7@_sl!Q*EEqe05Ns~a_S-93q z`;7A;){g>*B9@1*_%?}#w=_+V(P9`{LU zXn(OMTz6TqIC9Uvi8=FNA|u$5fXNL|#wXpYndLsy{2*@sTnoi!t9Kez^@GN9GWc8^ z`=)8n&5rA%G0#(lWJbp>h}BjecMP(P>VB7LTOXXxWd@y=YUaz3E^5(B9c&Dg-(5;u zanp3uSqFye?VlwF5A%9Q<^msWS?oT@kv{ayK(FZ!V3Zh>n;obhdnTn#^D79qQ<##a_g!s3SvM3$M=b2a^>|ng@APQ9iPZ1)s zb*)90KpSnL)(c9^4Xbcprv2LuJsdW?uH(Z;l_7p_`|~g?`VmFtr|I0v5|85Q%}6o# zYD@jACIA;tiIo@U#gC@kn)HT6oU#17xHjjD8IV2sXatX`tS z-6}om|7Zf02pZYT>vKl0i-q5;Zy!Kn(A%3cCAA`jX|>yux;`PA=azIlTojGv*l7`m z1q2=dvUDZ7Id#F6vu_Z)D3RmRq4hX$utI|h12sEE{bGW`^%cl#P?d&GvhFwcSUYU{ z1DIhgW3l6|x{0b4VRiin!De;|6)m0upuVs(#ixxQ#jvEm^;ocdJcbjS=*J&c3nUoV zcc3@?WgjKNgs3v1SJ4tFhdOQE;&ZHnaiaV87C8uhA>vq^RZC%m)Q-e_>GrREXDa=k z7kp24nA@|W~ z$M$doFzb_FfV9Tu%@5mRRQM+ELF91LKTD!j@BTA-vO(M@>cP{wo}p7I$2y&a?!UZ| z7bmiB^*+ELSK3X(OaD>RVvOn2vW--Q2=>CFd2=QdDBi42oIX5AQ-FXzExgZ2uXOo& z1RIM?si!D7XTs;vX(I}*MtENTwVDnL@_k9~R!szEhpt5>RG^*G(Z=YPDG~LigSevf zxcK@IQ{B+75OWu2k@aW#j7{%0=4I>oL=l>Xl;WDQh4RW3635eaRK%CEIsL?e15pN_ zqq?%eH{FaN1eop0L~uzaAxj2-bK}8|L$hGM5qjYAzQ~DUJvBzIF=A0MWoBWuKA~;; z;@cr*2Vytp&96cH&U~C?ny3$5BZRrY#~5~Pc4$MWvz`A z-m9B68hLqhr|H%(ijyFV-n;LNBVC?%$!8BB$n`AOMjdA3_igAYY7g^yqSU$VlxE3(Mo3EX)_;h_Hof!a*xNFH!5;+AtCtt?FRBRNOq)?N ztV%rM-~VDb zi4-*pLFm>#^hsF2fIr?kf8xtfR<-4I!)KWw1^s-x_sii!cnQ}jpZRHUUZFvK+EHEM z{tG^P{n)B)BfD1}wV6XW(a<52-QnlsCx^h=R@Qg_E{zO)DQTlw_3lHei_cpiFTc8MJ%W%GlkjPXX12 za={i3@TI^K5pwpy^${7O&*Sy>#_XO>b3E-Ck+o76GZZ0#+^%;rB_$ZuH}PJJvtxD8 z5g1e?%`nFG6k8nhvIzNqZ1O2BRue_d)*DLLd2y+lYh~yNj;5ZHMrB*(;CSkahzK0< zfJ8Q8tfQI>iiX?9Gw5C6{ifG`RxWa=4qaH7FY|XTuqZ6quGNhdbawT2apYX+@Qf9Q zRy|HT*6I-M<=bUmxTC@5-y115rq4CSJ?NR*9Xrfr8HeUSL|8C++%eo&5anS0470(KW~ipB^l`OVs(Mr+Cajq%9Yc^S z9>bY+50XuJ*~s{_8Xg(WLd0aG$YJ+I{8=)dwR?0*NRI)@f40G>7{aDef#L^%j`9jD z0K!|Sc7M48x@+9m9D5${U01GM*I&9|7hByb z>rkJC?Wk`>%i;>t+(4^=H8(R=Q>)t>OSvL5cJ#_61-L=dWY_qWd*jH951lgz# zzN}8q)_8LwQoVV%B#XOD>J>ZaZ0+v2`_m-L#j~#5JTZ@wGZ*qhn!V<>tCBFLE})g8 z6oaN3_dTa|?tV`F2Q^Nf)q6_I%C3FmGL6LLa+y^8j;rC7ah}hF$@S{?0PLm&BT{zQ zY*^un7#`#=VI6z&2Ti=0XRuvgop*yAUWxM7@y?2XE@a%)?^vaJr??|5Xu=xzso2AN z9X3>tS?OK^wje5;uTbK!5*;pnh#*&@0Ez4PD7OQ^@p@WPY`1~IS!C|^Dj_~RHdCY^ zi6b$|ScmEFIS6BQG3`i^Y%ay;ehKBA%cW=)g~sK;1m_T_R8;86q~(z1Ep4)T(=WNc zZ0+}KADS)V%{Vy8#7L3e7F{>Zgw7mNAMTbRC&G>B&_l$bi`rb;EHV!Z`emDm3!q)Z zq(bcmGnukx1-66gu#fqQuZ`(I#UT>aR7}-~X}wl+YZxJp^HYHtiE`iFaxT!KZ+aBp z@aa1rhKq08=43LEA!K}ZsZ=UH7I&y4GI`mN?u|vAIM~Qp$NkQ+Y#jS=&UXrdJLB<2 ziTWU2%ZDvl3u~p!s17_+soNC57k-#V$Ow*EUy4G^u^xyj7pa}M8r z(~R|;^675&3<*Bnyj~-E>9ymD$j56J_v)l*IkF?-UbL3&&8}ZBI}bv+6+W1bqd zZCIwo7Qvs{(JrYk6t>>6jn_VKK6Utb4>zqUqVVAq%y(CQ{KRB@XX`|b?aZDH0cW`| zynn5lVsCL2&g+f-AaPUFr8WJ6X}E1SGvDfHiCg1iOck`hST%c5(qgN(o4h3I7OcG2 z@(E|7t`)>#X{NLNwYdzOSeh1EGLBcU!ryN}6q9JqgpjWOH#xTd^J325t||+euhmf< zKWsbB!AjAb{XsJrHK)5^4s2e{YHl-?z5Zw6r=l{$#|JK`hE?9I->`PD_Q>V_bB^*q z?>n)vui^gk+ftoDm)D`q#klxBA|N z-nz=%y-?P#q&tF=)l`*_iK*6DFw0Z%U%{lb&0F+}*4%8liWYwGit6n0eLQqW!wM2n zV4E@qOGdUz;NUK+xy)I*KI_XfWpZ0Z?wl@bBX@NjRt9ojkBwpY1b2dUtzN0FZzEkI z*Y}mM$=J*p4nZ9h>Q39OcWS)V?ph!cYaP!TOsAwYW?xWY+OG}Zmi|FQ2fetjX$%$6 zHu*ub?(%G5Uv{RvAv5Y``4=y&vIg$3cA0tGeOBA+6A3OXh@qzyg(evjN8KO}h*qJl z1?_hmQk1)~B_%y=wpOwxsG%qVmi|u5R>8MI&P8JF!vXya6|0~?pVddl-4DG?Pd0D* z#Ta^#bthL=dT{nB$+8BW5C$S^48NC?!wq7-(OdgK@PpvXg%~8KJ)I%41FT_<Tc5U9u>pYn_ zJ?Jm0qH=9=P>-9X=`o*I(f^?!4TQyhv%r@vUtmC57*!k0ivn%azRTMLpyqiNQR`mzd{mll`6g? zaeVM**oTqP8#%7UbFBM=M&s7+iulKok+}Q_-lWX-gC@~S@Ng$d0`%8h^NhNd)z$WS zNh#Dq?P!r z?$_^0?I=x2ek-|bI8=U`n!e?~zgl-l3*$SK4WoG3pv9Ep5p)hA7areXZ3M+ zLz9m57WB7L=1%Hu`~7LE5VX7&K^;0cd&KOq`gyNodO@vcRRY*SL{ldp(Zst7(a@+w z0kLTnr=kxgi8kGz!eg_VTj3+L8lj$0$vLyTxzj&rlq|y+xyY;M!zF2tl+FR%^m|{S zAvDScsJ~Bh{zH_1oSeAFMP{KdP90JN7V12B5>e4N>MLdI z@Z8OSt`QMgzg{v#LcP&ZoQQ3HNW7gok0xS)sM5J%D=-JT%7o|6-*lDSu#1v8?h{CRy9|HHGHsk6iorl>5w^1* z@Q?b_zE=^h^q9)Fx>H%!?0d;KOuEd^2ilb|fyDJXSfrnIT~!>>yX2|S-D|&Z$-PSRVeDexqA4;3*4>+{K?lD6h*T{gmY&m_F8lfesYoLx5yQNkE~i1hbi3*d z$@V@F7blS52ZkWXunMH~;*!3{7rA{F5&V84kYOJ{m8V=^P&iGuf=BgvGlmZbM{Ft$ zm_(4Sv9XSuG~oggp@X?%v4JtMy}JIWQK$ceE$T0=`ak&cUta|p@wMT@Gv$zUTIM_f4sQ=geK+KBP%p$ZQ}>csyO~=o9j4EkUTh@%psB&m?G7Bv)$Gj~rJ20v?@~2yyJnkY(PX_L=TWUa2*XmvO$LTK zjnISQW$I$cEo=&iyoya;v$8M|pi${_Pf|{4N12Q!#4gM} zY~03aM#C1Q#fQ?ce#_>s=StHdS zYo;7z*L}lUTP7=CvQ3HhXgOei_XfgC)VHHmuuO*-@v>?gYS)hEBf;DbMJMf=z6}nW*XgDSpU;TtDtV(Pa+m

    <%xUo5);1I^$q z@H+lWcHnA&XFvg*jquq7C%u*SMj=c({`eNX6659oSo8aoipc#B1RJ*lCP!Z@H zub5=cF97KYLG!n0{8z62_BClaaXEQ2Fh76QA{A@q1?FfeYy0?}Bbe(cg{;9te~SKE zn$-IBuf0a36gUG^ckJHd0>G?}C#p*29Te zj4YPP$}Wv2g`n{+Y<}(fvj17g| zjY?6!rDoWDVvRiCbMQ zN2z_s{J)v40FJwbthTssGkXY_?SH8+?t(d*J-WvFaz&e_I+4)l)5NJgvf~k4^l&^f z`|%F`xMP@X$73it5_k+lqnp!Y9$^i#XAJR(dqvE$V|D`Cy3+v1La9?Xp`@$`nlZ?{ z14N6PLqsi1C!gGiwtQg80c8y^8{P{DAij5GXJPKD32_aUoxe{mw{3pgFj(|3Mwk_h zV{N#J6uttAJxNHB==OI5{^=Pftym`ID|FOVb$BO3g|z{Xf&p@hCO*s;`wPI72ADaZ8xBO%GlMa2pjvRR)(zLTIDD9?EARwM_`~O7>Xln_wPSF zOtKT|xDf*Yx|bzEv`ucrIFFPii&BySxS_rZl#)qSVG;^J9!dtlC3ca{3n)?jpo-Ep zlZgNF)0cuLSu?CDMFLzu814u_<@E4&SpGVq@nBYSKO1O#z|H;9S#Xo3-*RDU5|0&O z7lQ5&pYD)6yT{oKFCU3(I{vK*u>E_4$4B10hdn?O08S3;cwGe=D0t{I!nGCmC&$U> zN;|{y;*fr_p0unv31?FMvH^BJgnT;2#~(Wa)}8?WBUbP4gEY{om)JuMDrpQ|GN^*EjTZ7x8SuoYxAu+wQA* zl(+VbD}VmRp%k=DjsgPu$oantRfnJZm_sN=E)<0hjrk7lY<>wqQ6<;w7g+RY~zdzG*G5p$?h7@3KEm9==KB;Kuk))WvXXOGvnFR~xAF41V^?w6w%wocp zh$$_>pj_|P0tSb=bs|pT-ES_D=0q&FQ+BwC2d>Ce=z5z|)Z^`yZ&~i`y4I2B=efBp z!M@g;SF<4W?MhChVhO48-eEH%LqFe*d**gRA0lBebTQdA0CWMH+sI7_Q(SM&*lW^{ z$>_!Bb_rH+RzSu*K43?Di%Xa!MX0UhNuG7ii1U+YBf0cSV-v_MR#QNBGV}RwpZ_k& zZ$B3f(1d}yfqr0vp>7e#H5Nw=jfV~*v1Aw)$md%uvH$@+Iq=a4YX`W%+eh4bvd@Xn z$r;DqSWJD}G$T{@sZDZPZ&bZO3vd77><7#1Z5B;hH*4<|E=WrE7&XWoMmmpy`yCO* zvsml2U%?LtY;tYr+`if9>ItoPfOw}}R~W$P&YgZ^aZsEXIbEh{Pr)2+X z&jFg+SddG+YifLs-OJEU6NF09I`=IvRWY`I%}ZFQN?UvOzDieoEvB8f)WB&!>(*;i z|0hR^fi?rVKA-wROo^*E+V2g-d8f_x`I-oVT{=A%$ybDvmn^Gh7cnoR++n6xHPCzZ z=;-I|9O!1L{dpa6JHNtE4n>yr#JsFpJrgG8VE5!eA9j;q{WqKI;>Bm6g@uU!1- z8O*zBOO`!-h2{(Cle0QmT>3P{6a3PJEsgoXiF!}jSJhs?a^KFq=1eqFuV>c1P-i8rsBjvhJM zY{%*!WB2x^miz=4X%&Ca6nnw9ajmUfBN#Z4=sGQ*9ZWVx0z*;i__7;(?hTDX&zt|W zRsP@qNNeFH8b&yDKX?g16^B>OuWCM92k+QS!3QL=4^>+AgOrVcnN=3W;Ba#6n4!ah z3%65EyU#4S$Nc$3EdJf!3qEmPG1608IzyR#hMO@DC=Z7#h^TsTc($ut2 zRL*t}zqnlo$Ek>$xd8_@btnHIFaq#iw6`qb8w+o90tn0u1 zt{S>)O3ap``oB#1UkfB9wg_dZ*OlM%DJk<6c$UjGcE&>ygFt2?un9}77)^OvW6>I*+h{H8X~?RJVsqxyiG17_XP%Sjlkd&W zO6LD`qq^@78H6D|&TO*})L9_upImj|5VjZt7x~_?c=CH4{;xbXvHz91JNxL*(J23U zwbkC|YPyeMAAKngFU_h{&y|H;G9cjGMhjfT$+B)SxZ>)U#JZ+`${b_H>afwWp#B@yLl%1pi<`nI?V~>hOpBxN0^AVpC4q?tS!LIParAkkn(H1FN~5!oQ8%S$u4_urqpn~_Ncs72i1f#y8+AIJyA>)moc(SlceWwB z4oU(aW>O%=#bsT?l!)Bdt}&7A{8JLx2orPlZKvhMfg_5#oCJDAnnNVVX^yX7v z?66Scg9LA0W3<~M>DC`wvF)yH*eB`IgI2*GG~kdB&mE;dP1<1=^&?~c8yxzz%Y}Ua z$6V;@4o+>ejfP{c?GJ_gE2k$=a!mD*W4WU{^FL@BM}2m4|2pgv3555!3mtm`99Cy% z#4muUR!1n-w|6>v-c=Ae7_JiVQ%L86j`rfzYpH5mo{Al46roD$Q4>Mr z;yOYJnktFKjze~cz-laUOxuKv0+^&xzi|rzbAyR@+#CNC`fu%H$!;u?9V3)QRC;)oHN8f(;IXg7BN0?XTj7W z2RRm3!i0RM=%v2_zZ#=k7u?19WRu@vc54|kc?E8$3am&RdhHOCjn%|DBuM5KETBcP zK!$`nrIU;I`ax8uC;v(l{g1up#A`Vd$xOKdNAeGvRPF(jma8Xf`C=pcZfsmYA}eN# zl9H1G7BuwqG_5Bk?fLAvN`q74vX#9r7Nxjz;*B-wGb#<#hpco4O{fEPi$#ZU(&Fu~ zDImvkt^XW=XhwU=!(HkqtAb@k3E*9%yI#L@`%hQ5t~;LE{Y+;i0kyR?&GZxC3 z_*U4~3{l&CBiTroDN&!JY3{>7t)MJ%c7bGXA6!(T8|(~@uNyHLgdw32H>8NW6vOig z4-o@1vAZlUO(G3kgc(jSa3FyIe6mYLZTda890b{dtaGTc0Ji8$UNbs)a($pt(R2Ik zFb4hh!bydwG;nAed8#Z-+AX4m7^sf;7_+Dt+b|0BFY`7o6ct}XjRDcA70^HdcwX8U zm{w<_=sP#y?cLer1i21FT_XUDNOF7U!^%i$#7b}LoZ-zz<#_D?sw+M0L*Z;3Ya(om@8Eazb&>{4E&&p>m6LtNFkRFS8car9 zI(y#1ZCKd%;p|kn=~ijc`ie7Q&n9jMF*hUoUq1TRCkuW)`2Xnjr*i%aqx-3xpUU}@ z)A|3I>yT^p{L=!4T64L>68T+k&KZo;AY;0$D@pVsz|6vIksx$6(qtq}C?^y9Apr<; zi6FtS<}h)ju>OuPt#`I=$vCtkrf@ix0LjeOk;#-W>5{%tG7TDZyx)+Bs90qqP(enMq!LwGRTan_DvG(7Xq4}wjYYL^xs5H7>r?98wwK3oK~HE=p`ZWRv(cXB z(rVDOvy2+AoW8@1iD=z5>3*(Y8%Q7v3HFJg@{LH+lv3pVPE~951;pMUvuSm3ipZFIjIpsY zQ+}w<%G%h&!-sztn0B~wJFlEe+dW3EwJU@&$A(=Z4QrINIBRrs=22|q+H%)t(HAa3 zVFZw@#N2j5l64ZcrL4-qBdKY|MRse3%vjNd4j{r zl`*`Wv~z~&`8yF+NMeDH-}xSAJ&So%N+rN+7h8#1PVySmd9Gu{mj9_v#Tlma9z(d0 zc%4%kwT7~KRaUy2beH{$=kSsXcZU%Q>b1&fcO|maO4N6;+ApWu)IdEMM6LZtCH^|7 z_<8+rYJllzkzwS*q`S@oQ=ZT71U!%SJ8_p5aLpB`ByXl;10JM*INC8kwU-rP5PV%; zR#t38yHj;LVc6W;cbK1!h5hu^#Tzf^;eORssZ*o=6CKKDKMVGn25l^}?y~jE(!+8O z8Dc#cBI?o;;tRF+h_P6N&aY(JS56aEF(0Hfs&6a~y3_<;5bYG&Ez$R~^V(zxS*p|8 zpq21VUydn_myCxo8@yYwxDsYRcwEy2GH~ z-uz#LL=lwe$ zl)o;oQ6u0u5W=s&rCN}h^d?_54A+#)Ke-diEc``zh;(J%iX&kMXVBc|-`kN?MK#bE zKPl?GDPT?P>CvnV%ilUZ(dofS*_zz8=_P+sT_2unI5FS6P@ju?6gpMb^Ws#%BRcxh z<>rM2b6>;xp=_&A@hU7B&L>3{1JX8624lxUt9-RdfK?)gehYsSwdf-h=`DviKJuHA z)^s(#YrHkzyuL4ze|)~=^K=IGQA;z_HmNk`N$X7_{lvW`P=VD^XR>G#x>@`O&95`! zwIWPU#F#p{`twb>tTj9BSls37&FXBzo9rW<(-79# z3#V1omEU&gfO+gcm3(5GRf8#cDWm`5kQvAMSB4pmq2u z=`6~aSWPtlWN;*CcD3xm*$jHs8Q;D_Ju?^!fU6!9LkWmsi4E~7{}wCUAN=fqJz{T} zm!dZzU**kiC>W<#p$`!FLY$S*=tzki=W0(btj2tki4=6<Ewl_^mLG))N*N|4`uHw#MFM#k-Yf zEDssQtVStno*$A#dg=+>>y*K^;2lD}9XWvS|2K86 zg*O63gIB((-LsBWGAwfBH?xbBX}?YpU35N`5E3~i)dkWXeqGNH+@iSpYcS8V-qIZh zsFyQ3*4@Yk8QCbYEB!Vus81$Cnet_%+H`SDxa1Q`t6=?%GOfj8hMVZ6hU=ZM%;;-U zonB0Zu2XibvH%MKgZeMag;A)WlFU-C5{ksc$fDT0&~JaJ(9hHV6Ahe5l!Q9Y<<u z9?-7M$`hU1Q!wS5y(`=WM<@2YIZis=`n^=OUPfb&!uwYfqDra|Jdr{qugK!U6i`#2 zmXfgVf)H{SL=Z!djQtir(1$?@o!-xk{=VCr5}yg)wKbzKl6GpQx|#UC=#ICUJEe%) zz0H7iyer(pUUNb4^-GR`mL#k8l+90Ty=xX-N^Q@&*JvO!y1kp*3SoGoT{JR=|9{+s}(Eh2{_bl!;iY-=2sg(G5 zNa$1uyw;@v6Pglh^56hfo>QUedBoGu$<6_b8#zc7p~lhI4)%rVoBGS`pRK*(|NSMi z_t^T-Dbvqiy}v@6!xCzqucUJRpy_G@lo|HuQ#=O=+?|c54loX$=m<%)qBtU$cb9a#aQaukIkcrYtL(4CMV->tr4S-WS8NSymsUS{`ej@SIr z7x8Y;lS0WK$JH?vJeOR{IBlaeO)Bed+`aPZgKN5`>8I4L{6t+D;n7?FzM1`e;-?0F zYT%~^ern*S27YSbrv`p%;HL(DYT%~^ern*S27YSb|7H!G6xro=-PCYiuEogkuAEatoroC)1<{Y1`O{>gW>w{fS}bI#{xWz0Rp)$vUihd}Q@wIwfWQhSDmtPdtKht#-bC+QOP5@c}G z$Yf9wO@*N_NSi+jja!w2+a6mTy_H*?&@+4d;8avr2J^G+Q={B1(UPN$WAgRn%YDij z)AgKtB!&j7f=KNRh9DPRZuX?MxKzuOsEKj59UQ+vndW|t zbx~w^a$uBtyIeS2A_Hr1GI}J@SrBF|2MhXDcX$OSLeeoRt8MGYdnJKJ77k1-f(L0K za7HP(A3bincE4yp^aqXR9lQq!W5RZ1k?c(JY^Ns2OQ9ZSO>z5xqQ5f>7q^rPBa-veXzT7&@*5SZro?a*}NOJ zXlicn8|kcd3y?QAM%*heU()u;(v2#Q5p`>>+~*a!lV!b?AJl=V8bU(yYahy*uHR+Z zeUz$vc5$LaVXQq`Ehb=>vm6#En$iT>=-NolnEcgCC9^VCnmI>V{yG8KV=iVwsS1@Ec7-@U=Gdd90hEK*yYemurIOWS_M8sk;I@E^gMh zo5yx9Ao=rBypC^A?*5<&bkrLatHQy9s4J!O;^7TFWl`;lxgQ3-k1?>Fkkb(`ar5p0 zPZbB1x85*H)snMUx1aCP%k;evn+qbN@4YWmG)@tmlWjqnqbm0NG4Wx_o5$7PzI%Dy zJhZveGHw?qY|%bEt!F%FONj|VV_cXa=kU8QVnR&&o$csgZ=^6go(b7@XOdctF^;bA zTTzR(G)m2IhOLM>Ca<`TeNYPBA^H1OMLrq^b3`sKS!6M}BfW_o{Mr`DhE6 z!h%jfIdndozUbzTap@M+^kFah8tv>ahnQXtG|_u7HC{``?gooe(m!6L~8y|`I#+I87+ot3Jhz6A|~)|jdk1Em75Z4HDu45mR|4Bb6CRv58f&^(29 z(aLSDX(32rsp-=}So1&WCoFMXcXZ^29yi!uE(pve;I>hs zJRAhqFFXXBv=Sb50v6A3L|T0*37 zp-#KbtJ9WvJ3%%VVU-xG8mi7ib3cQX8ZSLpgG_`)Lg5U;=^QC}A1|od7c94L3T7%_ z3g6RS`$5xZ9ZvX|Dff+8K1C?*#?(l}^dinOa))k|F1a=d?Mg8CM0QwM)<(BtgsfVc z%KT*F+*Rx>daDnl2K4OO@TP56l*wR@3-N)eH*vu%zslJKHI>b~S(D0RpgcxMK@k&a zy!=po*x3Bo<4-3v&zk)UKKXzAEe+}E_1JKU<9_PyI#`VNM4s&IlGUP0bERzEsDIDW z!dYTJ$(-2YeP%q)<3f64W!FZ;gH)$Ht|Bb><2vg$--=TrlKp*f)FlkKS9@_0QV0F6 z=u)4j^C)!GvnT4CnawyH{|ltkIt+q?R>xLBg%J1wNj!1Frp zpq7|^cb627| zeA(_CYxDcR$YzyyZ#VRREXS&w8I?y{n6iP)g{Y(DWgGECZR45f)d1pj*r_8&9`7Ss6EKUOPiy*aB@7CXfvC_mG# z!LimYeR?!#{7QI0c~jKO!(aT7wXILaWQz)bc1>v+X!d+#2{^FdhE2G8YflRj13s%Pf_|jylnRRbqX7l z)yHwHfy|I}+n!hweK+Dvp7m(e8-z4oX__5G;IPCvesx;LD@eLyPyic`nj>ZB~Wig4kNeM%0eW!m*~ammNjF99iZOg zLFaJq!;&yM2{;{p@ea>?6sOWmO=J9`EC<~D{OE&F{$iyeNaoWhvuRB=#jW?#g!XbT zBV^Ie%v?0|*Zra^+GwP+whbqKX+31&iq3KD823vBgW))JcAJCwS+y@h`Vo9R-!9xL z)_i)H*{C4O&>kIUU)MM(&6-pu%xT9KT>*Eqit4Zvk;todB|;o_rS$aX5Q45j71mo$ zF+=>YZ4gOwXq=nE6xeh3pt@1M$G}?}0OXo)y)kv5%(j;09nR5(9lxFV{Ar9>4Mia? zJcgYlw@gMI7VWrNZb>-~$n2-UhDJ}kqgrX0*` zgM4sMS${z@a&*x&?yR0wwT2AJS$p^vO<<2M(5&BDZNlF59m-A~hJ6~`G;Lay&>B~W zx=kqao`hVF@n#;ZONSpYA1pz|Sm1Pnure&|T)GO4OSx)=KAWbTenqE@== z?HY3wDHVGW1Df#@BR!L$DnxtlF$%qIp;gm}{i4^}r;01ZZwa;4h*G!Bb?FVy(mOL5 zV`1e=mjM{gts-4rVwf>JYON-1!YVm4cJ@$)757+DoN%$$ZF#iwwMizDO_RBca2-!ZVZlzNCh;jIRS;C0@YNf~B<;+%Hn zNqz=_6_ULGl$qP-#OeU3-~|(M4fEIUPkVI?a|_n2Z8xS)*?lU9y=X46`Y2=asJ#Yp zA>z@7F1flz`a$-UfM?Mgu0;Y4`GkkX4#k0|PoCNk-ffSZb{U?%G7Mz3+LBJWrpq~W zp-p&5MMj1|q_%J$;zQ&aUf6HU^nHloG(R#xB`6FNzJsh$rwWI-e+a4f{aj_(u;jg* z!r9e6=%!#Y1@pFQLBT$ibhNlJ=bxkHI`-S>7T?pqveRHbk`ZWcUj0C5&bEVSIj#BY zcV@p`a1?HuU2A^HVdRyDY&|=d+$J*4?`CEZp2e-3xs2b7DR0WL*WNOZpIZlPDlBT$ zDXbcIq|2l$1DnX2K^B_3AbRh=9WX-T$T|h;mrYEXL3K$eVJ{bwEy8L7ONCV>BUSaU z&N~lyU&$GapOE9T>}7`m@qg_{rb&?GsuU~Vq#UE>x1EE{11JoN_+DKCOjd*pWgP`P};ORuc07QTdVIlGTj-5L9n#8?@a;G!nKRo2Ot!7g3^QjFPEJ_N!xQ zvmIdC5)7a#{*uobGPMfd>SrDUrV=(sib1tD?7ZivG~G~kIz?#TQ@ukKXv+3>x6HA5 zL?0oTXiljr@0HrZQVt486Q$2GXT)%g6F|n?PG+i2dV=?|U^zV5x9o8d3QKhZ=4W*T z6bc9S74`*wlv%*On6*AE@i)*J0E9;LTj_T-0e0@Etq(nhxL>?}=DIw#Iwt!n#*y`T zN=IJguPPoau_$}%*Uiu`YM)7UuY5&++4lx3cHD>P#LFTXRQy3A)^|3-}-yy&AH?z}d3?-E8T7O~^eC zMkQuXPbt@jx6@D2fh$}h@AE?t4f@Jz#=uN8E*Aa(8YBPqO(v4aAr9^Me$F0(z?U!g1aXHrZ>4x5i zy;2?b<<&J!(J}TTg@Xd^=1xv#=e26qj!cgeRNar}=t-^E+%>^Do}(e#(1qo!}x4UPwo%O|%9j8#0Faa(HgY;w5a`g9=a_^zSsa1$wR6Ndx-14g7aY zhqEt%Zx4Dc$?n|64#Fx%Ny49>ui62Is?|a1kr5UhR-r_)HgTaZISQ0{C*a)KYxehs z}O{f7CQRy|Ig94!k zq$HuEbO;GsO6VXEKzi@Rll?vS-udl)&YYS1&z(Ex-psr(teJN*FKfM@^{mh5`8*E< zdisURoJmXvTictih6W0!uixmdRq1vF<`u-w7%BB;FzZp1UAoh5x>(p#FwSTJKM;Jb_#-Ym|PS7ck;JhpzT zD2%c)uBq9 zwp>{?jyadWXs3S?AJjkJEaE`Urc6KdRoM#4jpcue>liWRRldX!60owS$<;okRL|Ox zT7X!#j0#q|!`#j`XtWEaax0B~j^#dDoc{BxuS@4b#?Y+My|YWjlMbe;+|}!fYfN`* zU0vQ`&{e#VyjT(F>=+u6)RcS=E9f33xa6Me8l$y^_f$9n>3Y$tL4enNPlcp9M!xNQ zY@QitDL6}J$xgC5l?$fEkk<1xC9SFJ%pk5`5j}WAp1%xVFg?{|RJL=kmC`i?6$QBD zp^>=VLG2S~MDi$1q;i>Rf2h%qycqJt_W5^GGl#Apiv+!vf46*<%ZkYmTmFXHHmA0u zyQ^x2lUXLJO_?uFQ(yF3>iCPbM!6O9Dmwj6z2$Q6BqN{DXx2Ju_#3~*p)B6C{V2>| zr}#Hvbvk^1r3#HZGmhHwKAOra)6e&U-HNG~ed~~5tdRQNQcg*v{buoSSBFxlOV+DX z(67Ju272zP^c5}7WTW}?s*u$T9etNlPQ@dkbm`vtTalk$NAsKA8=^Zv4Cb{(Op;%H zy+xZ%((V?T5@7|NMxxdXzhvu)jG|tjTLzl#b`5QLFz?93n3#4$=C1iPJynd5nL<{k z?dXVl-SQik8q^pTtE}hgP`-BzB;HOMu9=8{VITjMcCxBXNYeXTLkrqO?SV%9t%8ZS z_*=(Xk18}O2ri?7Tsb3dU$#F=dB! zKQOGXNy_^(m*vyE=$3QIq}Hm}I@rqhZ|_E2iP<4mzxn%Q)7bDIclxoYg$Y(YuikVm z2m)Co{$joIM4ZDu)1LY6eDbJ9KcN)WZvP;A9F|l^ZO~xX!}U`mYr%)l?&dc>`|*!` zTlQ`TUiG7$Ln`m+=wWs5az0rPyzN+Rs(QGBgauvx6>5jl-q*D}n;Zt+FKbsJ2y0at zu;gmGI(*;|t^2;YSaP`*k>okeANPKgIVzA3tpCihQ2NuD+`<9$u0!J9n6$?_pHSbp1v{_X?0mT~g0-)!6or1|MWsHD0^dor$tP816AE z;OIY3qc+)TZhfpM#l&y(=$TSPm&~1x7@^`2{x-s#iSW>t=$CD`#>^h+KYsDOcbv$d z<17lAm2ldYc;V{ITWlFwczWXQx(eK}*KhoDP`&kK}8 zULQR&x;W|ex=0s&ovhJT# zlp78X=DeB7T^b6WQ$$mnu&&j8EM1#+noBj+miQ5qfu3P{C@y3# zG~o1WIE8a_ZoebxjW_V4U4OqcsFF>rm6F=fB&{ylfYORCTg8;%4WmMRCUAy-wh(@{ zrT-Hd$^Tc>^chllU1VPYm`i9fb_<4gYa3)E@t-*^KYJK7eL{lL2R<3MV@~|Xh&F21 zefi^HBm}1UGaox9zco*48LlmO;GxFee6{!JQnk3eG2eT?HBE~)ou?WUQm(zN1z5vu zCXu`%V>ae%puH0Egq2LyDHkUFNY;aXKBg%x7lVn!E)4VUF_yLc22@Q`IvR%AhV86t zEETpl7tpf+m)s-t{hC%nQ9&;E`D5Y-i_Z0*#Ook ztX+h}*=YS^M9PCxq&1QXYnwDL6JCpx8BOpX+bz7iu3fvlKSrgDGr8ZRmZ?}eJns5I; zDX}hj)PAu1!^EkzYsjvzc$<)Y^ZQ8VrtBiCEI-Qy} zfib1#FYUx%3eIup@j05Jq*0p9bYw@yoL?_hoteI^e{L{aR?dBZ-(c9k4up$iiMiVi zmbwogz_m*gEg!q(9o$c7c1DwyM>9N?C9!UOJ5m&0ihG zsVh$ir5Rbk_`!n&oCI2mZ6aHL_vdi$(Y18d!et)lGq)t3pSwQb-6AhRxt7QK%WVrM zPo+zK80aD6{omMX%02}$`?4g_yl3)Z@{k9;-u8_Sh#&iLVjNcoGF5Vi33(7+uVv8O zfQKDe*yBd3++u>I?8@rq@8D3=_E=1hsfKQ;-Co8L#>`n zBZjRI2b19PrjrsdHTsuPBmb}w7W4P@cl_TRq$U2Wd%wxVZ^2m8M?DP1%X!UW*l_PC zxRhjz);d!cq+5j8QArqs=Zkf$My@@cUsD9Pz5%bs*vr#BzkpPh%xYh}zpV!R^VIcP z3X8PUm`BwPHYi{#1I^GrLa$lDQCQq42zBK$s@ltFp_| zb!is@ZJ!q>a6g7jUhf%7!t}ky#eisyQn~X=J#t9tZb6RxaVgtIZfx_+voO;&AI7E5 zp}?>+Jt^$bFcVG#TKP!@GU2AhlR8G=m}LEKf2xc+H=MyKg{XSCl+E)}ji~jWbARWt zJUr|=jraFEQd_vt|M>5P_4mivz4~nv-ks|ZpR)~N!RALmI{WA9I2hluZ7E5E;f|4Pl!q+`JB=8_; zI20MuiAdbIxr5rH+xt0hwKZ*b(&jFjayBHIzX&)n>}u{hZbl8R-pj(wem=L>`s^{2`ys$&g2iGVN>>9A3DAgM&Rz-v7*XX!iZ}tqiW*X4AJ{P;UqS+-A z(8v?bIKa6-Al|`HKkPoMFgH!_8X#ISI{7R za*c_2!#`&CAG5~DwZm11-ktc#8mwrsF47Uh&R9VigAGGz3aZ%G!aTj7Vf!j_9M2h_$xBWxX2fXa2I2oyx` zCrZg;fH%Gv^MZmqYYo?J%jZ&9m#BWMvi-wjgLY7HiK0aF6OWxv5&s%P?5~pJu~`zH z|M(Ytu+eZinEsXLF77dGH0^Lj)X}o>WjzY2w_9l)Nn@$=TwotlXI8(kqd%z5xa%`L9m0;QG1C@K54pR_gRSRD9(cM@%mglG=L^i%eg|2S*Ndq|PY<+sx*LwJ&yS)h3rUr$bsS-Q;jzYc&TlT45I!ohJ2EE;#ZQ^qqPhGU<$Q)E+mm`|7+^ zu29+1QuR|K<0c9>q@T9Ya0nvF4Gd zu_edGg9qwPigZw=QtOZ-gjIhN=GAxPVg|m@xBQ9#)%oFsZ|cE%W7gyV-naQ8&kkg` z${SOWa=#hJe%n-~wTD-HX?MoNHiZVIYedin1sq4Q$_DQ_5Q9R(l~pw-GEqaC4RTl7 zk}Hmgr_w%C7V6VG>w;Z-ThlJpQp3)h@WZ35bk`e2X3d(ZxLn5{X8UclQvU~5w z&iOLx_f7pwTc+vZo?YDGh^@k1CT5JV5R*L6;x8*APOhsmc1*}(Wlvq1ooDacj;d?h z>*@cs$#L;#wk8w?7AzQy*;F8%t0YjZ~a@Qws;mX9tCVyZH^W(9GpLKuoRQ2j-k z@HSG+>7&^7}lGLJ^8dP6uPMvv-c zSCa6}lwuuV$L2B6Crr=<8iW#4ftM>oT(-s-^XHvtiDf;p22)(K;wRjU9n>1Nu+%9V zjKXU?7u2K-{Q5cveBZ0=A&-=$ag@!d0Y0)3bh0KjnSP;1*wuCsF70mHQ+0hX^o&v2 zXu0^^(xZ33_yYtVV3T=^1qxtpx;*+#sHwE@X^2}BVGT{Tpq|62f~Fsj-VO!FSs2PN zsjnN&)t6j@iN(7JWwtp;b!AFUt`)UIIJ{MT4Fl7q;KiQ3`9a z5}nC5wY>UF{3nwZ0qxtyCGgeLucLh;-*fE#uGqNg5 zf=K(+hgA{OWXD&NT{hMJ{&FOauu)OM7>NN3HJvYd5lS?lZv|Ey3^Do7c^rM1Ozvkl zh)9Me62vkylU~k)6slAi4?C9S+hDzns+X8&UX*Dm=j87=rRFnPc{|IJb@WV&tqne# z<6Yqbz@x}HD0)woNA^25KUKz}eI%o@{tmcXeMk`0E&eVcFoN<`Pz_jr5>D+#W{a+D zM*F|OobiB!3$U+YH8?CCmga9X#7X9DmjF6F+K;u$z*TS)a$s6T&{{ozV7 ztvO!Boh>;rrX-(aS~w#UZH`ytzO~PK^4g&;SFUv*itk={jCuRUE^aiK1vR&E*IFi; zGIFx}a3|c?@;!9Z!X8>xgHA4hkuA1<(ni-^-*fx0+}`@ZKg+0E-imLx z{!{6d1ivIRCvj-?$p_bxCLgLfdF6@r^?DjC24KojNgf96Ufbp%O`>7n z5|j3#ifpXoH7xt^vU0_ERU7NlFT0K`{eJW5S`x|eJ7BFLujc{-A zV9_fJbmfV4tU&Mv8U+=;*J3N*VR_#xd8poJawSZVeQl*%U&)IYl(0whY`lpFkwvHq38%@Vg0-_ zSE9*cja~JF-!2+6DgLc2w90)w%%liX;@90v4cwhyd!-k(Hgvkj6^ZlPyflaVJ0qoF z)E=}@|1VaHZ*ZQ&>to`Bm08AK=ME0x@ZnLD}pP8n`Wc~)Y%gP08&lMOAHtTK^bhTYm zJJpLfn7bhBFMqPE>S>rG2bg)?+2%fbNy9BPbm!otQq(7-6E>}m4p4pf5)BotH7;5Y z>=g}aE>i)gQg2|;Uz>gOUzd&tmwEO=I=L1UnwftWp@2L2j1MfmQEn~Z}X*LcFj2{1S6|I<}BmM zbxazrPGQqh-#6W!n_Dw4Fqk?W8cxB3fbWd5s5#C%29i*r=wtTc^nqYd?0}@It&;^6n~ix#O~M3u1s7rn3K|CQwpg zwZ4;8?c|m9P>6d}t)}qCNM>>I-TX_*wW*^j%b2!RBmLJlvQ72#`Al>~e-^OcxJ-PX zn`}NU+mNFmaA*`R7;dj$#vkDFo^OosRWTZ zN_yU0H&2eGNz!}yT;F&v;qhjNnLp3e(B(EKsoNmcPbYI>wFbzthc)$?Dvkl8JnQ;B z{3Qq88SgaAIM8^0M1)kw{!2!6k%bdCmBQ(-rrc)y?p`>HM;8|ABRyOdm6m%CVCye? zn5qs zEXIh`^~l$)aJ*BX7}HIzik)!pj<85Z^0NLIZ{vw&}0p73ym>evV$Kr1~BhQcg7kE}5dY+zjM zx#)~GooxxnoU{E4A-feiN&#s^@!P4;y|s&m3tocPsXGlhOmi=WO$LH0Hkn zPvDR1yk5VG5}i&DjX!EMP zXj27+(=bY5>$<>HF;Czx{*if%nILbwL%#;ul#xHKGNIG!>Pk{Nu`JY{a?33-DEX$vnt}73j3>7GG&wVj*QLQOXRxmxYgOf&2z1$E%Df3( zSDR<=#VN~P{(46Uv^=Qh0 zNtkv36}j%u`=`ofV5}bWG`g}`*VM;0^t?c%!;E- z<^0fQpJ!Z*zuQFi7mZt;<L79A}GV(*n#$Y&RW-|nbN zed*ll`u1&rSDCbOdXuZsqI>lcYhRzbN=m`KIkt7-c-uQ6Wwn^D?y9v&#yVL(ZtW zprjG;^5Ch<#y@5>Kp^AIUg&KQMurh-~xxuK^SsdOkTAbyjn-DdO=&f|Snyyhc4Mj(!67=f zvABJ(GeDTCPIm7B>_Ki7=gBl}4wxzOnr7T6n;#q7F;vl3`&^%L=uG=D`T496I$H)} z2J|2Sdt2N&?oCx&7~E=POi;s!>MR+Dziwoh=WMEB zfTn7OCT*->_*eYZ{+I7sxZ?l#uGs|tPu{hmsfdDm3a*kH75?Stg}a8p3oE7th)80{ zo@)J03;jTq>V9O(IW#C_dRO3oa(~(%`}V<5^}gyX;<+KYQ$T@1`$+)VdD>>j>P)W- z!7s~?^4Xrdcy=w-(Qmyi*}r<5_}1-QjE#qh%~n2wi!oY7aiJe?zFY>jiY+ZJ?r#z} zVdsG)ZH{_gK8wp{u;e=aKC>O9ls~CP-p*3DgmmDNhGt!XbUb4}UyqLt2y*wY?jD*< zfu)*V5~AIY)s1|%pIM;CU3XmzPFthBxe~xcp0i;3uF};IH}mF?UW#d}6)R9qFpCV- z7kOV~Xxra;tZ_x7&JyqlEA+p2rt`l)9djtm=4c=Qg=jvCKT~$I0o?fw__yg6{gZzt zIGMSZ-O2gY$KP5*Qs%VQ0W1md;d3uVDa0o+(ay|MXPkrjxYbeD;n4oQwq{P>&AYRJ z3nS|vC;2NmG3=!90iC=BF=2)>>($Yq@V@);8kL^1cYwSVs}=0_18svb)DB^F-)rA& zd+N33@6Qc%Rk5~`ux(jw@*~fce?=>FKy<@?O zXKY&8FkQ{4P9MrpC-rI8g@EE-f&wLg-1h~J#@kCcMi%Cx7)zHtK~2IoOwi8(wWnG4-8kLc2tr6|>B9$*pE8M15 zb1x)zQ`i+UNL+#%2CQfB&r){KzkX#C=Gm*E4NbVyzt-ff2V$@M2B@gpj3-}&NUqV- zlBuO<_Mh8FK5~{cMONq3)&j`hZvfrt+b3M3931yW?{)C_1a^ zy1Xy#&>ksG7MLIJgc@qqI}itSsf~zj36*=IxUt^=mMJKaiR}wekgOD;3w#;il~VR4 zuIGs6G0_fUA($t1S^DJOw`8O7!O9-qKBvid_05eo`$aAcDikQzZ1T0!wZBwzL@-Ov zofF=&Bg|z=2|KaQwvaquk#4U<=Z|tnrI;oYR2sQoP_pun9eSZM+H655S0p!TL&#-a zr9Cir;8U%)w-EY`I(*JBDp*{R-mY)=%9De zS)E;9J#vjpEv1dd4WUc9Nyw)y*T-GL1u~bWAtJcyg3V0j72A>=pGBjX&nZGQlD1zG zDC^xlbGGRw5n9}7Zq8<_cDAGUroTlx2w{3ok%=$XD#_8emGPc(Xt417UtFed9lQhP znPn*aGQaH=YoTdk9XRWY1tCS@~i@SH;kE)mWo@J zTm_P;w%+)9f17&`6EYLEjy>MF-|KxKho_m>FDmLT%@rU@ei-RoTyW&cr&x6N5T_$n z4fgUwB<}Oyn=y`g^|6g&X>Lw=oLT&JpM3w`f8q@^%e$C%6l7^9CicK5L< zA3k>3a<=rftB{_na!^~U9_!UHVDDmgf3)hZ) zV2;*2*eJ@uMDKXqs49h0>?SS*SHE=2|BeC!+oSeY->APVWObV5MtnZcJ#$~e9xZmh ze`tHB>0o5xLK;u4*FM!clYAr*^;{cI+IHv@sheJ}_Pm=IF%C8?_R&~=)9( zX}9cHigxS{ey+|vCm&-8qVL#?xqeU!+c%v-^?AWZTdml@ZhF%!829zukTC68@=y&+ zI;7DZl)sN74g$4=^RL-oA>1Of7B}lmL8Y;Ecc|;)MDHhMBcSqi9#6aa8Fk`!Y{qC zGr$yDD7)hhZ)B;11}c+>G)3xqaDiBSq(7kIfcA@}zEXDn zXM>?h=8;EQ&Nl>9t<`>}`3uzPwo1#nn@z)+%0**UHvSOXr@Z*7-%}mh};SNdHrvs>nOn_?0Ay`Tdw5pW-zlTf2YC z1>e@RB|IU1nY+{Il<|EE#P+^kS&lsKoG@xX*;b}&&~n_EniVcCF%+?M& zfx^heg?v$@z%q^KY}7$@O2xnV_A|Xf*QI?iRY;C1qVQspRfVK##qCgwl_uU>WT)UN zsW{=PKdJ2c)_7qyM-`pqwlTd9Ps_H!Deg8TIT70nBiRhqzkiLp+?;7zpZ?RUi){i! zbuQEVbfk80oB8}HmuQS8;YCx2Qg#sk*rWo`xuzz$CJy^D+t|*~ogp3zExtLreoJJM zKZskYa~T8Ocrouc-f`PfOWDwqmOJPJh|j>mnv(XVHuag}xZ`b`;r2rW>62_Z1CRr# z(4HADlo`Qq%Is_;5vcR(bMw330LCF%{;Q3h5aVKF^R1Vu#n*)$6Qs0W^+f?LZGh;q zG1@s+#;2a=w&M#QKlmX&>W~X`MN7_Qsm0s1o#)>aL#S%{VP5XX!H#FZ?zh0E4lq?+ zpEA9B+IQ@o_=m?8<7394O$AW!!12!Ma=|0I@?D{$p8@c*g1yC~p(kT@fXh!B3pcW@ zGNun5&SPbdJ^tJ{Z~-k2FUp*=H0P2OGcEv3^{YaD=*Ak5p4g?U23DcmTgzm-pJ2YS zO67JPuR}t)t&%?P_WG>AM$1}wNAeHs*u=FNS`s~(cM7i>VLu`=+w43IOQcm+SGS7k zqY#`mR#%vblKIl-Ro30+9V)0?*VJy}nN1)a&m18RH-1-7^iO1~r%|%eG>2L#TtSA( za<<)|2E_1wh8d2d<|!^(&W@bl3Z2o~o@dp$gMl4?Nu;bsFBsCG=|(+X!5 zwMLCAu|UOPXO|%i7Q&aup$e59E-Z3!nYJNX{OsnZy7W|=^;nMD-+2mi|7>#(_+z*+ zM!0zK@-hMVb_k}ZJsAWZXGW}^?By^KY<6mRiLnS+=2-vYjk&M25!qq`lsriQLf#n^ZrtO830r$O(ZIImDR8V$oX1en?D2vInqfpGt z{sYPmYW=lk97vb*-W*Fg%?$oU;28 z;~3tfvgNH@6*UqRmKCe30(DFgyg%!ypiFZwnk>@eZLj2X#FQYsq3rZhoJly@VU49@ zQ!9HjX~4Kjw<|bV{>oB(`JA*d@DNd6d#PF6Ezt+M>MKg&EPY4VT4L%$c6=35bP&-O z(9nCZ8e;Knu8;kDBlL$_Z%iOh-F`R_zP$HrT2`9&!-|j$<8Z(R#CZ?g}&0S+Cty3kbO{nJs#c` zU}*g{&ac@ZS~ObXZF)%3ljU<`mFHOX5}`2vK?7S1p1b;Xh2%g+<KbVqC6FfU#lU~=@@!i83sVXI{YQZ>; z2*V!C6L*8P=TFPD1k-14-q^yu1efhOx96@oA=l+Cu-)<(ibd_XQ?+aF3`jqJYJYoJ zZINAkRA-N1H*7gf?^@_h$#69LKAwJp6SNWLVHhzn(1$%jW4?U-v>Wf~1^-cRUKW|S zFDPRh^}LXtTN0d%mw7*FZsVPFuOHcjQ|jH_|JFYEu5#rO`VN}7CI1^>EsF&YG@bC< zFBN$Cm}*$E{uis&#bhJ*bgMM0SE=Pknm6icUmRBjjh*%%zbA(DnAnd5T;0Fd>`5vL zQAaiJ;*fL=YC$_riWQohA6}WzSLyB~aqnHjDiLRuLufASyC{0S5B?%uaMYldKK1xF zAYj@uBv(=AAZo1qzoxeU@=>LB0D$<-k?hhNE9Vv6zX9ZF)`QfmZaU=4u-^a^%5T7m zdt})@yOLYO(~$d1vD55(O!AskrYJ--2f#azY6{T;2nVT*%FRLPr*?c1bp|6e%9|K}P1pE}Ez58HPxJo?#p z&Kpvy_>|*Z_Ts4LQG~3-Z$NnV3Vobv$QuA)Zo{fg&nnmIRV1UjR4M*xH%618a!5D_ zv(4`B!+^&3SLb{ZNps~!@K0$OtWsM+5U9zQHd`8iv|meRiIobll0^i#P`z`^GA6!@DH zHIGbS7SWUK)2DBX8U5b3+uizv)7+*U|CsafIStSgbLna0hY`2WF!yh+CABf$JD%hqPhbS=%ddCUmGUgiG=>l|FfY!L)fpm1UF06lh%s zi?-uvG_f309cb>zhO7v~>lzPBtG1v8XnRG*Iq`2jMWa28!>dM*e@)C(2Mt3)Iryet1AYS?cs_SC2XN4_X)Q%F)> zbtS65>0;YAKyVwKf)Y|w;%)oS1#$K6OOGP2Ix%-v-R?t{pLI*b#5`^76fmY@vQN2) z>GReSR<_R@#!adt-$`{pjrOA=s7)v7+G{ClYqzfFN)_4fcYi*C^}LpDjxH9^^_f&L zJ05n{tG0DTySn&Z;r_mqUZCfYcExio^H!y4Rm@18zJp_0qGD`J97LFtYOHkhvWz3C z%2t51GNxYT3cPW2xu4d(v1(ALqKC;m0Yz?d_;Y#*xf8#8bmG4Cd~5N749nw%e+~T6 z&{%u}O_KR>5k)k!nne;EWe*}PF?e#jkX!QutQ&!FXVc0tuqU-eq&pB)thZ2Ru7}gh zp5=?bR>tJAeElf+(?m)fmWvH4oZ(4^Da#8|lK`cjP1th@<>v3WqTz=87wI7vi^o2m z<+ehGU@GLv_+Nj_G)AoQCk^X0o~41RI!fMq3_Nycf#_o(&ol)lSnZLPx>i@-wF^bW zq8wdd7=ax3p}H(q9#5SI^Nr!FMOB<%fdCVY*<}M2=w{=KF=IUzqUAW-G(txx9H5rN_&f zVzME!!ibPWqv;j0Z>8mm;ms#sP#}}4bmEH&(Cf;Gf=VL5x zWeryhdLF#e`|d;t*NoOIY`M0KtnAy+BZ%lVPip@Z2LUbwl3EK^^b3=RegoJ;QudF& z%w%!>o8my9{Z9gedj;w@pruykOiAKAgYqroU;Krlo{J@CG>U$lna$2;{^zh)(y4$8 zaiagk?Vz)lJaO0*k22_r3vy5dXrV-At7sJ7Ze}YMDEp>+Ml3 z<}Vhr{dPF_YG+BVxvuYZ%TB#5cWrAo*582O!JBn1RtlrqWUSn{jc<)tK0GtB7T@tG zM>tJd*JXtG@KtfR9h59#IKKCla+|<6p|y{nBjn?KOY1wnIHa|unisqqFRSajm=D!k zFVYk(kKE`VD+utN1pi;chnFHi0xZu9>1yN+!rftHboy*VO#3vIZ`72Gb?#KiO*4fY zPz%f&V+w67)0(STa0vp3HdwMr7C^#CzX5RbU_|hZ-+)=iV|mI=)-&p|`9D7YKP34- z{}cav{{DXxB%#n-g{0yObw zn#{ZxtM8*0RqV5*5Ko+&TT4Bz77Wd0>7L2RA+~9f$o^~@i|if!Z9WqcF-0`ZHX^P1vC8e57*%dUu8t;f%_DPhsIysEOyGBg!=38Vg);=U# zrOZn}MSMl$D5G~n!%b^b`DFC6q$p9L1$4=EsBuSx*$QR(Y&m^0pW4AKHZ@HFE_7&7UU4q%PVytRRK$;`vpH)zvGY0)_$2#&S}whQ=VrIKh0!y4wWlG+cLa@7Y(XVO47q}*t6C9m!%LQ!|~D>q;zU<&K44^w+JdVjW~NUTE6a<0k{M81$Q#ixA63*rt=R z^0JGkWK-THfx6}VcUSoW2twHX`~-eJh`X+#K@)By&3P2!P8IV-n|FlP;;&o&H0 zk&9Z?t>ZU!@7QCg_yo{s@`cE9ERyK#p}mGI6p(zQ-Z(?1Njct~LKF$@xEIJ+BBJPdk4MJjqK|!Zvto=JANm#kgj* zsTM$bMDL3gV4Q0nFiUC=B5RHN?7kXdQgSu<4J`%U`RH;U#8feO2ZuWqj{Yi=1){Vl zIlQ)0?HZjR?TG3Swg;ms?x{eHjrHQm0Yl_dyY(}sd^CA@*Pzw5ke;F7P?>1?(o_Gw z+fp4>7R~OkjkR1pBo_7E9EkG;Q!*DH;Z0w7VFHDt>@cWCHhFJn14A5=szFuwe6D^n zZAz!Z)3sEFkxyLxvZo*PXI!`68d4!>hWAr0bSl*IT9}nLp60=5C47zdkYRnu6P&nB zV^IJRHrqw&yyyG%f}sAbr$f6%O+Gv|EY4o|mLU6#QNpbRPm;c0%hSkekqUS@3rLwy zID;Dz@wpC8C09*D1fNWQX4m;;IFx&3xuUhyQ7Aqw8q&j|dgnsecE~05hq)8dy`@ei z4r~u@OZW7T&48XVF9QG6S`)4<6(aIgunp?E1(#XCzfsYX2Hn^qZ>|{U1JA(lijDOu z{z)+IIg}{*3|WW!DQC+V|325Btq?8zlqHDZV7p<9X%t+C<9N?0N!n3HSa95$0=jHB zrj{tZKHmEpwqG@DvWhZIcSr?M=00yt1mABYS*bPq6nv=0wXLn+n`C2v@AZEYr>Y)z zUcHwundux5-@B)_h$B&4tR6icdb~F3LQJVe;j1~D~H-DulO&3_LEELpYUti_8(bR*?*!h{iJ_p zSwBO>0T=IC4*K?$liW`wLR2#lvVZuV{EKb+Muvq7#GyV%CpSHxZ?au@yzsAqKiuB{ zZeA>5BH9mDtzPJjT^hE%bNYtEq{&qTo&B25E=0SY`Nq>X0F#q{fv+D4qISOli`cop zA?E|7i#y)n&*GG|RM#uAAqH#yUxl2|@nGNZv?(jXsfAN$ROn5Tv;&hs&ydqQqp6uF zAtQmAK!FnE9TjynRIhDN{qFJ$XWH@Cvn!>9$Wxv9-iHW6&5wF8|Db6xQ4d%A-`IQ4 zsHV1d?;CY1R#1v`P^9-Fy=_r?+w=}f6+(ajp_i?oNbiJ>(jf#4kc3cz(n1JDS|EfX z5J0+s^s?XV^Soz_d*5e_^XWYw-sgGN7-2C716J0&vLhi7dJ=xJi0d&8nJW3F6;a7135f@k$vF!QpG2HIb+R1 zK#cOe+aA!zho3A!4N^f!U_dn%F-Gd`mx#QSf=^sk2Gy57 zbi&8~$b7w++l@UVobF$+>_xS?O+?6?TrR3P`l|JoKKpis?CSi8vSxVSeeu%W@gUjg z5_vq?*7w{g^0RdPDVAa*vkkWo`n+)Fx_IY=RzLE1wF zzBG3u;PUw6iI~*Qf3_j~DocKQS<2BRIlprxL9;RZ#9KtSM0(QP!IDmfJI`X))idgL++`ydE zPORSl{h#ToyN@a5j*HJkcA@Ln3@$mG|1=pxalO-3ot%|N3JK0y>Dx+?4;df91cUr= z`ls}ohR-X(k;Nttsg{O&l*)VrgAkJVuF*e6-Iw6RrX` zq@TQyFU!g|XB6?=mn83-ap%axP{)-tPhJH$$NKdVBj|S%$FknF+j@7joUjso!o|nU z^qQcPm3AO{T`vck-6*A${*YKC{6zW5O9LlA9Th-t8y&bM4XqT13tly=4gB86%@joc zVF{RB*2|rb@b}nBHv`1OaW%jpn(=wFOPAjVURuq8;&!EN2KY+leujI#Qc84d*oBM$ zJPGbz!$xG5CGbN7&yS~@oLbkNxrs__tx3jgGgFJrb+fIJTlk}GJC*Q*&Y?Nwz=mY| z*1%=G?7u-#b?vS*Vu+)fhW`1``09vB3Sr6KW?Zd+>No7#q%4+Y{nl5zbOW<*N!XK= zUO_bDghjWBMxw=LCs(Vvc_xfr=P3zgayEf!f-3=GN0zO+`8hc^P`(TKF&s_yZgqLJ z8SjT|p`kQv&FW|78UYV=H_S9#%#agw=1Q=3r7h%{SowIlhc`y%uqQNgBa!Lmge{xx z?pVa&3;sKapODgh%BN3W8Y1905W;r@>cO7$!e1JIiy5#1RHBT<6O^{~*z z-({!;VOG%%$YS}PTI zoe|w1s6x0$LbNAXYkBSTg^LXlf@ywCTKASs=hPa4U`PvLg_u>V@O38n4>(D$bcJrS z@#o~tc3zmq29q%FQa;5hH#rM(1~dFXxT;0!OVy! zACH6*EqsYLHUt(%^GaX#eN1A}+#JwBK&&r$ZIOc?>7I*3>j-b8K3lR=xT?4NB!@lz z`_N7g`c5wgM4>j&O{CleaqiUQz{L}7@A5Fc+utr^n6l*ge3FM$PtW;9vh=dBZq@o> zHD{u#vafggGYA>oYqxltjcFB=KJ?T(bT;2@G+)h7s>99G8jEza<>*IuXZek}yj-g) z#0exm1w<6;&QVN<5FcG6Akh2ey%hNN$j*!NY?)h{?1z(H-+T+>jWN%ox9x0n#4dy# z7I#B9kqK!UdcwM-2o$n1u&HtlCWb`4@YXy;DL(TEw$_kI{xWS>if1h;0UghkPFTAs zurBIW&s*U7+}PrhQSW@{HL9}emtR(AG<(hJO*k_OEeKJ35g(}o6WWVQ% zCI(nCIk~1~zga?Fr^wk<-8AA49K?bEE5e9`RHErfjWMtDeH*bmw6T7(ZHcPfz21{_ z+s%fwzrUHV<>LS1KU;m}9hoj~-m;&*4;Ksd9Zo&h9+)sF@LrqrvpRp#i4t|q zA%Iw0?K13LQ74K3k&@twh&xGSVFaA_^~uc%tD5vGw6XB+?demL$|#$h+E}X|TWZ&# zYW;u|JFR@rvPq5j_l86V;{oLXKgt?*bDq4DC+cZP*K)X>w(D&x00o8^ZeUC-^+m(gLF5(rEy=zF{5JiRwXxq`v=0|ulob-Dmflij07NLr^2L8v45$QC;+d3ov#FEqyc)n8y` z$VO1~!}7Ov^oKl`{OqAIodB|vL+{|{#0>X`f3p4Yt9*T{=dzYnT}^GAe)qWdJ9dF6 z*wVZsBQ+8c4dtuH5WMY9)W9VtsvYT3x|0mUOIDRkA`@<#%WPj=e&{)a3N0m`6T#(H4`sCVBb(U;}hMCtGlio%HGhptF;=v{-co7g3f+)^xfjpi}8)g7}r_l11 zk_`G3)lnzTh*^utz#GsngALBe)dBM*Mm?^D6xJtvo`tUyP7;ef3^n=c<1v}-R_{%% z_7SEf2L`(3F4J=6!aiD9ivQ7G^_jnLQ~3lmPyQE@Hhl-FC}%g!-Rec@TI5>wnI7kn-5yYg7^JDzDcXQKQ z%2v|(vYsM}Kx>G#P(YRx@E--OLcsu1z)$wz37B`wIsx(b>p#F9ti^lOqfRW@j`e=8 zy~eKeH|TRoVjXOsq%7yekc(t8o@raVu1%e?hBA%`YNN9GO0sR1Gs+%PT=+=HD5r@lA(-bi;lQ*8H_-sLm(>xo0-L1-y7-|$Pxoc4-5(8)tp&jF@L7lK~zWiafz7owBchg`WGlasWl4Hx?}}veeRUpC|ZqdsM<7r_(6RF_WEr- z!;WfUQFuCy=(`?J6^Ubb+4@@MdOtN+a$0r>7?vk!`r0t3Q0<^ne~4whNB|JunBkuw z?f96O+kaQ&<&oP~Z8~9FI(*x3+N92G4=eCA2AdWRh5RvPMkAgaqMr20_W0`GyRMYu zYWqbAMtdh&8WgLw+(Hd-vw`py%?mDNNXy$bz{Sfx?o1L33P-#r3?adUtSHXGS^AdZ zPi||ox?^8$EQ&FKk>#8%wi_flwNuwO!QqF>r%8)XD)QzkK^Xte02^2~)(3{Do9zk{ zeb?F&PF!$6D}D#~f!aBUosEd)R_hReu3=hzsHx#TXQPL_+u>!bl4%pZ5AM$TtKMmn zF5C5QwiB!Am?KJcx2#pKEiC8=Y9m8;!~S2d>^w9)UOK545$3k~7nT0(ywsiAVgt_H zsFr25w8+2lKBC*HOKRP0^XE2yQT66sJ#afgoRlBj%KM8d^E3W)0n3g_T^NvEnrAYM zHj4z2__}`+@`tsixI&iL9=G4)t#*o0`JRtqb{#RyLsy^>U_|@2xFYKlHD2PU+Cpqr zui|FKJ|LYP$7bIxwo7JHuaM?OI z_jDw}d^PVf0!i4EIkwKR_=%Y4MEl4hg%yU-a`h(*Kg8@(GVp$B2ZXF0-P6{P^L`d0-!oi)?lu{^#iYgoAuQ_3_XnJcyWrr z{&&$So6o&YFsew=@0j%~?(3k?m}*SU%H)T4ZyZsycpbYHCBNfQ|A=P@@ia4Z&8Oar z;wW58`S;(h{zquzSpF#$iECYTUpL+;$Fw*4|3y{dzqWxj5$EVrwK)=ZRIp#7IAgMm zgAB9UgYF30m}J7%p%i)?<(nOQ$Lj({9ak|l=*oBcmw_YptTQlSx1F-Gur5sTI@Sac@eEb+zSXff+TQF?s@-mS zi2(7)XJ>sra(V|9;d@wne(62`tCZAHkJ|rt(Mqw#2UoP%wQQ%8IhLn?QLzn3)W*|` zcJB$4*G!vSpwHlY({3GTaT+XfsWEPnJ^RIx1J_NY&s--z!933kvx4DfUI@# zk-uCua-vw{)IK$ax!2)35rIdgiIqQKL+9@PI2I$E?uoO_VL4c~m5?p<83>3cpm zG5rOaxe)c9WBW|!xu(iIvv(ex2jxnH!VO)S%l|IApQ8RfJ~ej#A4d1upM`~WnmdO2 zASR!cupLf9%74wPv}0wJ9v4t-zbL5_pS~IC)Nti@U6iMq?})J3^LQ`36fB412DL*GQf=$E zr5~#cYvVOO$#;r{K&@SzU0tSlF8Cixrv}fB&s4Lh+@>juY-oFQhzdWya<$AR|DYPC z`dMOrMSd(S`f05&JY?LTWdJUcHqRrkzfh-^@%te98OF9f{FB?R6pX7jo>NYpuQ=J5 z0-1*-V(>~ww(}TLxJ*U%$GbQsiosQZixtz&3|Shwd`c2xoTk}4-orwDM8JG!-|~G@ zm~V*w30kE8L+{1bNqYv1>UN~G%jUqK2Q%4s&^-?;NK+OFU?=gg`$Tp~9X`jK@{VytdH3HY8WQrQ0 z@5fSSM8;TsB{lmo2<2zTB{3Bmw%>a4JZ&S^fA}YwYl!BEcxK2?b`Md$AueB^{%mD8 z(Ivd)R*IiFluQcIQ(|iajrmehDO{VJUjeSuA4cW;MP>I!@|_&*x_i&ed|un_Wm-mX z+lp|KyKR?MM<*zEW_F-e-%v_tJQ0BBFhswJEzZ$Ms%|hL?WDX<1slMaowR_U;V<+E`|GU=%3s$khgb{^c0{{+2lj7Al}c~fyYN*N zPt#F|YStFdqVb=j!n``n7j|V5p0V368t#;JWKBWe&+*nAn%idF$@zOmf@NxYw?Dl< zzj4hml6rmr1K+z@t5)H*#ns9`FnWHg<~t68PE$A5aBz<{f37K(hjY$3ny8pbtH4KF zEXd}SBHSwzYaJ2r;@0_F&;DhR|bn4vf;9-(~0@&}|$_`-(d-ud1=hR$lM!zcAujD;4 znrIpD)!fEm8S2g?*RGW99iN3(dOoH<9;RzwM0pUNd$lKxUwfx`sVBblK8jbg(8u05 z=6P#+?$V0MdXKB$<|5y11vwhXXk#5LDnqyVWs;5O-@!#&`T^#amSck#1CggKkyX$^PKBW zbQ(^oAP;yj0k5^=Vb=>Xp%Z@y6KnZ^d`2mf(Zkr$k{&b<9FxzVDj_Eu3)4#vikWy` z=*5j_Zj(R5Xt`vX4(Sh(=l6x)m7Uu|t5N*uLR~62f&V8rpN6{U> z1r4mSvTF-KTbovf6EaDPXzJVMHq*n51Hw|nE>4nxgsNG;5IU01gm}p>?^d^?kHpVz z-y?4D{zb*;tak95+-ZUn0UhNmSF?EyF@_DKrjf3&2p02NVFPRizaum(Q>q-+jtP!Q z1j>P2vugS)0jQf4%@%@R4?1JGGd>xSlnnK*{7#nQc+;h?kZ?=8az_k8Qj*e{WkZGL zZJT<|GmlDF)$KkthMDSE0&Qwa;;b#JN#^D+WN4|bEQSBMw4J-G`f=!~_#^PPT9b@y zPpl2SNLPdAy3pW;t1=TR7)Nro#i3V93V%#!=G z_?{*btP!UHnAY@A2dGMpp7xNQR05fkrt`o89By_y{h^k#k|!T83}H|*;IeRd?bNt7 zOka&R4Q+jW=M%oDlKr>u2{Pu(K0*kwiW~zN*ORwxMIC(8KEzqiEI8SrqLbtjCS;Iw z#Or4kiDk4;g3ja#DZn*Mkw3nUU4sqNwKRHhYP@Xo{6PEzxqDJ~oj)LZ_`|#2!cBRR z39qblBB$ULHYKG;1SYObY}%{G;aejO5oY`Tf1i4+Sa!;A*U@6<@bEx z-u-GXYQYC7F7m_zH;c0J_$jO)ne!(iNz9NkGD6A|-I+(Bl{Q3MAC1obvU12grOp=6 zLoYO?w=Rc6`8fQb0L#`4{&)~*#5J=gvLb%v17vCoM$wv*=!go6rny_y$6m;@jPsJ+ zAYmg8FoGh@_>||Uw>`kGs<0*;zuF36%2}W!p2`>iJQO+omVk`4*Fekd?ezmat98Na z&U;#(x~TR$rG9OL?2a_(gpO<{E>{`Ao-|(UFgjScH?m;1n7Y7sNsM`twyP9LrmodhuZkZ<~-GZQ8nbTXpVge+n9A6v8~F=c3cui zKN_T>y8g4321sai12cg=gUnmx#aZ)fV9Ff$hyJPF;QNN_$Kfi=rmt5vqO_vFBwezO zeKn$a-@_Gle>!fHi}?iGZsFl4f^^#>C?YaiH!X%mI`qD0C_Ha9r3kZ4J|H_nC|r6C zMT-Q77EhsaQ)IW#Z~v7YMJ{luEfDGUWa4+)d*F9Bt=)6h)1qdpq>tYZW$gr+E<{!9 z&fC&s)@^kOB(eIpSp!S|I{COS*bR>Q4> zC1lxl{HvL^(Y=Q4RI8HDl ztX&p*;SCgUn8FKI2TO$*9Zd2b%jE2FrM{Uzd05Nn)#_6Q3vz5z0BI~Gr zsf&4R*pu7K15PPRIV$bxRd#C`=IbT5Vq&SN9#LC}=i#f}r>^C4jn(z?pHQk1mhBnp z`Hp3u`*rbrU+}+2$&?rzl|{HP>mtP~qK zd*T7&LQHjuNQ1(+kr1PzyMG^L0H zsn!HvN2$*N1n5%mkk^zrTO+b&P!!G>8Zdfx6P&eyzEkFB|Eas#oI?%&GvxXV-@>nz z_b*}4=KjxZtX-=NJixPqRKWV0yrjnRloXfa*xL)8TLzsjg?v4i(xFom| zwY9XR91e$>F>Wi}6kqd5zph*WvD2gO?b=*A(yAO{Yj8#*e!%+U@ff5Un0}%dfr5Xik&kWe_)@GC9Hp=jlxbbkG)-P ztKPKVue58+ge<=QH5#7LJ!)*b1mFB>ZNP;vGqAjE4t=q-Oy3WN{zb(Dc(D~GwD0O1 zqyV~nV)Y`!g#2xBrEl{B44FtKc%%oM&!r!gl;9?S-jmFvErwkop)4N_LUrH%QwGD+ za&X@)#c0!GNX~4?f}25uLE{{Pt|N`mCr8?i&nqZ^X;bqsC=k-VlsuLM0D1r?EugAt zy?G5jK5wT-A!jjI2JMI| zaIZFG&hSXhw1aXzE!;6+d)W&!5pGX7@??L&%XPom5tkg~Ew4h~Q)rqFR6c#BxvpaP z7>08`juEOGal=>^xop2K;A4x!*0cnKC@Jse8?lAz=S_?!WG5EbPrjRtI}S$Q46iIM zs5atz27O=WLP@c(ICE*e=rd*$wYD%(N>NL6mn{p++90Q}c6lBP3N^ybnO(f>(K>E& z&lgkf$~S(U^?YBpn4J1Gpn?`S2uX8~>$TS@C_H=N?Hw}&%*Ig+yCOsG89otxwh9n~ zK|PzfCZKbxEz!v$R#cHQKx!=F30Qr9blWK~MdHxq4qraRuk04hiw!^q}=1_Wx#TMT_&ZpBL%hPEB2~mm4zg#8Y`!EW1F*Owk@ACcBDTL{>q!| zvD(TK6tiYE+2*kmR2dpNk#{{e;$gDS<@s^-*2CFjRh55j7&r#;#|&4d(3~mx;EMmI z0U@F&cmSif`D({e`NWA&z(l(dG3GU zqpD9c59q&F3}14c|3#H@cKOcyzkLHLGez{OMaoK>TOEA=3usfW^5R&KVx^3wWY?B1 z|DyV2?)Jvb@gMz{^3T@4-$HS$p{$7wfQpf7(VQBl|BV3mw zuP`uq?RNMSJ`1*pm^WR9o4+y3Y6z+ z#;6UNc9N77){7}4tr1$~Ogn>DTAfq;>6PfOW;cN9P9k{oi}E(xGWH0@JIh(Z>#&HQ zR9dpH|JQ^4-+#TP;xO=HpzE?d!jx4tD?N0AojQxDj7VBFv@H(}A0XVS41HY-TO)$w z=0gMRflV-^BRHFoJ!@mB{*j9D^_Kd9YD*4qyAq{Lmt}zvG|7w} z46Y6?=6K;_g6|ysJ&yefi3M}|J?gex(LlP7B>YH?PJSLK`&+B_io_PLe?&s1#6XvD zI*^_@+G+~9Q(3{)La95|7$}lqK^VNR)1p)9h5FB)zP1o3_Kgr4#SF>^!s6bpZ%UR{ zc-gy>m_+~V1ic@2VcfmT(p-dddBU9*a1uK55I7#>S!Z6Kpo!nPT)W8O`|{;5Tadm8 zAVPXD)vSC`FaCDmr+zw{SPx%qN@@_@<&S~{uCT`so&Z=ZXiFXJ1{cSHk8mUzsB`uP zzboUJs~rS~bOV3AqzLePD);Yh2syV&4LFF^D5OF9_t4g&;@BA=LvD2_L@_lrZJNfe zN5Q)UU$sLYNmPuA4n?jKrZ81&u44(T*m65*095{YPrPzJd1(ut#6L3Le09va_AH># z(eT%bzXFG(%nLJ^WrT{7S*_vnj7_2X3n_9eC(mK}w4zdkN60I;bf% zh=rkPGr<*cN?+p|*PIgL-G(3c`GC+&(-wWyUfpDP06X+oFYSQf{2 zj+8krmkW1j1Lvt6KVL;F-(1t^-MWycg*wECJEsjmWGfmr-0K-(caF9p*`Y3hqu6XWP}R#QtFyR5t%W z+WnN0?n0bf@WPiD_{w(IS3*Rxg#$r_N&~A-ZD#G@qQ(m3C(-@Nd^7Cgf9GdfGjybliG zqG1vt9FQ2|-Uyw_7yQ6a{fb`CIo{|!d{0M!rU7s zkQek=ZwKa~=V3#L9b*)CB^pi~yzwePNd{g$0rph}#6U$YQ+TgJ@zr_-4$}{Y>{blN zy)h_bMe#Htn8){ay$u_}D89gXOiX?Y3zI3acTI5jmf>sb^<|WnlSFkjQ;gz#rN)U! zU~oAv-%gT^dbg-ARJelYJnw8J>|Uo=Vy+D7|{-N+K!zb5YBAKKX14ljB*Z=OFo zr}3rwUF=-|6>8I{ZDnTxAE}{bs!? zBrlDpk@q}Nj%0K89(({t^ga;I>ce$AgHBY9liQPGgJgv7F)9dia3)Now|Dg&$1xdf zRwn7xplo&7Yd1H4A(rKI=Tv6t!pYFI*%CLQ0=An-!<*ta}|W^H2C5 zc7M;41X~DhoJ3>`y5g#VC}IT4Is!t{tHt1}ZM~5j-sK)(lnJ7oK*%chuo-RoX@=Em zq8+1itZ-D@-*jew6M#=)w%A;Jg%74X=kbS8WvO8 zBn$f*+6qZ*?Ly+FRv$&ezpbyvq_O z*PAhCDa9l;w&+c^`oy%7=YlYF#$@>3)?A}QoN{t_bk5NaxzLHuTc?kcjdTp#XK-uM zkLsSht!8RD9+44PwB=h$?nzJexH@WZI>?xglgYN|kxOqG6Nzxv{kYZn;DqKe|FUNXI=}3YQ+@U-c$^EJ)@@`?9AyU1zZ&xpI^;!oiu|9*N>K zc?gr*^1ZwTZ+qE>3H#^0=#DlQoaG3$m*)NN*{<#sj7=MNOl#=Yiwi8zZ)~9A<}v zjnC2f=)4@cwd)?Y*S<0q+IYWj#>lNzBM&fLBDe)h@Cs*bNDZ1`m@&Q9yYL-e%na)z z2f67=C8Yu=7T$+En{xrhnd@4EoBlFW!K77o!=>NGY5HbH4Otzv?dExs!Fv@Qqie19 zEig{|9R1H4m!4mRPIPV7 zSSiMob`FYq?=9E!?|VN7mA-H`%ykrHx|T&gFNz&3Uh$LAX-`CY&qQ&2DIfQo(=`!h zO$cbQR&_W%x4G&dr3VX-{XPj8w)jqe-i1>@6uU_WcYD3>A86{$BX=i_9ghdlYx$2^ z5X&Be!cEo5RlQF=+mF7VuAjpd!d@5HajHpz*QD!L4qZRb_&naa1%8C9aAxb%`z{|P z!6(TiCem>FbzsmB>*zGROte5Aji#Ei<`%pyASzigVsoM{+*CKB|vVK=F)4o>zj zpr+U5`ud>_TPp17m%&(*B=imetD5L6WZTT>g7+w5z#`o*NB7txuUP=Cg8Rk=okag| zzJ{Aoml71Vm4iPdoX`u|&9>7&Q@U@UVXEFehS?AnIrDX_;=C5&>zzVwlb|(Rqwh=} zvEW5CYoUVXWhL1g-x`4IxcKwN4Gsq~%b8M60T2pWQEs^(H&mpvbOwkIITAjsDR7uH&RyEfety-JKhRzw&T8*L zI5n#}6L}D1_9zRXu$D*J&nVCGcSR{zq!n;qBpk;bzWnC+PCnO+la#XQ*cH`uezahy zOI;k`zTo~kP{pu-tx|ZUf}2teAvA}Uqj9ChX0V>Gts&ewo#L>=u>3hme?G=NH#yR* zR3#9$&>^%Q4bGb(Etik!^^a#=z>56*NHL1;?;Fnz=UE5|xx;~h6R`s>5ZzX}1U~z|2!bY`?N#V~mrG4vwu`?>)`BU&DuT|?mx{+g| zwYAA-GVE@zUnKL^!#3sx%!9;Ez|<@;-JNcFUSQsuKu%a4|!WcUmV=-^}l(1vP6R44=seJ|H ztL-at-fBF>~#8hx1=o_*i-MszPv||g*Rrl zP0xxeD3nSIJ7_N)qj>joZca~-Vrr6wn-F|di$NSICirR#!BSzbUxN(5h#H$CKEIB) zq%+iPZ{e#?r%Qn@3QT=0oC>?2Rz;7?8y%kWi|2H1=%WCDww*g&MJ|Rihpy>FDA%Xr z*RyYuv|RRoj_J5ywBp0z0tfAMLknJAs}{E$`;u&@<9@)B>hWyo+Dhf%gJ^-6x$p0a z@q1fyL)VyIqY^@Ql)8f0>F-`r)3zjgwP*>Cow)8v+#547eH?QA_E$@k&A!e7D;_1T zom*6H_={7cM*4M~pf_bB!9Wn6k)7nd?ql&v!CSjGZ4~-BIV+QZK5fQ(42TQsDnR2B zxg&+Fc|@&!t~jVF2Yw44$X_k)*CaO!YnNci}bjuxA(l%)sJLr-g&~z4N1a=%bqrN z2{yFRZ8D2Zo7+ac$-=v5zmr3hb^lOcKP(>4QEh`1#5`Lxx!JATxA-kBdA_&eg2BNG z*Iy%t7}XJfaqbjouPrz^pM(0DQZ6nmo2d*NvArhK6(y+%bK7P^r8O$g9L>Mu6@K8K zXYaaa6)ADXKb&SbK7pxrN|^NwGMOfPtA-dL*A6nbUicJw_VEVldIkH~Eq zvMb&LiJcQZuJnjOuqM=>{_~;q3f|n-!l*sXx)vUUEQ`;UkHZjE}MiA-u$G-W{ zd6)X|25%(Wn~Z`9T1`8jnnhX6r8wAna|h%s-D^k88@^30$zQ$WBW9-7?y}P`wWhTN zjHqI4Up#WOi)M^=OVB73OW=RzW|(@4wG06NaIQV%JhRzk%vP&(4~fRzVzjaHxc(`9Rs^AmUwYRp)8#&ZZek;;pdWThrN$_l;sg7NI zpg^z0+nPjTo|2ZQ)_yIbdo^D611Y?5v}Ksbd$=Xq#@Y8cd7LCds!fU`^f&79P6SWj zp7_K!U~e|HklJn;D^V29?&8KyHesb|H?5ooo{+?g0ce%hY;7rByxq`ptGGxJ&aopK z1kA3YxL;Jp9i;N0gtO@$=iM|9M>2eC^RsYi!74{d-8%_@*S06{HKw%S9^TaS`YhdU zNS!^hAwqvxLPNj}UBXiIwaa&_`n|4H@adt%`k-irr}dB{tdxzE?H1@b>44te`O69<-wJZT4|Yp?RmCH8?@mnrG%w zjUG>4_DJhz>|ay|8h=rJm7|=$9>4>U;=~SSQbFx3Do~>JwjG6HQ^>D9gK{+6>W6R1 zTTTcNn(8sGKb8?8UL|3=jo}w4B%!lb@vXnMPsyM!%V}{*0q)gKL-W+n_NNNupUHdYk^92uy#qSq6+! znJYrW^%S}i8hw2>?E(9~v&`o|aI&XpK*fkp*^;lj35(yM%i*G-ov4n8P0!L8rnyrF z6miMDaUY3M&C}1sEQ$X`HFyU_A@p$^9rN=AUDBRw`q>v#?Ov&A{c@_7;$D8qgcc*2 zXS^FrQWXp0Ztn$wgPEIWNMdxq3QDiN@@8m9cMomdD>8SDJn#@|F4SHKmOo48NaN_> zDL~B}QFBoYkRg_$ue21GH&9}pu*r!xp@V?sUNqo<@K{%!vqmN1>~3JDO32Th#q-Lq zd%qvF7*!3u`O*5u8zzpk+n`$ZVR-awTL*Wy(N=%zFyEBj;a1VLW^rSWoA0aoi1ZM{ z=bH)c598e$gO%O6;SW8|t0R!s!N>NWV7vFQut1u!DM^8$@A=Pf^@F!PZZCH4XxcIk ze4KY~-0in%0Pxy6XlS=Q?<~=^1^SdvH0)5n{mccmk0PtQB6xzsis(m^MFB>IL9g8T zE8A7aeK$9u5YR@FE!$lf7~n5WCs|*HO7TBPZP&G{tjpX&HSf@NE?MqLNd~sXz7olo zY!vVgVqp`uSYjCsD9jJ4)+V8C#t`mY_Re#v`)Luy-jN0e^lK^QhlHp!`F=Tv<`9`r zM@?hP_f2#iod8WsnI7r70<$-&p@MmUlNqk3*%kzxt+m(NXTsq=@imj@^RFwb#B|0` z6%K<_EnPyO4?gYibf;4_O+#$eJKmRm1F#@z1u=`>9`TyE<{kt_E*IB>DGf5Pp+Ffk zK43J-oI&ESC^t1_{Hfo8->R0j!B@UUH=3u)d+S~Jm30TepTn_XK?Tn)m{3|9nJ&Ye%r z=F0y=p0PTKU}4f15AyiF3w@I8X2QG-6e4eU?bMG~T4mF6+`^>`u>ONtvGr z;*4RveUJt5_Vq#;<{4e}n6ljcUhwwg`0w&|(#kIlB>F4Tk^-UHfyN2PNH0e{Tx}Vua?93qMP9n%XP=G(9c-qXDG%ltxne-zuirT5(o>RxVH&gVk&vUzV)ru_rDeyWVs z%PyuFvqS>wHnN&bgr%^DcY^32h(6gF8<``qHp~&xt%@e#@rWQ z0%*#CH7?=G%483darT&bNhD{8lxqj{JcK&?{}Ko-r!R6{|AG}y%0 zdP1XO+15~{-L=6}=aA?K^y{p zH|Wt|K_mF_gG5rz!REy0L2;MC^uWflva;xD+t8r-rCF#01+~+JEPT5`yZ;CH$=Q8B z|DRE!!d+94i#H*ThaI3}N$8*GAOGWtkpI~WTp8T1ZkKL?n2QOKXZ%Q+D6XPsfQBR$ zBy+E98cv1}=QiQ;M9Qk$!Wgk9Q9oj?9gb&#u4udDY*dPRg|LalD34oe|IQ5b$5v7b zxG92saxbpnHm^Qs=wp@-$2`eKdL8t3OgEPXnR^c_5ONi_SP?i$)Zv5>ba7MN=K9w9 zGnd^|^fkE=;HOtDj*^r;vAM8yK^5NqcCExMJKp6wLY#>?DuaA%HdYZYjS~1G)eF{) zx`)f>jRFh4mK!v;7c$R$m*Q;Ui}R!d-`;fFW3$)Rce_wHEO30}Yug520*W`6fA_sN zMhiq%m0IQKuf2G6;P7QP(-3LTD_>D=-jIL*YOe_c{dr%TDAP@*h2LJOd@$GMj`J4r z0g5H?Rwa$0hmg3%pGOco#MY(-m;8x}og%mfT2Ug<9#fd=)_`=#)0?LxmeT|%u~MY( z+0Nw9Lg%c#4fk8<-5Y*PuG1x;yJnsZ-KkX;wn@(p-m3n*(<9T*WX_lYdXa_Ov{{*) zZjZAo^Z)}_$136PYs6m$s+n^14&pVp1$Mn6|$mlN6d*yM&KCYCPOjkL zQuA%iQVIT_{lNeI*YEy36Yo$fHm8MNnnzimHhud^p1-3g;XC|Bxoox#^Nj!m1Rg!& z_WuBU6X;$j*9T7^Qc4a`H}2i;Qs+(Sb!0{Og-5)zcC;K41#+2BTl9FAL#+wFOLuRU zo&QoXj{0$rcww=~1K+}Q;p%(qZX1;i0v1Z;vd3bh9s>o*TP~{-%$9JGiTbd1jba40 z0-~OYY=Wf?34;lQ?9mN+U!PLiyQg7(!eGDXlx!eCHt;MC_o{IR@3n@}OP(7O8gf2l zD1WtRid|wzvdR6;If@rkZ)cOhmIVaIQ$kIY2ubMTl=q+4Alvyb%eJ^TfCFFbsXCLF zd3YbKNS^1hLT9P77jxO^`%Y@Zcw*7`eAYg#HbbnwgLbE$T>6{r?D+8eww2VTb46il zadtvoaBp`|O)FoR0cgjfVO;90)V|Z370CG0%aRFY=LjV?JeAgv7Vp<%e6Pt-pah+7 z(aT4)(_8gO@EE`f5Y-_ zH&Av}10gco7VR6GY&#<+-x^t}p8xls}u{j?Pirz66*nq8sDI)?m! zD5l(a_{V9JRTF-sxRxw0E zHn?-8T5nJTxvk-SH(uGK+Esor)xVZ zW3iQy&6Q8(2d_86ha^D>&-6V9IsyUzPkYxH)MWPV^}3c_L01tXO+-MNpds`kizrB! z7D^~8(vu)P^i>q3Mp_`!7BCLEu};vyQ4x==bt>{ndI*xtdefm ztuAqSa>pI`Tok$1F|f<+(<@tz_bsum$7{x?%(n+Yd{)B4kI_)7=~ex!F&Q2k4b&iZ zMAg^N^-d490BPjW-b16j%v2Jls7=hYn_xmfSobP1pRAuT3{p^1^3JWS+>~o)U7(4y z-C=o#feML@*(mq(vz7PatKO3HBf?IB4Mbg%R%Ah7MUqZD z{8PkI#e4vwlI#VdXZ`W$@f6(lm3qp!V~S6HFaeqi7<%kd$vlCV5r-SC3z&hxg2an= zRN^u}NkWVc+SXO_7d6PiT7w7ADnBG$L*YuCS z3v|-DOT?NWXq!92cQP7HWPl=B^6E7%O=pF^rj>=c8^hzZPzQq{>M^)oFE_H*Qo8+L zH@c^L*@@QYr`CY{&lYwWTA@`pLItZgPPwcrq~U>UhDdk$?C$P|5$fRncInBIkeJnL zff{eF@@)Y=_rfZFAP4TIW`>I5c3|kn?(M_yls-%Sz~}9>y>i%pkuPbugjlJFto5BSR0>jtaWuY7J6p6sp$1LN!5V4 z*_z)X-`U71l2H583$k)d-v+D~=EP^n38k|;0q-6%N5eWiB`+@~D9H-A_kRKCq4E-t zp$D$$y#h)i>1aj7@xD{D?_$D}?gqtWJHpk(M$|St2d_#Zr(f#Al5an{4*W4g82D0; zeUkm~g?1kD8Jbi6Kq;DH6ttpkyh~V|KDDe@+SHf23+jC7l56vuN%QMi!S<#>ql2Jo zl*4PHwgppr(#^lcNf9D0!*uO>B%q=>ON`_c15X_48Cw)O?)v!Z9)tQ|lgRQ@l#)<5 zWxN%hg-+;}8x!x4Ju+Co%qMgU1Ss=L20m>~(2;I(G}U+zb$8%3y+^h6f|5~5{~?ML zMTnu1y`d!Q-h+UCUD+!%gs4U0OVZp_Y+dhy+bvSxMW(M>LhzF{rOdr<47bE))r*_y z>Aps&DokB~GyV4tyk4EsYVKxyE{A_v3zM@KARF`2BVx(y)VrQqc2vu5ts%T}>~|HI zE#^8hyBAnD5J8ixDms22pBhEsm zpN__>&qk!u&65WjphfzpjV#|BBm?ADwsTtgS4lS)Nh=If()SGJ()x;pYGOMoo0q)v7p=C>sG)8?d_FPw1#%`-oYto` zK~XZxfyo#vZuJE!3|Y+JXA}9U-+C%u7t|7y(0!V6!f3MJ z{7)}~eO7JoJx+RKOc(;zeW(B%JZZ9y4JNfl;Om7Cf?wmjvIic9rFI)Zif6V3G!|D~gPph7r?Qf#`xfjok* zwx?*RJI)U=_ROzZU;lF&D6)3dfO;JHP_yW4{QE)f?K*s8Kf^;n_>p!_+t-L4p_)%U zuNhX#Deb`J6y&Ca2va1=Np9Y%w#WfxN} zbda-yV?5R{Nh27a|4GO`u>IEi|nemORbX)^o#6*aq6@iErd#FBdr2grSzW@G$K z9f(`{YLW`?vul=xBjgo!(mZE6oWY&%Oeb-Q?jK97_qw&?er|nRUsuD#>?lZ%QewW6 zq9)eSq8>NrMDkD4(6{T9?RxRx2MzTc|08`_{o&2GJ2YPld8(;U#B&>CWn^R|m64f& z(3+rdl7~IrDX4iBS;kG0!Id;H+wfOJHl zPTq@THIrFqPq%cbJE048ncx85h6h=}vUGwvdsCaasr z_AjY5kSabQwwMsp)oT!3D>XX^4nCJyN_6_T`%E8Vx|_%%!HXmQV`diQ^gnzMmLuh3 z^8z|e*4@6oVs)XdNcv`!6#s+jVvU^f^S})Mrr|krOj4_nsp-DdltLi%0D>_@c z%ROcZ7^z&Y`}@dLK;5UZfDrP3%&|B5R_uPY4>5$kyA= zo0;>rL<8zM^Da@iqO%A{asKvbBCC=nd<9GL* z;Cb7`Cud1BFACdScrImPV^>qnr5|-Z$l}DSH#?4o zs(!R`;s>61Wp;Dm1_6dPvM;j>5-%w!#$_e=XKL3)aJ3xTZ3gzZwq~Q8#@PUfwq>({ zr;>GCJG;`C-G$~%ukcRI+_LIJ^G9ll zZv*#V>9sbQ`vdb{RQt_j0?NL+Hq3|k?VZtGK2y{->Ml49A^LE23%*N;B^`g=t}ByM zD6#io#j^poJ;XG_kD{Teb+RT!>eE7NGLIVa#2Q|u<*VkWgKrJQNDyvPDsnBUsvBs> z2{!1Bm^j{}6<{G0))v>ME=Z%`)#{2=Tt{s>v&MqL7w&DUg>-VMOT^W$v-#km4`$XX zs+X4ne6Pp5h%OB>j;T|t7Gn)tK6w6N1Q})RZKmpO!43Ir>=eNDvF6>{}Is|q+p#O>(0-W5}FnzZ_0pNyBn|Dk}{8izc z&I^~}<eqfjuY1 z9&?_&-4^RL$z%-@A&b+$2u(a20D*t(n=_@ehNWkz?Nk}i2}G?r6+V|Rsg%Cyc_+{j9|4YQ+&*GIV4oYpc4tNA>Au2 zBU$V+GNXVC^>?V^*2&8@aCXGzLM-`rxXSALZVm@^v$1@BpSoV@V~`hBPg+HHmWQ3M z9f;_iW&ApOk#7?%F8*}#W3%pUz~`P7Nz`wxuu~I;l2B5){joN4NJ9^ROV8$wZ}aB8UkMj~+GF~a zJ_h<#J7aWVu)jiX|NA`3zuv=14#P9qDaa23+i{0YxO?u5vaUC-fa%yadZn`~H=}}Y zD2{uR6Wol|6}*CqcuVDn87p6c9$wrq^dKTI6rIqbz{z`_Yp#kh0oVb8yJXjAdsnln z(ww$!cpIw zi%)j_WL@SgJ>T5;#<9@W5`Q<%ziUl;IZ>&|cTH(y%udIfv1#l)d6#7_NO2d2_*G;f=Y7?+T_9G ztKa`In*V)0=aFCF_~#NsV7KCeN%4K1d4D?f*|$T}DVwqiRJ?;6zNjP9Qx6NBVmHhNxGiYi zj%-oxx&eE}>{D)^x41w$A9$kUPKN1i!*&xbqYt<|n1poi@W=ls;bz$t78Aium;%2zC7zWn{?8}sN=pPOIV4E`LzTCP(8~Xdmqh<~A z!gAgB*=;|r*tpSIl=%%H&TG%ZkGFn!{}0c8@a!jV|KRPPJpX~`Kk@uWp8w2?A9(Q- bFMj03&piHs$Di={BOZVD|C2xbd-}ftrvUP@ From 4c65a8091a5c491375fd3770c2bfa8b8611451f4 Mon Sep 17 00:00:00 2001 From: zxhlyh Date: Tue, 29 Jul 2025 15:37:16 +0800 Subject: [PATCH 051/415] chore: base form (#23101) --- .../base/form/components/base/base-field.tsx | 37 +++++++++++++++++-- .../base/form/components/base/base-form.tsx | 13 ++++++- .../base/form/form-scenarios/auth/index.tsx | 2 + web/app/components/base/form/types.ts | 1 + web/app/components/base/radio/ui.tsx | 10 ++++- 5 files changed, 58 insertions(+), 5 deletions(-) diff --git a/web/app/components/base/form/components/base/base-field.tsx b/web/app/components/base/form/components/base/base-field.tsx index 0195f38795..8120fad6b0 100644 --- a/web/app/components/base/form/components/base/base-field.tsx +++ b/web/app/components/base/form/components/base/base-field.tsx @@ -11,6 +11,7 @@ import PureSelect from '@/app/components/base/select/pure' import type { FormSchema } from '@/app/components/base/form/types' import { FormTypeEnum } from '@/app/components/base/form/types' import { useRenderI18nObject } from '@/hooks/use-i18n' +import RadioE from '@/app/components/base/radio/ui' export type BaseFieldProps = { fieldClassName?: string @@ -57,8 +58,27 @@ const BaseField = ({ if (typeof placeholder === 'object' && placeholder !== null) return renderI18nObject(placeholder as Record) }, [placeholder, renderI18nObject]) + const optionValues = useStore(field.form.store, (s) => { + const result: Record = {} + options?.forEach((option) => { + if (option.show_on?.length) { + option.show_on.forEach((condition) => { + result[condition.variable] = s.values[condition.variable] + }) + } + }) + return result + }) const memorizedOptions = useMemo(() => { - return options?.map((option) => { + return options?.filter((option) => { + if (!option.show_on?.length) + return true + + return option.show_on.every((condition) => { + const conditionValue = optionValues[condition.variable] + return conditionValue === condition.value + }) + }).map((option) => { return { label: typeof option.label === 'string' ? option.label : renderI18nObject(option.label), value: option.value, @@ -151,17 +171,28 @@ const BaseField = ({ } { formSchema.type === FormTypeEnum.radio && ( -

    +
    { memorizedOptions.map(option => (
    field.handleChange(option.value)} > + { + formSchema.showRadioUI && ( + + ) + } {option.label}
    )) diff --git a/web/app/components/base/form/components/base/base-form.tsx b/web/app/components/base/form/components/base/base-form.tsx index 640d474b19..c056829db4 100644 --- a/web/app/components/base/form/components/base/base-form.tsx +++ b/web/app/components/base/form/components/base/base-form.tsx @@ -2,6 +2,7 @@ import { memo, useCallback, useImperativeHandle, + useMemo, } from 'react' import type { AnyFieldApi, @@ -45,8 +46,18 @@ const BaseForm = ({ disabled, formFromProps, }: BaseFormProps) => { + const initialDefaultValues = useMemo(() => { + if (defaultValues) + return defaultValues + + return formSchemas.reduce((acc, schema) => { + if (schema.default) + acc[schema.name] = schema.default + return acc + }, {} as Record) + }, [defaultValues]) const formFromHook = useForm({ - defaultValues, + defaultValues: initialDefaultValues, }) const form: any = formFromProps || formFromHook const { getFormValues } = useGetFormValues(form, formSchemas) diff --git a/web/app/components/base/form/form-scenarios/auth/index.tsx b/web/app/components/base/form/form-scenarios/auth/index.tsx index 3927f90959..f499e43f16 100644 --- a/web/app/components/base/form/form-scenarios/auth/index.tsx +++ b/web/app/components/base/form/form-scenarios/auth/index.tsx @@ -7,6 +7,7 @@ const AuthForm = ({ defaultValues, ref, formFromProps, + ...rest }: BaseFormProps) => { return ( ) } diff --git a/web/app/components/base/form/types.ts b/web/app/components/base/form/types.ts index c165d2939b..9b3beeee7f 100644 --- a/web/app/components/base/form/types.ts +++ b/web/app/components/base/form/types.ts @@ -58,6 +58,7 @@ export type FormSchema = { options?: FormOption[] labelClassName?: string validators?: AnyValidators + showRadioUI?: boolean } export type FormValues = Record diff --git a/web/app/components/base/radio/ui.tsx b/web/app/components/base/radio/ui.tsx index 178262d0b9..ea132c7d4b 100644 --- a/web/app/components/base/radio/ui.tsx +++ b/web/app/components/base/radio/ui.tsx @@ -5,13 +5,21 @@ import cn from '@/utils/classnames' type Props = { isChecked: boolean + className?: string } const RadioUI: FC = ({ isChecked, + className, }) => { return ( -
    +
    ) } From 51a6b9dc5764b93b6849731bd0897797a148c91b Mon Sep 17 00:00:00 2001 From: NeatGuyCoding <15627489+NeatGuyCoding@users.noreply.github.com> Date: Tue, 29 Jul 2025 16:35:33 +0800 Subject: [PATCH 052/415] hotfix: clear_all_annotations should also execute delete_annotation_index_task just like delete_app_annotation (#23093) Co-authored-by: crazywoola <100913391+crazywoola@users.noreply.github.com> Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com> --- api/services/annotation_service.py | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git a/api/services/annotation_service.py b/api/services/annotation_service.py index 3239af998e..80dd63bf89 100644 --- a/api/services/annotation_service.py +++ b/api/services/annotation_service.py @@ -452,6 +452,11 @@ class AppAnnotationService: if not app: raise NotFound("App not found") + # if annotation reply is enabled, delete annotation index + app_annotation_setting = ( + db.session.query(AppAnnotationSetting).where(AppAnnotationSetting.app_id == app_id).first() + ) + annotations_query = db.session.query(MessageAnnotation).filter(MessageAnnotation.app_id == app_id) for annotation in annotations_query.yield_per(100): annotation_hit_histories_query = db.session.query(AppAnnotationHitHistory).filter( @@ -460,6 +465,12 @@ class AppAnnotationService: for annotation_hit_history in annotation_hit_histories_query.yield_per(100): db.session.delete(annotation_hit_history) + # if annotation reply is enabled, delete annotation index + if app_annotation_setting: + delete_annotation_index_task.delay( + annotation.id, app_id, current_user.current_tenant_id, app_annotation_setting.collection_binding_id + ) + db.session.delete(annotation) db.session.commit() From ae28ca0b8dad13ea30158f15be181f91cb890a48 Mon Sep 17 00:00:00 2001 From: NeatGuyCoding <15627489+NeatGuyCoding@users.noreply.github.com> Date: Tue, 29 Jul 2025 16:36:21 +0800 Subject: [PATCH 053/415] minor fix: wrong assignment (#23103) --- api/tests/unit_tests/core/ops/test_config_entity.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/api/tests/unit_tests/core/ops/test_config_entity.py b/api/tests/unit_tests/core/ops/test_config_entity.py index 4bcc6cb605..209f8b7c57 100644 --- a/api/tests/unit_tests/core/ops/test_config_entity.py +++ b/api/tests/unit_tests/core/ops/test_config_entity.py @@ -118,7 +118,7 @@ class TestLangfuseConfig: assert config.host == "https://custom.langfuse.com" def test_valid_config_with_path(self): - host = host = "https://custom.langfuse.com/api/v1" + host = "https://custom.langfuse.com/api/v1" config = LangfuseConfig(public_key="public_key", secret_key="secret_key", host=host) assert config.public_key == "public_key" assert config.secret_key == "secret_key" From 1bf0df03b5eeda913293e8294c9d14ed1652fd6f Mon Sep 17 00:00:00 2001 From: NeatGuyCoding <15627489+NeatGuyCoding@users.noreply.github.com> Date: Tue, 29 Jul 2025 16:36:29 +0800 Subject: [PATCH 054/415] minor fix: fix some translation (#23105) --- web/i18n/hi-IN/app.ts | 2 +- web/i18n/tr-TR/app-debug.ts | 1 + 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/web/i18n/hi-IN/app.ts b/web/i18n/hi-IN/app.ts index 4c6bc7a8f8..9b13fdc392 100644 --- a/web/i18n/hi-IN/app.ts +++ b/web/i18n/hi-IN/app.ts @@ -261,7 +261,7 @@ const translation = { noAccessPermission: 'वेब एप्लिकेशन तक पहुँचने की अनुमति नहीं है', maxActiveRequests: 'अधिकतम समवर्ती अनुरोध', maxActiveRequestsPlaceholder: 'असीमित के लिए 0 दर्ज करें', - maxActiveRequestsTip: 'प्रति ऐप अधिकतम सक्रिय अनुरोधों की अधिकतम संख्या (असीमित के लिए 0)', + maxActiveRequestsTip: 'प्रति ऐप सक्रिय अनुरोधों की अधिकतम संख्या (असीमित के लिए 0)', } export default translation diff --git a/web/i18n/tr-TR/app-debug.ts b/web/i18n/tr-TR/app-debug.ts index 631974edb5..c9a5f7b585 100644 --- a/web/i18n/tr-TR/app-debug.ts +++ b/web/i18n/tr-TR/app-debug.ts @@ -350,6 +350,7 @@ const translation = { content: 'İçerik', required: 'Gerekli', errorMsg: { + varNameRequired: 'Değişken adı gereklidir', labelNameRequired: 'Etiket adı gereklidir', varNameCanBeRepeat: 'Değişken adı tekrar edemez', atLeastOneOption: 'En az bir seçenek gereklidir', From f4d4a32af2efe4ea9416046f3c90df36bb35de3a Mon Sep 17 00:00:00 2001 From: lyzno1 <92089059+lyzno1@users.noreply.github.com> Date: Tue, 29 Jul 2025 18:24:57 +0800 Subject: [PATCH 055/415] Feat/enhance i18n scripts (#23114) --- web/__tests__/check-i18n.test.ts | 569 ++++++++++++++++++++++++ web/i18n-config/check-i18n.js | 223 +++++++++- web/i18n/zh-Hans/app-annotation.ts | 3 - web/i18n/zh-Hans/app-debug.ts | 1 - web/i18n/zh-Hans/app.ts | 2 - web/i18n/zh-Hans/login.ts | 1 - web/i18n/zh-Hans/time.ts | 1 - web/i18n/zh-Hans/workflow.ts | 1 - web/i18n/zh-Hant/app-annotation.ts | 3 - web/i18n/zh-Hant/app.ts | 14 - web/i18n/zh-Hant/billing.ts | 26 -- web/i18n/zh-Hant/common.ts | 1 - web/i18n/zh-Hant/dataset-creation.ts | 2 - web/i18n/zh-Hant/dataset-documents.ts | 1 - web/i18n/zh-Hant/dataset-hit-testing.ts | 1 - web/i18n/zh-Hant/login.ts | 1 - web/i18n/zh-Hant/tools.ts | 1 - web/i18n/zh-Hant/workflow.ts | 3 - 18 files changed, 783 insertions(+), 71 deletions(-) create mode 100644 web/__tests__/check-i18n.test.ts diff --git a/web/__tests__/check-i18n.test.ts b/web/__tests__/check-i18n.test.ts new file mode 100644 index 0000000000..173aa96118 --- /dev/null +++ b/web/__tests__/check-i18n.test.ts @@ -0,0 +1,569 @@ +import fs from 'node:fs' +import path from 'node:path' + +// Mock functions to simulate the check-i18n functionality +const vm = require('node:vm') +const transpile = require('typescript').transpile + +describe('check-i18n script functionality', () => { + const testDir = path.join(__dirname, '../i18n-test') + const testEnDir = path.join(testDir, 'en-US') + const testZhDir = path.join(testDir, 'zh-Hans') + + // Helper function that replicates the getKeysFromLanguage logic + async function getKeysFromLanguage(language: string, testPath = testDir): Promise { + return new Promise((resolve, reject) => { + const folderPath = path.resolve(testPath, language) + const allKeys: string[] = [] + + if (!fs.existsSync(folderPath)) { + resolve([]) + return + } + + fs.readdir(folderPath, (err, files) => { + if (err) { + reject(err) + return + } + + const translationFiles = files.filter(file => /\.(ts|js)$/.test(file)) + + translationFiles.forEach((file) => { + const filePath = path.join(folderPath, file) + const fileName = file.replace(/\.[^/.]+$/, '') + const camelCaseFileName = fileName.replace(/[-_](.)/g, (_, c) => + c.toUpperCase(), + ) + + try { + const content = fs.readFileSync(filePath, 'utf8') + const moduleExports = {} + const context = { + exports: moduleExports, + module: { exports: moduleExports }, + require, + console, + __filename: filePath, + __dirname: folderPath, + } + + vm.runInNewContext(transpile(content), context) + const translationObj = moduleExports.default || moduleExports + + if(!translationObj || typeof translationObj !== 'object') + throw new Error(`Error parsing file: ${filePath}`) + + const nestedKeys: string[] = [] + const iterateKeys = (obj: any, prefix = '') => { + for (const key in obj) { + const nestedKey = prefix ? `${prefix}.${key}` : key + if (typeof obj[key] === 'object' && obj[key] !== null && !Array.isArray(obj[key])) { + // This is an object (but not array), recurse into it but don't add it as a key + iterateKeys(obj[key], nestedKey) + } + else { + // This is a leaf node (string, number, boolean, array, etc.), add it as a key + nestedKeys.push(nestedKey) + } + } + } + iterateKeys(translationObj) + + const fileKeys = nestedKeys.map(key => `${camelCaseFileName}.${key}`) + allKeys.push(...fileKeys) + } + catch (error) { + reject(error) + } + }) + resolve(allKeys) + }) + }) + } + + beforeEach(() => { + // Clean up and create test directories + if (fs.existsSync(testDir)) + fs.rmSync(testDir, { recursive: true }) + + fs.mkdirSync(testDir, { recursive: true }) + fs.mkdirSync(testEnDir, { recursive: true }) + fs.mkdirSync(testZhDir, { recursive: true }) + }) + + afterEach(() => { + // Clean up test files + if (fs.existsSync(testDir)) + fs.rmSync(testDir, { recursive: true }) + }) + + describe('Key extraction logic', () => { + it('should extract only leaf node keys, not intermediate objects', async () => { + const testContent = `const translation = { + simple: 'Simple Value', + nested: { + level1: 'Level 1 Value', + deep: { + level2: 'Level 2 Value' + } + }, + array: ['not extracted'], + number: 42, + boolean: true +} + +export default translation +` + + fs.writeFileSync(path.join(testEnDir, 'test.ts'), testContent) + + const keys = await getKeysFromLanguage('en-US') + + expect(keys).toEqual([ + 'test.simple', + 'test.nested.level1', + 'test.nested.deep.level2', + 'test.array', + 'test.number', + 'test.boolean', + ]) + + // Should not include intermediate object keys + expect(keys).not.toContain('test.nested') + expect(keys).not.toContain('test.nested.deep') + }) + + it('should handle camelCase file name conversion correctly', async () => { + const testContent = `const translation = { + key: 'value' +} + +export default translation +` + + fs.writeFileSync(path.join(testEnDir, 'app-debug.ts'), testContent) + fs.writeFileSync(path.join(testEnDir, 'user_profile.ts'), testContent) + + const keys = await getKeysFromLanguage('en-US') + + expect(keys).toContain('appDebug.key') + expect(keys).toContain('userProfile.key') + }) + }) + + describe('Missing keys detection', () => { + it('should detect missing keys in target language', async () => { + const enContent = `const translation = { + common: { + save: 'Save', + cancel: 'Cancel', + delete: 'Delete' + }, + app: { + title: 'My App', + version: '1.0' + } +} + +export default translation +` + + const zhContent = `const translation = { + common: { + save: '保存', + cancel: '取消' + // missing 'delete' + }, + app: { + title: '我的应用' + // missing 'version' + } +} + +export default translation +` + + fs.writeFileSync(path.join(testEnDir, 'test.ts'), enContent) + fs.writeFileSync(path.join(testZhDir, 'test.ts'), zhContent) + + const enKeys = await getKeysFromLanguage('en-US') + const zhKeys = await getKeysFromLanguage('zh-Hans') + + const missingKeys = enKeys.filter(key => !zhKeys.includes(key)) + + expect(missingKeys).toContain('test.common.delete') + expect(missingKeys).toContain('test.app.version') + expect(missingKeys).toHaveLength(2) + }) + }) + + describe('Extra keys detection', () => { + it('should detect extra keys in target language', async () => { + const enContent = `const translation = { + common: { + save: 'Save', + cancel: 'Cancel' + } +} + +export default translation +` + + const zhContent = `const translation = { + common: { + save: '保存', + cancel: '取消', + delete: '删除', // extra key + extra: '额外的' // another extra key + }, + newSection: { + someKey: '某个值' // extra section + } +} + +export default translation +` + + fs.writeFileSync(path.join(testEnDir, 'test.ts'), enContent) + fs.writeFileSync(path.join(testZhDir, 'test.ts'), zhContent) + + const enKeys = await getKeysFromLanguage('en-US') + const zhKeys = await getKeysFromLanguage('zh-Hans') + + const extraKeys = zhKeys.filter(key => !enKeys.includes(key)) + + expect(extraKeys).toContain('test.common.delete') + expect(extraKeys).toContain('test.common.extra') + expect(extraKeys).toContain('test.newSection.someKey') + expect(extraKeys).toHaveLength(3) + }) + }) + + describe('File filtering logic', () => { + it('should filter keys by specific file correctly', async () => { + // Create multiple files + const file1Content = `const translation = { + button: 'Button', + text: 'Text' +} + +export default translation +` + + const file2Content = `const translation = { + title: 'Title', + description: 'Description' +} + +export default translation +` + + fs.writeFileSync(path.join(testEnDir, 'components.ts'), file1Content) + fs.writeFileSync(path.join(testEnDir, 'pages.ts'), file2Content) + fs.writeFileSync(path.join(testZhDir, 'components.ts'), file1Content) + fs.writeFileSync(path.join(testZhDir, 'pages.ts'), file2Content) + + const allEnKeys = await getKeysFromLanguage('en-US') + const allZhKeys = await getKeysFromLanguage('zh-Hans') + + // Test file filtering logic + const targetFile = 'components' + const filteredEnKeys = allEnKeys.filter(key => + key.startsWith(targetFile.replace(/[-_](.)/g, (_, c) => c.toUpperCase())), + ) + const filteredZhKeys = allZhKeys.filter(key => + key.startsWith(targetFile.replace(/[-_](.)/g, (_, c) => c.toUpperCase())), + ) + + expect(allEnKeys).toHaveLength(4) // 2 keys from each file + expect(filteredEnKeys).toHaveLength(2) // only components keys + expect(filteredEnKeys).toContain('components.button') + expect(filteredEnKeys).toContain('components.text') + expect(filteredEnKeys).not.toContain('pages.title') + expect(filteredEnKeys).not.toContain('pages.description') + }) + }) + + describe('Complex nested structure handling', () => { + it('should handle deeply nested objects correctly', async () => { + const complexContent = `const translation = { + level1: { + level2: { + level3: { + level4: { + deepValue: 'Deep Value' + }, + anotherValue: 'Another Value' + }, + simpleValue: 'Simple Value' + }, + directValue: 'Direct Value' + }, + rootValue: 'Root Value' +} + +export default translation +` + + fs.writeFileSync(path.join(testEnDir, 'complex.ts'), complexContent) + + const keys = await getKeysFromLanguage('en-US') + + expect(keys).toContain('complex.level1.level2.level3.level4.deepValue') + expect(keys).toContain('complex.level1.level2.level3.anotherValue') + expect(keys).toContain('complex.level1.level2.simpleValue') + expect(keys).toContain('complex.level1.directValue') + expect(keys).toContain('complex.rootValue') + + // Should not include intermediate objects + expect(keys).not.toContain('complex.level1') + expect(keys).not.toContain('complex.level1.level2') + expect(keys).not.toContain('complex.level1.level2.level3') + expect(keys).not.toContain('complex.level1.level2.level3.level4') + }) + }) + + describe('Edge cases', () => { + it('should handle empty objects', async () => { + const emptyContent = `const translation = { + empty: {}, + withValue: 'value' +} + +export default translation +` + + fs.writeFileSync(path.join(testEnDir, 'empty.ts'), emptyContent) + + const keys = await getKeysFromLanguage('en-US') + + expect(keys).toContain('empty.withValue') + expect(keys).not.toContain('empty.empty') + }) + + it('should handle special characters in keys', async () => { + const specialContent = `const translation = { + 'key-with-dash': 'value1', + 'key_with_underscore': 'value2', + 'key.with.dots': 'value3', + normalKey: 'value4' +} + +export default translation +` + + fs.writeFileSync(path.join(testEnDir, 'special.ts'), specialContent) + + const keys = await getKeysFromLanguage('en-US') + + expect(keys).toContain('special.key-with-dash') + expect(keys).toContain('special.key_with_underscore') + expect(keys).toContain('special.key.with.dots') + expect(keys).toContain('special.normalKey') + }) + + it('should handle different value types', async () => { + const typesContent = `const translation = { + stringValue: 'string', + numberValue: 42, + booleanValue: true, + nullValue: null, + undefinedValue: undefined, + arrayValue: ['array', 'values'], + objectValue: { + nested: 'nested value' + } +} + +export default translation +` + + fs.writeFileSync(path.join(testEnDir, 'types.ts'), typesContent) + + const keys = await getKeysFromLanguage('en-US') + + expect(keys).toContain('types.stringValue') + expect(keys).toContain('types.numberValue') + expect(keys).toContain('types.booleanValue') + expect(keys).toContain('types.nullValue') + expect(keys).toContain('types.undefinedValue') + expect(keys).toContain('types.arrayValue') + expect(keys).toContain('types.objectValue.nested') + expect(keys).not.toContain('types.objectValue') + }) + }) + + describe('Real-world scenario tests', () => { + it('should handle app-debug structure like real files', async () => { + const appDebugEn = `const translation = { + pageTitle: { + line1: 'Prompt', + line2: 'Engineering' + }, + operation: { + applyConfig: 'Publish', + resetConfig: 'Reset', + debugConfig: 'Debug' + }, + generate: { + instruction: 'Instructions', + generate: 'Generate', + resTitle: 'Generated Prompt', + noDataLine1: 'Describe your use case on the left,', + noDataLine2: 'the orchestration preview will show here.' + } +} + +export default translation +` + + const appDebugZh = `const translation = { + pageTitle: { + line1: '提示词', + line2: '编排' + }, + operation: { + applyConfig: '发布', + resetConfig: '重置', + debugConfig: '调试' + }, + generate: { + instruction: '指令', + generate: '生成', + resTitle: '生成的提示词', + noData: '在左侧描述您的用例,编排预览将在此处显示。' // This is extra + } +} + +export default translation +` + + fs.writeFileSync(path.join(testEnDir, 'app-debug.ts'), appDebugEn) + fs.writeFileSync(path.join(testZhDir, 'app-debug.ts'), appDebugZh) + + const enKeys = await getKeysFromLanguage('en-US') + const zhKeys = await getKeysFromLanguage('zh-Hans') + + const missingKeys = enKeys.filter(key => !zhKeys.includes(key)) + const extraKeys = zhKeys.filter(key => !enKeys.includes(key)) + + expect(missingKeys).toContain('appDebug.generate.noDataLine1') + expect(missingKeys).toContain('appDebug.generate.noDataLine2') + expect(extraKeys).toContain('appDebug.generate.noData') + + expect(missingKeys).toHaveLength(2) + expect(extraKeys).toHaveLength(1) + }) + + it('should handle time structure with operation nested keys', async () => { + const timeEn = `const translation = { + months: { + January: 'January', + February: 'February' + }, + operation: { + now: 'Now', + ok: 'OK', + cancel: 'Cancel', + pickDate: 'Pick Date' + }, + title: { + pickTime: 'Pick Time' + }, + defaultPlaceholder: 'Pick a time...' +} + +export default translation +` + + const timeZh = `const translation = { + months: { + January: '一月', + February: '二月' + }, + operation: { + now: '此刻', + ok: '确定', + cancel: '取消', + pickDate: '选择日期' + }, + title: { + pickTime: '选择时间' + }, + pickDate: '选择日期', // This is extra - duplicates operation.pickDate + defaultPlaceholder: '请选择时间...' +} + +export default translation +` + + fs.writeFileSync(path.join(testEnDir, 'time.ts'), timeEn) + fs.writeFileSync(path.join(testZhDir, 'time.ts'), timeZh) + + const enKeys = await getKeysFromLanguage('en-US') + const zhKeys = await getKeysFromLanguage('zh-Hans') + + const missingKeys = enKeys.filter(key => !zhKeys.includes(key)) + const extraKeys = zhKeys.filter(key => !enKeys.includes(key)) + + expect(missingKeys).toHaveLength(0) // No missing keys + expect(extraKeys).toContain('time.pickDate') // Extra root-level pickDate + expect(extraKeys).toHaveLength(1) + + // Should have both keys available + expect(zhKeys).toContain('time.operation.pickDate') // Correct nested key + expect(zhKeys).toContain('time.pickDate') // Extra duplicate key + }) + }) + + describe('Statistics calculation', () => { + it('should calculate correct difference statistics', async () => { + const enContent = `const translation = { + key1: 'value1', + key2: 'value2', + key3: 'value3' +} + +export default translation +` + + const zhContentMissing = `const translation = { + key1: 'value1', + key2: 'value2' + // missing key3 +} + +export default translation +` + + const zhContentExtra = `const translation = { + key1: 'value1', + key2: 'value2', + key3: 'value3', + key4: 'extra', + key5: 'extra2' +} + +export default translation +` + + fs.writeFileSync(path.join(testEnDir, 'stats.ts'), enContent) + + // Test missing keys scenario + fs.writeFileSync(path.join(testZhDir, 'stats.ts'), zhContentMissing) + + const enKeys = await getKeysFromLanguage('en-US') + const zhKeysMissing = await getKeysFromLanguage('zh-Hans') + + expect(enKeys.length - zhKeysMissing.length).toBe(1) // +1 means 1 missing key + + // Test extra keys scenario + fs.writeFileSync(path.join(testZhDir, 'stats.ts'), zhContentExtra) + + const zhKeysExtra = await getKeysFromLanguage('zh-Hans') + + expect(enKeys.length - zhKeysExtra.length).toBe(-2) // -2 means 2 extra keys + }) + }) +}) diff --git a/web/i18n-config/check-i18n.js b/web/i18n-config/check-i18n.js index 7e3b725c9e..edc2566a3c 100644 --- a/web/i18n-config/check-i18n.js +++ b/web/i18n-config/check-i18n.js @@ -58,9 +58,14 @@ async function getKeysFromLanguage(language) { const iterateKeys = (obj, prefix = '') => { for (const key in obj) { const nestedKey = prefix ? `${prefix}.${key}` : key - nestedKeys.push(nestedKey) - if (typeof obj[key] === 'object' && obj[key] !== null) + if (typeof obj[key] === 'object' && obj[key] !== null && !Array.isArray(obj[key])) { + // This is an object (but not array), recurse into it but don't add it as a key iterateKeys(obj[key], nestedKey) + } + else { + // This is a leaf node (string, number, boolean, array, etc.), add it as a key + nestedKeys.push(nestedKey) + } } } iterateKeys(translationObj) @@ -79,15 +84,176 @@ async function getKeysFromLanguage(language) { }) } +function removeKeysFromObject(obj, keysToRemove, prefix = '') { + let modified = false + for (const key in obj) { + const fullKey = prefix ? `${prefix}.${key}` : key + + if (keysToRemove.includes(fullKey)) { + delete obj[key] + modified = true + console.log(`🗑️ Removed key: ${fullKey}`) + } + else if (typeof obj[key] === 'object' && obj[key] !== null) { + const subModified = removeKeysFromObject(obj[key], keysToRemove, fullKey) + modified = modified || subModified + } + } + return modified +} + +async function removeExtraKeysFromFile(language, fileName, extraKeys) { + const filePath = path.resolve(__dirname, '../i18n', language, `${fileName}.ts`) + + if (!fs.existsSync(filePath)) { + console.log(`⚠️ File not found: ${filePath}`) + return false + } + + try { + // Filter keys that belong to this file + const camelCaseFileName = fileName.replace(/[-_](.)/g, (_, c) => c.toUpperCase()) + const fileSpecificKeys = extraKeys + .filter(key => key.startsWith(`${camelCaseFileName}.`)) + .map(key => key.substring(camelCaseFileName.length + 1)) // Remove file prefix + + if (fileSpecificKeys.length === 0) + return false + + console.log(`🔄 Processing file: ${filePath}`) + + // Read the original file content + const content = fs.readFileSync(filePath, 'utf8') + const lines = content.split('\n') + + let modified = false + const linesToRemove = [] + + // Find lines to remove for each key + for (const keyToRemove of fileSpecificKeys) { + const keyParts = keyToRemove.split('.') + let targetLineIndex = -1 + + // Build regex pattern for the exact key path + if (keyParts.length === 1) { + // Simple key at root level like "pickDate: 'value'" + for (let i = 0; i < lines.length; i++) { + const line = lines[i] + const simpleKeyPattern = new RegExp(`^\\s*${keyParts[0]}\\s*:`) + if (simpleKeyPattern.test(line)) { + targetLineIndex = i + break + } + } + } + else { + // Nested key - need to find the exact path + const currentPath = [] + let braceDepth = 0 + + for (let i = 0; i < lines.length; i++) { + const line = lines[i] + const trimmedLine = line.trim() + + // Track current object path + const keyMatch = trimmedLine.match(/^(\w+)\s*:\s*{/) + if (keyMatch) { + currentPath.push(keyMatch[1]) + braceDepth++ + } + else if (trimmedLine === '},' || trimmedLine === '}') { + if (braceDepth > 0) { + braceDepth-- + currentPath.pop() + } + } + + // Check if this line matches our target key + const leafKeyMatch = trimmedLine.match(/^(\w+)\s*:/) + if (leafKeyMatch) { + const fullPath = [...currentPath, leafKeyMatch[1]] + const fullPathString = fullPath.join('.') + + if (fullPathString === keyToRemove) { + targetLineIndex = i + break + } + } + } + } + + if (targetLineIndex !== -1) { + linesToRemove.push(targetLineIndex) + console.log(`🗑️ Found key to remove: ${keyToRemove} at line ${targetLineIndex + 1}`) + modified = true + } + else { + console.log(`⚠️ Could not find key: ${keyToRemove}`) + } + } + + if (modified) { + // Remove lines in reverse order to maintain correct indices + linesToRemove.sort((a, b) => b - a) + + for (const lineIndex of linesToRemove) { + const line = lines[lineIndex] + console.log(`🗑️ Removing line ${lineIndex + 1}: ${line.trim()}`) + lines.splice(lineIndex, 1) + + // Also remove trailing comma from previous line if it exists and the next line is a closing brace + if (lineIndex > 0 && lineIndex < lines.length) { + const prevLine = lines[lineIndex - 1] + const nextLine = lines[lineIndex] ? lines[lineIndex].trim() : '' + + if (prevLine.trim().endsWith(',') && (nextLine.startsWith('}') || nextLine === '')) + lines[lineIndex - 1] = prevLine.replace(/,\s*$/, '') + } + } + + // Write back to file + const newContent = lines.join('\n') + fs.writeFileSync(filePath, newContent) + console.log(`💾 Updated file: ${filePath}`) + return true + } + + return false + } + catch (error) { + console.error(`Error processing file ${filePath}:`, error.message) + return false + } +} + +// Add command line argument support +const targetFile = process.argv.find(arg => arg.startsWith('--file='))?.split('=')[1] +const targetLang = process.argv.find(arg => arg.startsWith('--lang='))?.split('=')[1] +const autoRemove = process.argv.includes('--auto-remove') + async function main() { const compareKeysCount = async () => { - const targetKeys = await getKeysFromLanguage(targetLanguage) - const languagesKeys = await Promise.all(languages.map(language => getKeysFromLanguage(language))) + const allTargetKeys = await getKeysFromLanguage(targetLanguage) + + // Filter target keys by file if specified + const targetKeys = targetFile + ? allTargetKeys.filter(key => key.startsWith(targetFile.replace(/[-_](.)/g, (_, c) => c.toUpperCase()))) + : allTargetKeys + + // Filter languages by target language if specified + const languagesToProcess = targetLang ? [targetLang] : languages + + const allLanguagesKeys = await Promise.all(languagesToProcess.map(language => getKeysFromLanguage(language))) + + // Filter language keys by file if specified + const languagesKeys = targetFile + ? allLanguagesKeys.map(keys => keys.filter(key => key.startsWith(targetFile.replace(/[-_](.)/g, (_, c) => c.toUpperCase())))) + : allLanguagesKeys const keysCount = languagesKeys.map(keys => keys.length) const targetKeysCount = targetKeys.length - const comparison = languages.reduce((result, language, index) => { + const comparison = languagesToProcess.reduce((result, language, index) => { const languageKeysCount = keysCount[index] const difference = targetKeysCount - languageKeysCount result[language] = difference @@ -96,13 +262,52 @@ async function main() { console.log(comparison) - // Print missing keys - languages.forEach((language, index) => { - const missingKeys = targetKeys.filter(key => !languagesKeys[index].includes(key)) + // Print missing keys and extra keys + for (let index = 0; index < languagesToProcess.length; index++) { + const language = languagesToProcess[index] + const languageKeys = languagesKeys[index] + const missingKeys = targetKeys.filter(key => !languageKeys.includes(key)) + const extraKeys = languageKeys.filter(key => !targetKeys.includes(key)) + console.log(`Missing keys in ${language}:`, missingKeys) - }) + + // Show extra keys only when there are extra keys (negative difference) + if (extraKeys.length > 0) { + console.log(`Extra keys in ${language} (not in ${targetLanguage}):`, extraKeys) + + // Auto-remove extra keys if flag is set + if (autoRemove) { + console.log(`\n🤖 Auto-removing extra keys from ${language}...`) + + // Get all translation files + const i18nFolder = path.resolve(__dirname, '../i18n', language) + const files = fs.readdirSync(i18nFolder) + .filter(file => /\.ts$/.test(file)) + .map(file => file.replace(/\.ts$/, '')) + .filter(f => !targetFile || f === targetFile) // Filter by target file if specified + + let totalRemoved = 0 + for (const fileName of files) { + const removed = await removeExtraKeysFromFile(language, fileName, extraKeys) + if (removed) totalRemoved++ + } + + console.log(`✅ Auto-removal completed for ${language}. Modified ${totalRemoved} files.`) + } + } + } } + console.log('🚀 Starting check-i18n script...') + if (targetFile) + console.log(`📁 Checking file: ${targetFile}`) + + if (targetLang) + console.log(`🌍 Checking language: ${targetLang}`) + + if (autoRemove) + console.log('🤖 Auto-remove mode: ENABLED') + compareKeysCount() } diff --git a/web/i18n/zh-Hans/app-annotation.ts b/web/i18n/zh-Hans/app-annotation.ts index 44d075715f..cb2d3be0cd 100644 --- a/web/i18n/zh-Hans/app-annotation.ts +++ b/web/i18n/zh-Hans/app-annotation.ts @@ -9,8 +9,6 @@ const translation = { table: { header: { question: '提问', - match: '匹配', - response: '回复', answer: '答案', createdAt: '创建时间', hits: '命中次数', @@ -71,7 +69,6 @@ const translation = { noHitHistory: '没有命中历史', }, hitHistoryTable: { - question: '问题', query: '提问', match: '匹配', response: '回复', diff --git a/web/i18n/zh-Hans/app-debug.ts b/web/i18n/zh-Hans/app-debug.ts index 8bdb56ac64..b58eedb5b3 100644 --- a/web/i18n/zh-Hans/app-debug.ts +++ b/web/i18n/zh-Hans/app-debug.ts @@ -254,7 +254,6 @@ const translation = { noDataLine1: '在左侧描述您的用例,', noDataLine2: '编排预览将在此处显示。', apply: '应用', - noData: '在左侧描述您的用例,编排预览将在此处显示。', loading: '为您编排应用程序中…', overwriteTitle: '覆盖现有配置?', overwriteMessage: '应用此提示将覆盖现有配置。', diff --git a/web/i18n/zh-Hans/app.ts b/web/i18n/zh-Hans/app.ts index 9e577a360e..7c8b292ce4 100644 --- a/web/i18n/zh-Hans/app.ts +++ b/web/i18n/zh-Hans/app.ts @@ -35,7 +35,6 @@ const translation = { learnMore: '了解更多', startFromBlank: '创建空白应用', startFromTemplate: '从应用模版创建', - captionAppType: '想要哪种应用类型?', foundResult: '{{count}} 个结果', foundResults: '{{count}} 个结果', noAppsFound: '未找到应用', @@ -45,7 +44,6 @@ const translation = { chatbotUserDescription: '通过简单的配置快速搭建一个基于 LLM 的对话机器人。支持切换为 Chatflow 编排。', completionShortDescription: '用于文本生成任务的 AI 助手', completionUserDescription: '通过简单的配置快速搭建一个面向文本生成类任务的 AI 助手。', - completionWarning: '该类型不久后将不再支持创建', agentShortDescription: '具备推理与自主工具调用的智能助手', agentUserDescription: '能够迭代式的规划推理、自主工具调用,直至完成任务目标的智能助手。', workflowShortDescription: '面向单轮自动化任务的编排工作流', diff --git a/web/i18n/zh-Hans/login.ts b/web/i18n/zh-Hans/login.ts index b63630e288..2276436d0e 100644 --- a/web/i18n/zh-Hans/login.ts +++ b/web/i18n/zh-Hans/login.ts @@ -77,7 +77,6 @@ const translation = { activated: '现在登录', adminInitPassword: '管理员初始化密码', validate: '验证', - sso: '使用 SSO 继续', checkCode: { checkYourEmail: '验证您的电子邮件', tips: '验证码已经发送到您的邮箱 {{email}}', diff --git a/web/i18n/zh-Hans/time.ts b/web/i18n/zh-Hans/time.ts index 5158a710b5..8a223d9dd1 100644 --- a/web/i18n/zh-Hans/time.ts +++ b/web/i18n/zh-Hans/time.ts @@ -26,7 +26,6 @@ const translation = { now: '此刻', ok: '确定', cancel: '取消', - pickDate: '选择日期', }, title: { pickTime: '选择时间', diff --git a/web/i18n/zh-Hans/workflow.ts b/web/i18n/zh-Hans/workflow.ts index 81e207f67e..1f0300ae2a 100644 --- a/web/i18n/zh-Hans/workflow.ts +++ b/web/i18n/zh-Hans/workflow.ts @@ -213,7 +213,6 @@ const translation = { startRun: '开始运行', running: '运行中', testRunIteration: '测试运行迭代', - testRunLoop: '测试运行循环', back: '返回', iteration: '迭代', loop: '循环', diff --git a/web/i18n/zh-Hant/app-annotation.ts b/web/i18n/zh-Hant/app-annotation.ts index 02eb98f5d4..538546928c 100644 --- a/web/i18n/zh-Hant/app-annotation.ts +++ b/web/i18n/zh-Hant/app-annotation.ts @@ -9,8 +9,6 @@ const translation = { table: { header: { question: '提問', - match: '匹配', - response: '回覆', answer: '答案', createdAt: '建立時間', hits: '命中次數', @@ -71,7 +69,6 @@ const translation = { noHitHistory: '沒有命中歷史', }, hitHistoryTable: { - question: '問題', query: '提問', match: '匹配', response: '回覆', diff --git a/web/i18n/zh-Hant/app.ts b/web/i18n/zh-Hant/app.ts index e6a3a0b570..0bf99d5067 100644 --- a/web/i18n/zh-Hant/app.ts +++ b/web/i18n/zh-Hant/app.ts @@ -26,21 +26,7 @@ const translation = { newApp: { startFromBlank: '建立空白應用', startFromTemplate: '從應用模版建立', - captionAppType: '想要哪種應用類型?', - chatbotDescription: '使用大型語言模型構建聊天助手', - completionDescription: '構建一個根據提示生成高品質文字的應用程式,例如生成文章、摘要、翻譯等。', - completionWarning: '該類型不久後將不再支援建立', - agentDescription: '構建一個智慧 Agent,可以自主選擇工具來完成任務', - workflowDescription: '以工作流的形式編排生成型應用,提供更多的自訂設定。它適合有經驗的使用者。', workflowWarning: '正在進行 Beta 測試', - chatbotType: '聊天助手編排方法', - basic: '基礎編排', - basicTip: '新手適用,可以切換成工作流編排', - basicFor: '新手適用', - basicDescription: '基本編排允許使用簡單的設定編排聊天機器人應用程式,而無需修改內建提示。它適合初學者。', - advanced: '工作流編排', - advancedFor: '進階使用者適用', - advancedDescription: '工作流編排以工作流的形式編排聊天機器人,提供自訂設定,包括編輯內建提示的能力。它適合有經驗的使用者。', captionName: '應用名稱 & 圖示', appNamePlaceholder: '給你的應用起個名字', captionDescription: '描述', diff --git a/web/i18n/zh-Hant/billing.ts b/web/i18n/zh-Hant/billing.ts index 6ede2c6213..f957bc4eab 100644 --- a/web/i18n/zh-Hant/billing.ts +++ b/web/i18n/zh-Hant/billing.ts @@ -23,18 +23,13 @@ const translation = { contractOwner: '聯絡團隊管理員', free: '免費', startForFree: '免費開始', - getStartedWith: '開始使用', contactSales: '聯絡銷售', talkToSales: '聯絡銷售', modelProviders: '支援的模型提供商', - teamMembers: '團隊成員', buildApps: '構建應用程式數', vectorSpace: '向量空間', vectorSpaceTooltip: '向量空間是 LLMs 理解您的資料所需的長期記憶系統。', - vectorSpaceBillingTooltip: '向量儲存是將知識庫向量化處理後為讓 LLMs 理解資料而使用的長期記憶儲存,1MB 大約能滿足 1.2 million character 的向量化後資料儲存(以 OpenAI Embedding 模型估算,不同模型計算方式有差異)。在向量化過程中,實際的壓縮或尺寸減小取決於內容的複雜性和冗餘性。', - documentsUploadQuota: '文件上傳配額', documentProcessingPriority: '文件處理優先順序', - documentProcessingPriorityTip: '如需更高的文件處理優先順序,請升級您的套餐', documentProcessingPriorityUpgrade: '以更快的速度、更高的精度處理更多的資料。', priority: { 'standard': '標準', @@ -103,19 +98,16 @@ const translation = { sandbox: { name: 'Sandbox', description: '200 次 GPT 免費試用', - includesTitle: '包括:', for: '核心功能免費試用', }, professional: { name: 'Professional', description: '讓個人和小團隊能夠以經濟實惠的方式釋放更多能力。', - includesTitle: 'Sandbox 計劃中的一切,加上:', for: '適合獨立開發者/小型團隊', }, team: { name: 'Team', description: '協作無限制並享受頂級效能。', - includesTitle: 'Professional 計劃中的一切,加上:', for: '適用於中型團隊', }, enterprise: { @@ -123,15 +115,6 @@ const translation = { description: '獲得大規模關鍵任務系統的完整功能和支援。', includesTitle: 'Team 計劃中的一切,加上:', features: { - 1: '商業許可證授權', - 6: '先進安全與控制', - 3: '多個工作區及企業管理', - 2: '專屬企業功能', - 4: '單一登入', - 8: '專業技術支援', - 0: '企業級可擴展部署解決方案', - 7: 'Dify 官方的更新和維護', - 5: '由 Dify 合作夥伴協商的服務水平協議', }, price: '自訂', btnText: '聯繫銷售', @@ -140,9 +123,6 @@ const translation = { }, community: { features: { - 0: '所有核心功能均在公共存儲庫下釋出', - 2: '遵循 Dify 開源許可證', - 1: '單一工作區域', }, includesTitle: '免費功能:', btnText: '開始使用社區', @@ -153,10 +133,6 @@ const translation = { }, premium: { features: { - 2: '網頁應用程序標誌及品牌自定義', - 0: '各種雲端服務提供商的自我管理可靠性', - 1: '單一工作區域', - 3: '優先電子郵件及聊天支持', }, for: '適用於中型組織和團隊', comingSoon: '微軟 Azure 與 Google Cloud 支持即將推出', @@ -173,8 +149,6 @@ const translation = { fullSolution: '升級您的套餐以獲得更多空間。', }, apps: { - fullTipLine1: '升級您的套餐以', - fullTipLine2: '構建更多的程式。', fullTip1: '升級以創建更多應用程序', fullTip2des: '建議清除不活躍的應用程式以釋放使用空間,或聯繫我們。', contactUs: '聯繫我們', diff --git a/web/i18n/zh-Hant/common.ts b/web/i18n/zh-Hant/common.ts index 6404d0e003..ccfca85bfe 100644 --- a/web/i18n/zh-Hant/common.ts +++ b/web/i18n/zh-Hant/common.ts @@ -197,7 +197,6 @@ const translation = { showAppLength: '顯示 {{length}} 個應用', delete: '刪除帳戶', deleteTip: '刪除您的帳戶將永久刪除您的所有資料並且無法恢復。', - deleteConfirmTip: '請將以下內容從您的註冊電子郵件發送至 ', account: '帳戶', myAccount: '我的帳戶', studio: '工作室', diff --git a/web/i18n/zh-Hant/dataset-creation.ts b/web/i18n/zh-Hant/dataset-creation.ts index fca1ff651e..e99fb0c320 100644 --- a/web/i18n/zh-Hant/dataset-creation.ts +++ b/web/i18n/zh-Hant/dataset-creation.ts @@ -1,8 +1,6 @@ const translation = { steps: { header: { - creation: '建立知識庫', - update: '上傳檔案', fallbackRoute: '知識', }, one: '選擇資料來源', diff --git a/web/i18n/zh-Hant/dataset-documents.ts b/web/i18n/zh-Hant/dataset-documents.ts index b04a339070..1b482f181f 100644 --- a/web/i18n/zh-Hant/dataset-documents.ts +++ b/web/i18n/zh-Hant/dataset-documents.ts @@ -341,7 +341,6 @@ const translation = { keywords: '關鍵詞', addKeyWord: '新增關鍵詞', keywordError: '關鍵詞最大長度為 20', - characters: '字元', hitCount: '召回次數', vectorHash: '向量雜湊:', questionPlaceholder: '在這裡新增問題', diff --git a/web/i18n/zh-Hant/dataset-hit-testing.ts b/web/i18n/zh-Hant/dataset-hit-testing.ts index 0dbe149025..4b8cc5150a 100644 --- a/web/i18n/zh-Hant/dataset-hit-testing.ts +++ b/web/i18n/zh-Hant/dataset-hit-testing.ts @@ -2,7 +2,6 @@ const translation = { title: '召回測試', desc: '基於給定的查詢文字測試知識庫的召回效果。', dateTimeFormat: 'YYYY-MM-DD HH:mm', - recents: '最近查詢', table: { header: { source: '資料來源', diff --git a/web/i18n/zh-Hant/login.ts b/web/i18n/zh-Hant/login.ts index ae617cb5c0..8187323276 100644 --- a/web/i18n/zh-Hant/login.ts +++ b/web/i18n/zh-Hant/login.ts @@ -70,7 +70,6 @@ const translation = { activated: '現在登入', adminInitPassword: '管理員初始化密碼', validate: '驗證', - sso: '繼續使用 SSO', checkCode: { verify: '驗證', resend: '發送', diff --git a/web/i18n/zh-Hant/tools.ts b/web/i18n/zh-Hant/tools.ts index fbfb09e321..9dad3a74cf 100644 --- a/web/i18n/zh-Hant/tools.ts +++ b/web/i18n/zh-Hant/tools.ts @@ -54,7 +54,6 @@ const translation = { keyTooltip: 'HTTP 頭部名稱,如果你不知道是什麼,可以將其保留為 Authorization 或設定為自定義值', types: { none: '無', - api_key: 'API Key', apiKeyPlaceholder: 'HTTP 頭部名稱,用於傳遞 API Key', apiValuePlaceholder: '輸入 API Key', api_key_query: '查詢參數', diff --git a/web/i18n/zh-Hant/workflow.ts b/web/i18n/zh-Hant/workflow.ts index 935d042fa7..bcdfbb81d3 100644 --- a/web/i18n/zh-Hant/workflow.ts +++ b/web/i18n/zh-Hant/workflow.ts @@ -107,10 +107,8 @@ const translation = { loadMore: '載入更多工作流', noHistory: '無歷史記錄', publishUpdate: '發布更新', - referenceVar: '參考變量', exportSVG: '匯出為 SVG', exportPNG: '匯出為 PNG', - noExist: '沒有這個變數', versionHistory: '版本歷史', exitVersions: '退出版本', exportImage: '匯出圖像', @@ -610,7 +608,6 @@ const translation = { }, select: '選擇', addSubVariable: '子變數', - condition: '條件', }, variableAssigner: { title: '變量賦值', From 00cb1c26a1472d2512173453fef8a7f6669c35de Mon Sep 17 00:00:00 2001 From: Shaun Date: Tue, 29 Jul 2025 19:34:46 +0800 Subject: [PATCH 056/415] refactor: pass external_trace_id to message trace (#23089) Co-authored-by: autofix-ci[bot] <114827586+autofix-ci[bot]@users.noreply.github.com> --- api/controllers/service_api/app/completion.py | 3 ++ api/core/ops/aliyun_trace/aliyun_trace.py | 28 ++++++++++++--- .../aliyun_trace/data_exporter/traceclient.py | 12 +++++-- .../arize_phoenix_trace.py | 28 ++++++++------- api/core/ops/entities/trace_entity.py | 1 + api/core/ops/langfuse_trace/langfuse_trace.py | 19 +++++----- .../ops/langsmith_trace/langsmith_trace.py | 17 +++++---- api/core/ops/opik_trace/opik_trace.py | 19 +++++----- api/core/ops/ops_trace_manager.py | 15 +++++--- api/core/ops/weave_trace/weave_trace.py | 35 +++++++++++++------ 10 files changed, 115 insertions(+), 62 deletions(-) diff --git a/api/controllers/service_api/app/completion.py b/api/controllers/service_api/app/completion.py index 7762672494..edc66cc5e9 100644 --- a/api/controllers/service_api/app/completion.py +++ b/api/controllers/service_api/app/completion.py @@ -47,6 +47,9 @@ class CompletionApi(Resource): parser.add_argument("retriever_from", type=str, required=False, default="dev", location="json") args = parser.parse_args() + external_trace_id = get_external_trace_id(request) + if external_trace_id: + args["external_trace_id"] = external_trace_id streaming = args["response_mode"] == "streaming" diff --git a/api/core/ops/aliyun_trace/aliyun_trace.py b/api/core/ops/aliyun_trace/aliyun_trace.py index af0e38f7ef..06050619e9 100644 --- a/api/core/ops/aliyun_trace/aliyun_trace.py +++ b/api/core/ops/aliyun_trace/aliyun_trace.py @@ -10,6 +10,7 @@ from sqlalchemy.orm import Session, sessionmaker from core.ops.aliyun_trace.data_exporter.traceclient import ( TraceClient, convert_datetime_to_nanoseconds, + convert_string_to_id, convert_to_span_id, convert_to_trace_id, generate_span_id, @@ -101,8 +102,9 @@ class AliyunDataTrace(BaseTraceInstance): raise ValueError(f"Aliyun get run url failed: {str(e)}") def workflow_trace(self, trace_info: WorkflowTraceInfo): - external_trace_id = trace_info.metadata.get("external_trace_id") - trace_id = external_trace_id or convert_to_trace_id(trace_info.workflow_run_id) + trace_id = convert_to_trace_id(trace_info.workflow_run_id) + if trace_info.trace_id: + trace_id = convert_string_to_id(trace_info.trace_id) workflow_span_id = convert_to_span_id(trace_info.workflow_run_id, "workflow") self.add_workflow_span(trace_id, workflow_span_id, trace_info) @@ -130,6 +132,9 @@ class AliyunDataTrace(BaseTraceInstance): status = Status(StatusCode.ERROR, trace_info.error) trace_id = convert_to_trace_id(message_id) + if trace_info.trace_id: + trace_id = convert_string_to_id(trace_info.trace_id) + message_span_id = convert_to_span_id(message_id, "message") message_span = SpanData( trace_id=trace_id, @@ -186,9 +191,13 @@ class AliyunDataTrace(BaseTraceInstance): return message_id = trace_info.message_id + trace_id = convert_to_trace_id(message_id) + if trace_info.trace_id: + trace_id = convert_string_to_id(trace_info.trace_id) + documents_data = extract_retrieval_documents(trace_info.documents) dataset_retrieval_span = SpanData( - trace_id=convert_to_trace_id(message_id), + trace_id=trace_id, parent_span_id=convert_to_span_id(message_id, "message"), span_id=generate_span_id(), name="dataset_retrieval", @@ -214,8 +223,12 @@ class AliyunDataTrace(BaseTraceInstance): if trace_info.error: status = Status(StatusCode.ERROR, trace_info.error) + trace_id = convert_to_trace_id(message_id) + if trace_info.trace_id: + trace_id = convert_string_to_id(trace_info.trace_id) + tool_span = SpanData( - trace_id=convert_to_trace_id(message_id), + trace_id=trace_id, parent_span_id=convert_to_span_id(message_id, "message"), span_id=generate_span_id(), name=trace_info.tool_name, @@ -451,8 +464,13 @@ class AliyunDataTrace(BaseTraceInstance): status: Status = Status(StatusCode.OK) if trace_info.error: status = Status(StatusCode.ERROR, trace_info.error) + + trace_id = convert_to_trace_id(message_id) + if trace_info.trace_id: + trace_id = convert_string_to_id(trace_info.trace_id) + suggested_question_span = SpanData( - trace_id=convert_to_trace_id(message_id), + trace_id=trace_id, parent_span_id=convert_to_span_id(message_id, "message"), span_id=convert_to_span_id(message_id, "suggested_question"), name="suggested_question", diff --git a/api/core/ops/aliyun_trace/data_exporter/traceclient.py b/api/core/ops/aliyun_trace/data_exporter/traceclient.py index 934ce95a64..bd19c8a503 100644 --- a/api/core/ops/aliyun_trace/data_exporter/traceclient.py +++ b/api/core/ops/aliyun_trace/data_exporter/traceclient.py @@ -181,15 +181,21 @@ def convert_to_trace_id(uuid_v4: Optional[str]) -> int: raise ValueError(f"Invalid UUID input: {e}") +def convert_string_to_id(string: Optional[str]) -> int: + if not string: + return generate_span_id() + hash_bytes = hashlib.sha256(string.encode("utf-8")).digest() + id = int.from_bytes(hash_bytes[:8], byteorder="big", signed=False) + return id + + def convert_to_span_id(uuid_v4: Optional[str], span_type: str) -> int: try: uuid_obj = uuid.UUID(uuid_v4) except Exception as e: raise ValueError(f"Invalid UUID input: {e}") combined_key = f"{uuid_obj.hex}-{span_type}" - hash_bytes = hashlib.sha256(combined_key.encode("utf-8")).digest() - span_id = int.from_bytes(hash_bytes[:8], byteorder="big", signed=False) - return span_id + return convert_string_to_id(combined_key) def convert_datetime_to_nanoseconds(start_time_a: Optional[datetime]) -> Optional[int]: diff --git a/api/core/ops/arize_phoenix_trace/arize_phoenix_trace.py b/api/core/ops/arize_phoenix_trace/arize_phoenix_trace.py index f252a022d8..a20f2485c8 100644 --- a/api/core/ops/arize_phoenix_trace/arize_phoenix_trace.py +++ b/api/core/ops/arize_phoenix_trace/arize_phoenix_trace.py @@ -91,16 +91,21 @@ def datetime_to_nanos(dt: Optional[datetime]) -> int: return int(dt.timestamp() * 1_000_000_000) -def uuid_to_trace_id(string: Optional[str]) -> int: - """Convert UUID string to a valid trace ID (16-byte integer).""" +def string_to_trace_id128(string: Optional[str]) -> int: + """ + Convert any input string into a stable 128-bit integer trace ID. + + This uses SHA-256 hashing and takes the first 16 bytes (128 bits) of the digest. + It's suitable for generating consistent, unique identifiers from strings. + """ if string is None: string = "" hash_object = hashlib.sha256(string.encode()) - # Take the first 16 bytes (128 bits) of the hash + # Take the first 16 bytes (128 bits) of the hash digest digest = hash_object.digest()[:16] - # Convert to integer (128 bits) + # Convert to a 128-bit integer return int.from_bytes(digest, byteorder="big") @@ -153,8 +158,7 @@ class ArizePhoenixDataTrace(BaseTraceInstance): } workflow_metadata.update(trace_info.metadata) - external_trace_id = trace_info.metadata.get("external_trace_id") - trace_id = external_trace_id or uuid_to_trace_id(trace_info.workflow_run_id) + trace_id = string_to_trace_id128(trace_info.trace_id or trace_info.workflow_run_id) span_id = RandomIdGenerator().generate_span_id() context = SpanContext( trace_id=trace_id, @@ -310,7 +314,7 @@ class ArizePhoenixDataTrace(BaseTraceInstance): SpanAttributes.SESSION_ID: trace_info.message_data.conversation_id, } - trace_id = uuid_to_trace_id(trace_info.message_id) + trace_id = string_to_trace_id128(trace_info.trace_id or trace_info.message_id) message_span_id = RandomIdGenerator().generate_span_id() span_context = SpanContext( trace_id=trace_id, @@ -406,7 +410,7 @@ class ArizePhoenixDataTrace(BaseTraceInstance): } metadata.update(trace_info.metadata) - trace_id = uuid_to_trace_id(trace_info.message_id) + trace_id = string_to_trace_id128(trace_info.message_id) span_id = RandomIdGenerator().generate_span_id() context = SpanContext( trace_id=trace_id, @@ -468,7 +472,7 @@ class ArizePhoenixDataTrace(BaseTraceInstance): } metadata.update(trace_info.metadata) - trace_id = uuid_to_trace_id(trace_info.message_id) + trace_id = string_to_trace_id128(trace_info.message_id) span_id = RandomIdGenerator().generate_span_id() context = SpanContext( trace_id=trace_id, @@ -521,7 +525,7 @@ class ArizePhoenixDataTrace(BaseTraceInstance): } metadata.update(trace_info.metadata) - trace_id = uuid_to_trace_id(trace_info.message_id) + trace_id = string_to_trace_id128(trace_info.message_id) span_id = RandomIdGenerator().generate_span_id() context = SpanContext( trace_id=trace_id, @@ -568,7 +572,7 @@ class ArizePhoenixDataTrace(BaseTraceInstance): "tool_config": json.dumps(trace_info.tool_config, ensure_ascii=False), } - trace_id = uuid_to_trace_id(trace_info.message_id) + trace_id = string_to_trace_id128(trace_info.message_id) tool_span_id = RandomIdGenerator().generate_span_id() logger.info("[Arize/Phoenix] Creating tool trace with trace_id: %s, span_id: %s", trace_id, tool_span_id) @@ -629,7 +633,7 @@ class ArizePhoenixDataTrace(BaseTraceInstance): } metadata.update(trace_info.metadata) - trace_id = uuid_to_trace_id(trace_info.message_id) + trace_id = string_to_trace_id128(trace_info.message_id) span_id = RandomIdGenerator().generate_span_id() context = SpanContext( trace_id=trace_id, diff --git a/api/core/ops/entities/trace_entity.py b/api/core/ops/entities/trace_entity.py index 151fa2aaf4..3bad5c92fb 100644 --- a/api/core/ops/entities/trace_entity.py +++ b/api/core/ops/entities/trace_entity.py @@ -14,6 +14,7 @@ class BaseTraceInfo(BaseModel): start_time: Optional[datetime] = None end_time: Optional[datetime] = None metadata: dict[str, Any] + trace_id: Optional[str] = None @field_validator("inputs", "outputs") @classmethod diff --git a/api/core/ops/langfuse_trace/langfuse_trace.py b/api/core/ops/langfuse_trace/langfuse_trace.py index d356e735ee..3a03d9f4fe 100644 --- a/api/core/ops/langfuse_trace/langfuse_trace.py +++ b/api/core/ops/langfuse_trace/langfuse_trace.py @@ -67,14 +67,13 @@ class LangFuseDataTrace(BaseTraceInstance): self.generate_name_trace(trace_info) def workflow_trace(self, trace_info: WorkflowTraceInfo): - external_trace_id = trace_info.metadata.get("external_trace_id") - trace_id = external_trace_id or trace_info.workflow_run_id + trace_id = trace_info.trace_id or trace_info.workflow_run_id user_id = trace_info.metadata.get("user_id") metadata = trace_info.metadata metadata["workflow_app_log_id"] = trace_info.workflow_app_log_id if trace_info.message_id: - trace_id = external_trace_id or trace_info.message_id + trace_id = trace_info.trace_id or trace_info.message_id name = TraceTaskName.MESSAGE_TRACE.value trace_data = LangfuseTrace( id=trace_id, @@ -250,8 +249,10 @@ class LangFuseDataTrace(BaseTraceInstance): user_id = end_user_data.session_id metadata["user_id"] = user_id + trace_id = trace_info.trace_id or message_id + trace_data = LangfuseTrace( - id=message_id, + id=trace_id, user_id=user_id, name=TraceTaskName.MESSAGE_TRACE.value, input={ @@ -285,7 +286,7 @@ class LangFuseDataTrace(BaseTraceInstance): langfuse_generation_data = LangfuseGeneration( name="llm", - trace_id=message_id, + trace_id=trace_id, start_time=trace_info.start_time, end_time=trace_info.end_time, model=message_data.model_id, @@ -311,7 +312,7 @@ class LangFuseDataTrace(BaseTraceInstance): "preset_response": trace_info.preset_response, "inputs": trace_info.inputs, }, - trace_id=trace_info.message_id, + trace_id=trace_info.trace_id or trace_info.message_id, start_time=trace_info.start_time or trace_info.message_data.created_at, end_time=trace_info.end_time or trace_info.message_data.created_at, metadata=trace_info.metadata, @@ -334,7 +335,7 @@ class LangFuseDataTrace(BaseTraceInstance): name=TraceTaskName.SUGGESTED_QUESTION_TRACE.value, input=trace_info.inputs, output=str(trace_info.suggested_question), - trace_id=trace_info.message_id, + trace_id=trace_info.trace_id or trace_info.message_id, start_time=trace_info.start_time, end_time=trace_info.end_time, metadata=trace_info.metadata, @@ -352,7 +353,7 @@ class LangFuseDataTrace(BaseTraceInstance): name=TraceTaskName.DATASET_RETRIEVAL_TRACE.value, input=trace_info.inputs, output={"documents": trace_info.documents}, - trace_id=trace_info.message_id, + trace_id=trace_info.trace_id or trace_info.message_id, start_time=trace_info.start_time or trace_info.message_data.created_at, end_time=trace_info.end_time or trace_info.message_data.updated_at, metadata=trace_info.metadata, @@ -365,7 +366,7 @@ class LangFuseDataTrace(BaseTraceInstance): name=trace_info.tool_name, input=trace_info.tool_inputs, output=trace_info.tool_outputs, - trace_id=trace_info.message_id, + trace_id=trace_info.trace_id or trace_info.message_id, start_time=trace_info.start_time, end_time=trace_info.end_time, metadata=trace_info.metadata, diff --git a/api/core/ops/langsmith_trace/langsmith_trace.py b/api/core/ops/langsmith_trace/langsmith_trace.py index fb3f6ecf0d..f9e5128e89 100644 --- a/api/core/ops/langsmith_trace/langsmith_trace.py +++ b/api/core/ops/langsmith_trace/langsmith_trace.py @@ -65,8 +65,7 @@ class LangSmithDataTrace(BaseTraceInstance): self.generate_name_trace(trace_info) def workflow_trace(self, trace_info: WorkflowTraceInfo): - external_trace_id = trace_info.metadata.get("external_trace_id") - trace_id = external_trace_id or trace_info.message_id or trace_info.workflow_run_id + trace_id = trace_info.trace_id or trace_info.message_id or trace_info.workflow_run_id if trace_info.start_time is None: trace_info.start_time = datetime.now() message_dotted_order = ( @@ -290,7 +289,7 @@ class LangSmithDataTrace(BaseTraceInstance): reference_example_id=None, input_attachments={}, output_attachments={}, - trace_id=None, + trace_id=trace_info.trace_id, dotted_order=None, parent_run_id=None, ) @@ -319,7 +318,7 @@ class LangSmithDataTrace(BaseTraceInstance): reference_example_id=None, input_attachments={}, output_attachments={}, - trace_id=None, + trace_id=trace_info.trace_id, dotted_order=None, id=str(uuid.uuid4()), ) @@ -351,7 +350,7 @@ class LangSmithDataTrace(BaseTraceInstance): reference_example_id=None, input_attachments={}, output_attachments={}, - trace_id=None, + trace_id=trace_info.trace_id, dotted_order=None, error="", file_list=[], @@ -381,7 +380,7 @@ class LangSmithDataTrace(BaseTraceInstance): reference_example_id=None, input_attachments={}, output_attachments={}, - trace_id=None, + trace_id=trace_info.trace_id, dotted_order=None, error="", file_list=[], @@ -410,7 +409,7 @@ class LangSmithDataTrace(BaseTraceInstance): reference_example_id=None, input_attachments={}, output_attachments={}, - trace_id=None, + trace_id=trace_info.trace_id, dotted_order=None, error="", file_list=[], @@ -440,7 +439,7 @@ class LangSmithDataTrace(BaseTraceInstance): reference_example_id=None, input_attachments={}, output_attachments={}, - trace_id=None, + trace_id=trace_info.trace_id, dotted_order=None, error=trace_info.error or "", ) @@ -465,7 +464,7 @@ class LangSmithDataTrace(BaseTraceInstance): reference_example_id=None, input_attachments={}, output_attachments={}, - trace_id=None, + trace_id=trace_info.trace_id, dotted_order=None, error="", file_list=[], diff --git a/api/core/ops/opik_trace/opik_trace.py b/api/core/ops/opik_trace/opik_trace.py index 1e52f28350..dd6a424ddb 100644 --- a/api/core/ops/opik_trace/opik_trace.py +++ b/api/core/ops/opik_trace/opik_trace.py @@ -96,8 +96,7 @@ class OpikDataTrace(BaseTraceInstance): self.generate_name_trace(trace_info) def workflow_trace(self, trace_info: WorkflowTraceInfo): - external_trace_id = trace_info.metadata.get("external_trace_id") - dify_trace_id = external_trace_id or trace_info.workflow_run_id + dify_trace_id = trace_info.trace_id or trace_info.workflow_run_id opik_trace_id = prepare_opik_uuid(trace_info.start_time, dify_trace_id) workflow_metadata = wrap_metadata( trace_info.metadata, message_id=trace_info.message_id, workflow_app_log_id=trace_info.workflow_app_log_id @@ -105,7 +104,7 @@ class OpikDataTrace(BaseTraceInstance): root_span_id = None if trace_info.message_id: - dify_trace_id = external_trace_id or trace_info.message_id + dify_trace_id = trace_info.trace_id or trace_info.message_id opik_trace_id = prepare_opik_uuid(trace_info.start_time, dify_trace_id) trace_data = { @@ -276,7 +275,7 @@ class OpikDataTrace(BaseTraceInstance): return metadata = trace_info.metadata - message_id = trace_info.message_id + dify_trace_id = trace_info.trace_id or trace_info.message_id user_id = message_data.from_account_id metadata["user_id"] = user_id @@ -291,7 +290,7 @@ class OpikDataTrace(BaseTraceInstance): metadata["end_user_id"] = end_user_id trace_data = { - "id": prepare_opik_uuid(trace_info.start_time, message_id), + "id": prepare_opik_uuid(trace_info.start_time, dify_trace_id), "name": TraceTaskName.MESSAGE_TRACE.value, "start_time": trace_info.start_time, "end_time": trace_info.end_time, @@ -330,7 +329,7 @@ class OpikDataTrace(BaseTraceInstance): start_time = trace_info.start_time or trace_info.message_data.created_at span_data = { - "trace_id": prepare_opik_uuid(start_time, trace_info.message_id), + "trace_id": prepare_opik_uuid(start_time, trace_info.trace_id or trace_info.message_id), "name": TraceTaskName.MODERATION_TRACE.value, "type": "tool", "start_time": start_time, @@ -356,7 +355,7 @@ class OpikDataTrace(BaseTraceInstance): start_time = trace_info.start_time or message_data.created_at span_data = { - "trace_id": prepare_opik_uuid(start_time, trace_info.message_id), + "trace_id": prepare_opik_uuid(start_time, trace_info.trace_id or trace_info.message_id), "name": TraceTaskName.SUGGESTED_QUESTION_TRACE.value, "type": "tool", "start_time": start_time, @@ -376,7 +375,7 @@ class OpikDataTrace(BaseTraceInstance): start_time = trace_info.start_time or trace_info.message_data.created_at span_data = { - "trace_id": prepare_opik_uuid(start_time, trace_info.message_id), + "trace_id": prepare_opik_uuid(start_time, trace_info.trace_id or trace_info.message_id), "name": TraceTaskName.DATASET_RETRIEVAL_TRACE.value, "type": "tool", "start_time": start_time, @@ -391,7 +390,7 @@ class OpikDataTrace(BaseTraceInstance): def tool_trace(self, trace_info: ToolTraceInfo): span_data = { - "trace_id": prepare_opik_uuid(trace_info.start_time, trace_info.message_id), + "trace_id": prepare_opik_uuid(trace_info.start_time, trace_info.trace_id or trace_info.message_id), "name": trace_info.tool_name, "type": "tool", "start_time": trace_info.start_time, @@ -406,7 +405,7 @@ class OpikDataTrace(BaseTraceInstance): def generate_name_trace(self, trace_info: GenerateNameTraceInfo): trace_data = { - "id": prepare_opik_uuid(trace_info.start_time, trace_info.message_id), + "id": prepare_opik_uuid(trace_info.start_time, trace_info.trace_id or trace_info.message_id), "name": TraceTaskName.GENERATE_NAME_TRACE.value, "start_time": trace_info.start_time, "end_time": trace_info.end_time, diff --git a/api/core/ops/ops_trace_manager.py b/api/core/ops/ops_trace_manager.py index 91cdc937a6..a607c76beb 100644 --- a/api/core/ops/ops_trace_manager.py +++ b/api/core/ops/ops_trace_manager.py @@ -407,6 +407,7 @@ class TraceTask: def __init__( self, trace_type: Any, + trace_id: Optional[str] = None, message_id: Optional[str] = None, workflow_execution: Optional[WorkflowExecution] = None, conversation_id: Optional[str] = None, @@ -424,6 +425,9 @@ class TraceTask: self.app_id = None self.kwargs = kwargs + external_trace_id = kwargs.get("external_trace_id") + if external_trace_id: + self.trace_id = external_trace_id def execute(self): return self.preprocess() @@ -520,11 +524,8 @@ class TraceTask: "app_id": workflow_run.app_id, } - external_trace_id = self.kwargs.get("external_trace_id") - if external_trace_id: - metadata["external_trace_id"] = external_trace_id - workflow_trace_info = WorkflowTraceInfo( + trace_id=self.trace_id, workflow_data=workflow_run.to_dict(), conversation_id=conversation_id, workflow_id=workflow_id, @@ -584,6 +585,7 @@ class TraceTask: message_tokens = message_data.message_tokens message_trace_info = MessageTraceInfo( + trace_id=self.trace_id, message_id=message_id, message_data=message_data.to_dict(), conversation_model=conversation_mode, @@ -627,6 +629,7 @@ class TraceTask: workflow_app_log_id = str(workflow_app_log_data.id) if workflow_app_log_data else None moderation_trace_info = ModerationTraceInfo( + trace_id=self.trace_id, message_id=workflow_app_log_id or message_id, inputs=inputs, message_data=message_data.to_dict(), @@ -667,6 +670,7 @@ class TraceTask: workflow_app_log_id = str(workflow_app_log_data.id) if workflow_app_log_data else None suggested_question_trace_info = SuggestedQuestionTraceInfo( + trace_id=self.trace_id, message_id=workflow_app_log_id or message_id, message_data=message_data.to_dict(), inputs=message_data.message, @@ -708,6 +712,7 @@ class TraceTask: } dataset_retrieval_trace_info = DatasetRetrievalTraceInfo( + trace_id=self.trace_id, message_id=message_id, inputs=message_data.query or message_data.inputs, documents=[doc.model_dump() for doc in documents] if documents else [], @@ -772,6 +777,7 @@ class TraceTask: ) tool_trace_info = ToolTraceInfo( + trace_id=self.trace_id, message_id=message_id, message_data=message_data.to_dict(), tool_name=tool_name, @@ -807,6 +813,7 @@ class TraceTask: } generate_name_trace_info = GenerateNameTraceInfo( + trace_id=self.trace_id, conversation_id=conversation_id, inputs=inputs, outputs=generate_conversation_name, diff --git a/api/core/ops/weave_trace/weave_trace.py b/api/core/ops/weave_trace/weave_trace.py index 470601b17a..8089860481 100644 --- a/api/core/ops/weave_trace/weave_trace.py +++ b/api/core/ops/weave_trace/weave_trace.py @@ -87,8 +87,7 @@ class WeaveDataTrace(BaseTraceInstance): self.generate_name_trace(trace_info) def workflow_trace(self, trace_info: WorkflowTraceInfo): - external_trace_id = trace_info.metadata.get("external_trace_id") - trace_id = external_trace_id or trace_info.message_id or trace_info.workflow_run_id + trace_id = trace_info.trace_id or trace_info.message_id or trace_info.workflow_run_id if trace_info.start_time is None: trace_info.start_time = datetime.now() @@ -245,8 +244,12 @@ class WeaveDataTrace(BaseTraceInstance): attributes["start_time"] = trace_info.start_time attributes["end_time"] = trace_info.end_time attributes["tags"] = ["message", str(trace_info.conversation_mode)] + + trace_id = trace_info.trace_id or message_id + attributes["trace_id"] = trace_id + message_run = WeaveTraceModel( - id=message_id, + id=trace_id, op=str(TraceTaskName.MESSAGE_TRACE.value), input_tokens=trace_info.message_tokens, output_tokens=trace_info.answer_tokens, @@ -274,7 +277,7 @@ class WeaveDataTrace(BaseTraceInstance): ) self.start_call( llm_run, - parent_run_id=message_id, + parent_run_id=trace_id, ) self.finish_call(llm_run) self.finish_call(message_run) @@ -289,6 +292,9 @@ class WeaveDataTrace(BaseTraceInstance): attributes["start_time"] = trace_info.start_time or trace_info.message_data.created_at attributes["end_time"] = trace_info.end_time or trace_info.message_data.updated_at + trace_id = trace_info.trace_id or trace_info.message_id + attributes["trace_id"] = trace_id + moderation_run = WeaveTraceModel( id=str(uuid.uuid4()), op=str(TraceTaskName.MODERATION_TRACE.value), @@ -303,7 +309,7 @@ class WeaveDataTrace(BaseTraceInstance): exception=getattr(trace_info, "error", None), file_list=[], ) - self.start_call(moderation_run, parent_run_id=trace_info.message_id) + self.start_call(moderation_run, parent_run_id=trace_id) self.finish_call(moderation_run) def suggested_question_trace(self, trace_info: SuggestedQuestionTraceInfo): @@ -316,6 +322,9 @@ class WeaveDataTrace(BaseTraceInstance): attributes["start_time"] = (trace_info.start_time or message_data.created_at,) attributes["end_time"] = (trace_info.end_time or message_data.updated_at,) + trace_id = trace_info.trace_id or trace_info.message_id + attributes["trace_id"] = trace_id + suggested_question_run = WeaveTraceModel( id=str(uuid.uuid4()), op=str(TraceTaskName.SUGGESTED_QUESTION_TRACE.value), @@ -326,7 +335,7 @@ class WeaveDataTrace(BaseTraceInstance): file_list=[], ) - self.start_call(suggested_question_run, parent_run_id=trace_info.message_id) + self.start_call(suggested_question_run, parent_run_id=trace_id) self.finish_call(suggested_question_run) def dataset_retrieval_trace(self, trace_info: DatasetRetrievalTraceInfo): @@ -338,6 +347,9 @@ class WeaveDataTrace(BaseTraceInstance): attributes["start_time"] = (trace_info.start_time or trace_info.message_data.created_at,) attributes["end_time"] = (trace_info.end_time or trace_info.message_data.updated_at,) + trace_id = trace_info.trace_id or trace_info.message_id + attributes["trace_id"] = trace_id + dataset_retrieval_run = WeaveTraceModel( id=str(uuid.uuid4()), op=str(TraceTaskName.DATASET_RETRIEVAL_TRACE.value), @@ -348,7 +360,7 @@ class WeaveDataTrace(BaseTraceInstance): file_list=[], ) - self.start_call(dataset_retrieval_run, parent_run_id=trace_info.message_id) + self.start_call(dataset_retrieval_run, parent_run_id=trace_id) self.finish_call(dataset_retrieval_run) def tool_trace(self, trace_info: ToolTraceInfo): @@ -357,6 +369,11 @@ class WeaveDataTrace(BaseTraceInstance): attributes["start_time"] = trace_info.start_time attributes["end_time"] = trace_info.end_time + message_id = trace_info.message_id or getattr(trace_info, "conversation_id", None) + message_id = message_id or None + trace_id = trace_info.trace_id or message_id + attributes["trace_id"] = trace_id + tool_run = WeaveTraceModel( id=str(uuid.uuid4()), op=trace_info.tool_name, @@ -366,9 +383,7 @@ class WeaveDataTrace(BaseTraceInstance): attributes=attributes, exception=trace_info.error, ) - message_id = trace_info.message_id or getattr(trace_info, "conversation_id", None) - message_id = message_id or None - self.start_call(tool_run, parent_run_id=message_id) + self.start_call(tool_run, parent_run_id=trace_id) self.finish_call(tool_run) def generate_name_trace(self, trace_info: GenerateNameTraceInfo): From cba5bd588ccb1a10ab35377d14e6a39563230e7e Mon Sep 17 00:00:00 2001 From: NeatGuyCoding <15627489+NeatGuyCoding@users.noreply.github.com> Date: Tue, 29 Jul 2025 20:54:37 +0800 Subject: [PATCH 057/415] minor fix: wrong position of retry_document_indexing_task time elapsed (#23099) --- api/tasks/retry_document_indexing_task.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/api/tasks/retry_document_indexing_task.py b/api/tasks/retry_document_indexing_task.py index 2576d7b051..26b41aff2e 100644 --- a/api/tasks/retry_document_indexing_task.py +++ b/api/tasks/retry_document_indexing_task.py @@ -95,8 +95,8 @@ def retry_document_indexing_task(dataset_id: str, document_ids: list[str]): logging.info(click.style(str(ex), fg="yellow")) redis_client.delete(retry_indexing_cache_key) logging.exception("retry_document_indexing_task failed, document_id: %s", document_id) - end_at = time.perf_counter() - logging.info(click.style(f"Retry dataset: {dataset_id} latency: {end_at - start_at}", fg="green")) + end_at = time.perf_counter() + logging.info(click.style(f"Retry dataset: {dataset_id} latency: {end_at - start_at}", fg="green")) except Exception as e: logging.exception( "retry_document_indexing_task failed, dataset_id: %s, document_ids: %s", dataset_id, document_ids From ea542d42ca8d28270396baee26fe25247f98d197 Mon Sep 17 00:00:00 2001 From: crazywoola <100913391+crazywoola@users.noreply.github.com> Date: Tue, 29 Jul 2025 21:36:32 +0800 Subject: [PATCH 058/415] fix: i18n link in README.md (#23121) --- README.md | 2 +- README_AR.md | 2 +- README_BN.md | 2 +- README_CN.md | 2 +- README_DE.md | 2 +- README_ES.md | 2 +- README_FR.md | 2 +- README_JA.md | 2 +- README_KL.md | 2 +- README_KR.md | 2 +- README_PT.md | 2 +- README_TR.md | 2 +- README_TW.md | 2 +- README_VI.md | 2 +- web/i18n-config/README.md | 2 +- 15 files changed, 15 insertions(+), 15 deletions(-) diff --git a/README.md b/README.md index 2909e0e6cf..16a1268cb1 100644 --- a/README.md +++ b/README.md @@ -241,7 +241,7 @@ One-Click deploy Dify to Alibaba Cloud with [Alibaba Cloud Data Management](http For those who'd like to contribute code, see our [Contribution Guide](https://github.com/langgenius/dify/blob/main/CONTRIBUTING.md). At the same time, please consider supporting Dify by sharing it on social media and at events and conferences. -> We are looking for contributors to help translate Dify into languages other than Mandarin or English. If you are interested in helping, please see the [i18n README](https://github.com/langgenius/dify/blob/main/web/i18n/README.md) for more information, and leave us a comment in the `global-users` channel of our [Discord Community Server](https://discord.gg/8Tpq4AcN9c). +> We are looking for contributors to help translate Dify into languages other than Mandarin or English. If you are interested in helping, please see the [i18n README](https://github.com/langgenius/dify/blob/main/web/i18n-config/README.md) for more information, and leave us a comment in the `global-users` channel of our [Discord Community Server](https://discord.gg/8Tpq4AcN9c). ## Community & contact diff --git a/README_AR.md b/README_AR.md index e959ca0f78..d2cb0098a3 100644 --- a/README_AR.md +++ b/README_AR.md @@ -223,7 +223,7 @@ docker compose up -d لأولئك الذين يرغبون في المساهمة، انظر إلى [دليل المساهمة](https://github.com/langgenius/dify/blob/main/CONTRIBUTING.md) لدينا. في الوقت نفسه، يرجى النظر في دعم Dify عن طريق مشاركته على وسائل التواصل الاجتماعي وفي الفعاليات والمؤتمرات. -> نحن نبحث عن مساهمين لمساعدة في ترجمة Dify إلى لغات أخرى غير اللغة الصينية المندرين أو الإنجليزية. إذا كنت مهتمًا بالمساعدة، يرجى الاطلاع على [README للترجمة](https://github.com/langgenius/dify/blob/main/web/i18n/README.md) لمزيد من المعلومات، واترك لنا تعليقًا في قناة `global-users` على [خادم المجتمع على Discord](https://discord.gg/8Tpq4AcN9c). +> نحن نبحث عن مساهمين لمساعدة في ترجمة Dify إلى لغات أخرى غير اللغة الصينية المندرين أو الإنجليزية. إذا كنت مهتمًا بالمساعدة، يرجى الاطلاع على [README للترجمة](https://github.com/langgenius/dify/blob/main/web/i18n-config/README.md) لمزيد من المعلومات، واترك لنا تعليقًا في قناة `global-users` على [خادم المجتمع على Discord](https://discord.gg/8Tpq4AcN9c). **المساهمون** diff --git a/README_BN.md b/README_BN.md index 29d7374ea5..f57413ec8b 100644 --- a/README_BN.md +++ b/README_BN.md @@ -241,7 +241,7 @@ GitHub-এ ডিফাইকে স্টার দিয়ে রাখুন যারা কোড অবদান রাখতে চান, তাদের জন্য আমাদের [অবদান নির্দেশিকা] দেখুন (https://github.com/langgenius/dify/blob/main/CONTRIBUTING.md)। একই সাথে, সোশ্যাল মিডিয়া এবং ইভেন্ট এবং কনফারেন্সে এটি শেয়ার করে Dify কে সমর্থন করুন। -> আমরা ম্যান্ডারিন বা ইংরেজি ছাড়া অন্য ভাষায় Dify অনুবাদ করতে সাহায্য করার জন্য অবদানকারীদের খুঁজছি। আপনি যদি সাহায্য করতে আগ্রহী হন, তাহলে আরও তথ্যের জন্য [i18n README](https://github.com/langgenius/dify/blob/main/web/i18n/README.md) দেখুন এবং আমাদের [ডিসকর্ড কমিউনিটি সার্ভার](https://discord.gg/8Tpq4AcN9c) এর `গ্লোবাল-ইউজারস` চ্যানেলে আমাদের একটি মন্তব্য করুন। +> আমরা ম্যান্ডারিন বা ইংরেজি ছাড়া অন্য ভাষায় Dify অনুবাদ করতে সাহায্য করার জন্য অবদানকারীদের খুঁজছি। আপনি যদি সাহায্য করতে আগ্রহী হন, তাহলে আরও তথ্যের জন্য [i18n README](https://github.com/langgenius/dify/blob/main/web/i18n-config/README.md) দেখুন এবং আমাদের [ডিসকর্ড কমিউনিটি সার্ভার](https://discord.gg/8Tpq4AcN9c) এর `গ্লোবাল-ইউজারস` চ্যানেলে আমাদের একটি মন্তব্য করুন। ## কমিউনিটি এবং যোগাযোগ diff --git a/README_CN.md b/README_CN.md index 486a368c09..e9c73eb48b 100644 --- a/README_CN.md +++ b/README_CN.md @@ -244,7 +244,7 @@ docker compose up -d 对于那些想要贡献代码的人,请参阅我们的[贡献指南](https://github.com/langgenius/dify/blob/main/CONTRIBUTING.md)。 同时,请考虑通过社交媒体、活动和会议来支持 Dify 的分享。 -> 我们正在寻找贡献者来帮助将 Dify 翻译成除了中文和英文之外的其他语言。如果您有兴趣帮助,请参阅我们的[i18n README](https://github.com/langgenius/dify/blob/main/web/i18n/README.md)获取更多信息,并在我们的[Discord 社区服务器](https://discord.gg/8Tpq4AcN9c)的`global-users`频道中留言。 +> 我们正在寻找贡献者来帮助将 Dify 翻译成除了中文和英文之外的其他语言。如果您有兴趣帮助,请参阅我们的[i18n README](https://github.com/langgenius/dify/blob/main/web/i18n-config/README.md)获取更多信息,并在我们的[Discord 社区服务器](https://discord.gg/8Tpq4AcN9c)的`global-users`频道中留言。 **Contributors** diff --git a/README_DE.md b/README_DE.md index fce52c34c2..d31a56542d 100644 --- a/README_DE.md +++ b/README_DE.md @@ -236,7 +236,7 @@ Ein-Klick-Bereitstellung von Dify in der Alibaba Cloud mit [Alibaba Cloud Data M Falls Sie Code beitragen möchten, lesen Sie bitte unseren [Contribution Guide](https://github.com/langgenius/dify/blob/main/CONTRIBUTING.md). Gleichzeitig bitten wir Sie, Dify zu unterstützen, indem Sie es in den sozialen Medien teilen und auf Veranstaltungen und Konferenzen präsentieren. -> Wir suchen Mitwirkende, die dabei helfen, Dify in weitere Sprachen zu übersetzen – außer Mandarin oder Englisch. Wenn Sie Interesse an einer Mitarbeit haben, lesen Sie bitte die [i18n README](https://github.com/langgenius/dify/blob/main/web/i18n/README.md) für weitere Informationen und hinterlassen Sie einen Kommentar im `global-users`-Kanal unseres [Discord Community Servers](https://discord.gg/8Tpq4AcN9c). +> Wir suchen Mitwirkende, die dabei helfen, Dify in weitere Sprachen zu übersetzen – außer Mandarin oder Englisch. Wenn Sie Interesse an einer Mitarbeit haben, lesen Sie bitte die [i18n README](https://github.com/langgenius/dify/blob/main/web/i18n-config/README.md) für weitere Informationen und hinterlassen Sie einen Kommentar im `global-users`-Kanal unseres [Discord Community Servers](https://discord.gg/8Tpq4AcN9c). ## Gemeinschaft & Kontakt diff --git a/README_ES.md b/README_ES.md index 6fd6dfcee8..918bfe2286 100644 --- a/README_ES.md +++ b/README_ES.md @@ -237,7 +237,7 @@ Para aquellos que deseen contribuir con código, consulten nuestra [Guía de con Al mismo tiempo, considera apoyar a Dify compartiéndolo en redes sociales y en eventos y conferencias. -> Estamos buscando colaboradores para ayudar con la traducción de Dify a idiomas que no sean el mandarín o el inglés. Si estás interesado en ayudar, consulta el [README de i18n](https://github.com/langgenius/dify/blob/main/web/i18n/README.md) para obtener más información y déjanos un comentario en el canal `global-users` de nuestro [Servidor de Comunidad en Discord](https://discord.gg/8Tpq4AcN9c). +> Estamos buscando colaboradores para ayudar con la traducción de Dify a idiomas que no sean el mandarín o el inglés. Si estás interesado en ayudar, consulta el [README de i18n](https://github.com/langgenius/dify/blob/main/web/i18n-config/README.md) para obtener más información y déjanos un comentario en el canal `global-users` de nuestro [Servidor de Comunidad en Discord](https://discord.gg/8Tpq4AcN9c). **Contribuidores** diff --git a/README_FR.md b/README_FR.md index b2209fb495..56ca878aae 100644 --- a/README_FR.md +++ b/README_FR.md @@ -235,7 +235,7 @@ Pour ceux qui souhaitent contribuer du code, consultez notre [Guide de contribut Dans le même temps, veuillez envisager de soutenir Dify en le partageant sur les réseaux sociaux et lors d'événements et de conférences. -> Nous recherchons des contributeurs pour aider à traduire Dify dans des langues autres que le mandarin ou l'anglais. Si vous êtes intéressé à aider, veuillez consulter le [README i18n](https://github.com/langgenius/dify/blob/main/web/i18n/README.md) pour plus d'informations, et laissez-nous un commentaire dans le canal `global-users` de notre [Serveur communautaire Discord](https://discord.gg/8Tpq4AcN9c). +> Nous recherchons des contributeurs pour aider à traduire Dify dans des langues autres que le mandarin ou l'anglais. Si vous êtes intéressé à aider, veuillez consulter le [README i18n](https://github.com/langgenius/dify/blob/main/web/i18n-config/README.md) pour plus d'informations, et laissez-nous un commentaire dans le canal `global-users` de notre [Serveur communautaire Discord](https://discord.gg/8Tpq4AcN9c). **Contributeurs** diff --git a/README_JA.md b/README_JA.md index c658225f90..6d277a36ed 100644 --- a/README_JA.md +++ b/README_JA.md @@ -234,7 +234,7 @@ docker compose up -d 同時に、DifyをSNSやイベント、カンファレンスで共有してサポートしていただけると幸いです。 -> Difyを英語または中国語以外の言語に翻訳してくれる貢献者を募集しています。興味がある場合は、詳細については[i18n README](https://github.com/langgenius/dify/blob/main/web/i18n/README.md)を参照してください。また、[Discordコミュニティサーバー](https://discord.gg/8Tpq4AcN9c)の`global-users`チャンネルにコメントを残してください。 +> Difyを英語または中国語以外の言語に翻訳してくれる貢献者を募集しています。興味がある場合は、詳細については[i18n README](https://github.com/langgenius/dify/blob/main/web/i18n-config/README.md)を参照してください。また、[Discordコミュニティサーバー](https://discord.gg/8Tpq4AcN9c)の`global-users`チャンネルにコメントを残してください。 **貢献者** diff --git a/README_KL.md b/README_KL.md index bfafcc7407..dac67eeb29 100644 --- a/README_KL.md +++ b/README_KL.md @@ -235,7 +235,7 @@ For those who'd like to contribute code, see our [Contribution Guide](https://gi At the same time, please consider supporting Dify by sharing it on social media and at events and conferences. -> We are looking for contributors to help with translating Dify to languages other than Mandarin or English. If you are interested in helping, please see the [i18n README](https://github.com/langgenius/dify/blob/main/web/i18n/README.md) for more information, and leave us a comment in the `global-users` channel of our [Discord Community Server](https://discord.gg/8Tpq4AcN9c). +> We are looking for contributors to help with translating Dify to languages other than Mandarin or English. If you are interested in helping, please see the [i18n README](https://github.com/langgenius/dify/blob/main/web/i18n-config/README.md) for more information, and leave us a comment in the `global-users` channel of our [Discord Community Server](https://discord.gg/8Tpq4AcN9c). **Contributors** diff --git a/README_KR.md b/README_KR.md index 282117e776..072481da02 100644 --- a/README_KR.md +++ b/README_KR.md @@ -229,7 +229,7 @@ Dify를 Kubernetes에 배포하고 프리미엄 스케일링 설정을 구성했 동시에 Dify를 소셜 미디어와 행사 및 컨퍼런스에 공유하여 지원하는 것을 고려해 주시기 바랍니다. -> 우리는 Dify를 중국어나 영어 이외의 언어로 번역하는 데 도움을 줄 수 있는 기여자를 찾고 있습니다. 도움을 주고 싶으시다면 [i18n README](https://github.com/langgenius/dify/blob/main/web/i18n/README.md)에서 더 많은 정보를 확인하시고 [Discord 커뮤니티 서버](https://discord.gg/8Tpq4AcN9c)의 `global-users` 채널에 댓글을 남겨주세요. +> 우리는 Dify를 중국어나 영어 이외의 언어로 번역하는 데 도움을 줄 수 있는 기여자를 찾고 있습니다. 도움을 주고 싶으시다면 [i18n README](https://github.com/langgenius/dify/blob/main/web/i18n-config/README.md)에서 더 많은 정보를 확인하시고 [Discord 커뮤니티 서버](https://discord.gg/8Tpq4AcN9c)의 `global-users` 채널에 댓글을 남겨주세요. **기여자** diff --git a/README_PT.md b/README_PT.md index 576f6b48f7..1260f8e6fd 100644 --- a/README_PT.md +++ b/README_PT.md @@ -233,7 +233,7 @@ Implante o Dify na Alibaba Cloud com um clique usando o [Alibaba Cloud Data Mana Para aqueles que desejam contribuir com código, veja nosso [Guia de Contribuição](https://github.com/langgenius/dify/blob/main/CONTRIBUTING.md). Ao mesmo tempo, considere apoiar o Dify compartilhando-o nas redes sociais e em eventos e conferências. -> Estamos buscando contribuidores para ajudar na tradução do Dify para idiomas além de Mandarim e Inglês. Se você tiver interesse em ajudar, consulte o [README i18n](https://github.com/langgenius/dify/blob/main/web/i18n/README.md) para mais informações e deixe-nos um comentário no canal `global-users` em nosso [Servidor da Comunidade no Discord](https://discord.gg/8Tpq4AcN9c). +> Estamos buscando contribuidores para ajudar na tradução do Dify para idiomas além de Mandarim e Inglês. Se você tiver interesse em ajudar, consulte o [README i18n](https://github.com/langgenius/dify/blob/main/web/i18n-config/README.md) para mais informações e deixe-nos um comentário no canal `global-users` em nosso [Servidor da Comunidade no Discord](https://discord.gg/8Tpq4AcN9c). **Contribuidores** diff --git a/README_TR.md b/README_TR.md index 6e94e54fa0..37953f0de1 100644 --- a/README_TR.md +++ b/README_TR.md @@ -227,7 +227,7 @@ Dify'ı bulut platformuna tek tıklamayla dağıtın [terraform](https://www.ter Kod katkısında bulunmak isteyenler için [Katkı Kılavuzumuza](https://github.com/langgenius/dify/blob/main/CONTRIBUTING.md) bakabilirsiniz. Aynı zamanda, lütfen Dify'ı sosyal medyada, etkinliklerde ve konferanslarda paylaşarak desteklemeyi düşünün. -> Dify'ı Mandarin veya İngilizce dışındaki dillere çevirmemize yardımcı olacak katkıda bulunanlara ihtiyacımız var. Yardımcı olmakla ilgileniyorsanız, lütfen daha fazla bilgi için [i18n README](https://github.com/langgenius/dify/blob/main/web/i18n/README.md) dosyasına bakın ve [Discord Topluluk Sunucumuzdaki](https://discord.gg/8Tpq4AcN9c) `global-users` kanalında bize bir yorum bırakın. +> Dify'ı Mandarin veya İngilizce dışındaki dillere çevirmemize yardımcı olacak katkıda bulunanlara ihtiyacımız var. Yardımcı olmakla ilgileniyorsanız, lütfen daha fazla bilgi için [i18n README](https://github.com/langgenius/dify/blob/main/web/i18n-config/README.md) dosyasına bakın ve [Discord Topluluk Sunucumuzdaki](https://discord.gg/8Tpq4AcN9c) `global-users` kanalında bize bir yorum bırakın. **Katkıda Bulunanlar** diff --git a/README_TW.md b/README_TW.md index 6e3e22b5c1..f70d6a25f6 100644 --- a/README_TW.md +++ b/README_TW.md @@ -239,7 +239,7 @@ Dify 的所有功能都提供相應的 API,因此您可以輕鬆地將 Dify 對於想要貢獻程式碼的開發者,請參閱我們的[貢獻指南](https://github.com/langgenius/dify/blob/main/CONTRIBUTING.md)。 同時,也請考慮透過在社群媒體和各種活動與會議上分享 Dify 來支持我們。 -> 我們正在尋找貢獻者協助將 Dify 翻譯成中文和英文以外的語言。如果您有興趣幫忙,請查看 [i18n README](https://github.com/langgenius/dify/blob/main/web/i18n/README.md) 獲取更多資訊,並在我們的 [Discord 社群伺服器](https://discord.gg/8Tpq4AcN9c) 的 `global-users` 頻道留言給我們。 +> 我們正在尋找貢獻者協助將 Dify 翻譯成中文和英文以外的語言。如果您有興趣幫忙,請查看 [i18n README](https://github.com/langgenius/dify/blob/main/web/i18n-config/README.md) 獲取更多資訊,並在我們的 [Discord 社群伺服器](https://discord.gg/8Tpq4AcN9c) 的 `global-users` 頻道留言給我們。 ## 社群與聯絡方式 diff --git a/README_VI.md b/README_VI.md index 51314e6de5..ddd9aa95f6 100644 --- a/README_VI.md +++ b/README_VI.md @@ -231,7 +231,7 @@ Triển khai Dify lên Alibaba Cloud chỉ với một cú nhấp chuột bằng Đồng thời, vui lòng xem xét hỗ trợ Dify bằng cách chia sẻ nó trên mạng xã hội và tại các sự kiện và hội nghị. -> Chúng tôi đang tìm kiếm người đóng góp để giúp dịch Dify sang các ngôn ngữ khác ngoài tiếng Trung hoặc tiếng Anh. Nếu bạn quan tâm đến việc giúp đỡ, vui lòng xem [README i18n](https://github.com/langgenius/dify/blob/main/web/i18n/README.md) để biết thêm thông tin và để lại bình luận cho chúng tôi trong kênh `global-users` của [Máy chủ Cộng đồng Discord](https://discord.gg/8Tpq4AcN9c) của chúng tôi. +> Chúng tôi đang tìm kiếm người đóng góp để giúp dịch Dify sang các ngôn ngữ khác ngoài tiếng Trung hoặc tiếng Anh. Nếu bạn quan tâm đến việc giúp đỡ, vui lòng xem [README i18n](https://github.com/langgenius/dify/blob/main/web/i18n-config/README.md) để biết thêm thông tin và để lại bình luận cho chúng tôi trong kênh `global-users` của [Máy chủ Cộng đồng Discord](https://discord.gg/8Tpq4AcN9c) của chúng tôi. **Người đóng góp** diff --git a/web/i18n-config/README.md b/web/i18n-config/README.md index 5e7058d829..dacda966dd 100644 --- a/web/i18n-config/README.md +++ b/web/i18n-config/README.md @@ -8,7 +8,6 @@ This directory contains the internationalization (i18n) files for this project. ``` ├── [ 24] README.md -├── [ 0] README_CN.md ├── [ 704] en-US │   ├── [2.4K] app-annotation.ts │   ├── [5.2K] app-api.ts @@ -48,6 +47,7 @@ By default we will use `LanguagesSupported` to determine which languages are sup 1. Create a new folder for the new language. ``` +cd web/i18n cp -r en-US fr-FR ``` From 6914c1c85e0aa6583af344f8f1c35b804ff46fa1 Mon Sep 17 00:00:00 2001 From: lyzno1 <92089059+lyzno1@users.noreply.github.com> Date: Tue, 29 Jul 2025 21:39:40 +0800 Subject: [PATCH 059/415] fix(web): make iteration panel respect MAX_PARALLEL_LIMIT environment variable (#23083) (#23104) --- .../workflow-parallel-limit.test.tsx | 301 ++++++++++++++++++ web/app/components/workflow/constants.ts | 8 - .../components/workflow/hooks/use-workflow.ts | 8 +- .../workflow/nodes/iteration/panel.tsx | 7 +- web/config/index.ts | 15 +- 5 files changed, 319 insertions(+), 20 deletions(-) create mode 100644 web/__tests__/workflow-parallel-limit.test.tsx diff --git a/web/__tests__/workflow-parallel-limit.test.tsx b/web/__tests__/workflow-parallel-limit.test.tsx new file mode 100644 index 0000000000..0843122ab4 --- /dev/null +++ b/web/__tests__/workflow-parallel-limit.test.tsx @@ -0,0 +1,301 @@ +/** + * MAX_PARALLEL_LIMIT Configuration Bug Test + * + * This test reproduces and verifies the fix for issue #23083: + * MAX_PARALLEL_LIMIT environment variable does not take effect in iteration panel + */ + +import { render, screen } from '@testing-library/react' +import React from 'react' + +// Mock environment variables before importing constants +const originalEnv = process.env.NEXT_PUBLIC_MAX_PARALLEL_LIMIT + +// Test with different environment values +function setupEnvironment(value?: string) { + if (value) + process.env.NEXT_PUBLIC_MAX_PARALLEL_LIMIT = value + else + delete process.env.NEXT_PUBLIC_MAX_PARALLEL_LIMIT + + // Clear module cache to force re-evaluation + jest.resetModules() +} + +function restoreEnvironment() { + if (originalEnv) + process.env.NEXT_PUBLIC_MAX_PARALLEL_LIMIT = originalEnv + else + delete process.env.NEXT_PUBLIC_MAX_PARALLEL_LIMIT + + jest.resetModules() +} + +// Mock i18next with proper implementation +jest.mock('react-i18next', () => ({ + useTranslation: () => ({ + t: (key: string) => { + if (key.includes('MaxParallelismTitle')) return 'Max Parallelism' + if (key.includes('MaxParallelismDesc')) return 'Maximum number of parallel executions' + if (key.includes('parallelMode')) return 'Parallel Mode' + if (key.includes('parallelPanelDesc')) return 'Enable parallel execution' + if (key.includes('errorResponseMethod')) return 'Error Response Method' + return key + }, + }), + initReactI18next: { + type: '3rdParty', + init: jest.fn(), + }, +})) + +// Mock i18next module completely to prevent initialization issues +jest.mock('i18next', () => ({ + use: jest.fn().mockReturnThis(), + init: jest.fn().mockReturnThis(), + t: jest.fn(key => key), + isInitialized: true, +})) + +// Mock the useConfig hook +jest.mock('@/app/components/workflow/nodes/iteration/use-config', () => ({ + __esModule: true, + default: () => ({ + inputs: { + is_parallel: true, + parallel_nums: 5, + error_handle_mode: 'terminated', + }, + changeParallel: jest.fn(), + changeParallelNums: jest.fn(), + changeErrorHandleMode: jest.fn(), + }), +})) + +// Mock other components +jest.mock('@/app/components/workflow/nodes/_base/components/variable/var-reference-picker', () => { + return function MockVarReferencePicker() { + return
    VarReferencePicker
    + } +}) + +jest.mock('@/app/components/workflow/nodes/_base/components/split', () => { + return function MockSplit() { + return
    Split
    + } +}) + +jest.mock('@/app/components/workflow/nodes/_base/components/field', () => { + return function MockField({ title, children }: { title: string, children: React.ReactNode }) { + return ( +
    + + {children} +
    + ) + } +}) + +jest.mock('@/app/components/base/switch', () => { + return function MockSwitch({ defaultValue }: { defaultValue: boolean }) { + return + } +}) + +jest.mock('@/app/components/base/select', () => { + return function MockSelect() { + return + } +}) + +// Use defaultValue to avoid controlled input warnings +jest.mock('@/app/components/base/slider', () => { + return function MockSlider({ value, max, min }: { value: number, max: number, min: number }) { + return ( + + ) + } +}) + +// Use defaultValue to avoid controlled input warnings +jest.mock('@/app/components/base/input', () => { + return function MockInput({ type, max, min, value }: { type: string, max: number, min: number, value: number }) { + return ( + + ) + } +}) + +describe('MAX_PARALLEL_LIMIT Configuration Bug', () => { + const mockNodeData = { + id: 'test-iteration-node', + type: 'iteration' as const, + data: { + title: 'Test Iteration', + desc: 'Test iteration node', + iterator_selector: ['test'], + output_selector: ['output'], + is_parallel: true, + parallel_nums: 5, + error_handle_mode: 'terminated' as const, + }, + } + + beforeEach(() => { + jest.clearAllMocks() + }) + + afterEach(() => { + restoreEnvironment() + }) + + afterAll(() => { + restoreEnvironment() + }) + + describe('Environment Variable Parsing', () => { + it('should parse MAX_PARALLEL_LIMIT from NEXT_PUBLIC_MAX_PARALLEL_LIMIT environment variable', () => { + setupEnvironment('25') + const { MAX_PARALLEL_LIMIT } = require('@/config') + expect(MAX_PARALLEL_LIMIT).toBe(25) + }) + + it('should fallback to default when environment variable is not set', () => { + setupEnvironment() // No environment variable + const { MAX_PARALLEL_LIMIT } = require('@/config') + expect(MAX_PARALLEL_LIMIT).toBe(10) + }) + + it('should handle invalid environment variable values', () => { + setupEnvironment('invalid') + const { MAX_PARALLEL_LIMIT } = require('@/config') + + // Should fall back to default when parsing fails + expect(MAX_PARALLEL_LIMIT).toBe(10) + }) + + it('should handle empty environment variable', () => { + setupEnvironment('') + const { MAX_PARALLEL_LIMIT } = require('@/config') + + // Should fall back to default when empty + expect(MAX_PARALLEL_LIMIT).toBe(10) + }) + + // Edge cases for boundary values + it('should clamp MAX_PARALLEL_LIMIT to MIN when env is 0 or negative', () => { + setupEnvironment('0') + let { MAX_PARALLEL_LIMIT } = require('@/config') + expect(MAX_PARALLEL_LIMIT).toBe(10) // Falls back to default + + setupEnvironment('-5') + ;({ MAX_PARALLEL_LIMIT } = require('@/config')) + expect(MAX_PARALLEL_LIMIT).toBe(10) // Falls back to default + }) + + it('should handle float numbers by parseInt behavior', () => { + setupEnvironment('12.7') + const { MAX_PARALLEL_LIMIT } = require('@/config') + // parseInt truncates to integer + expect(MAX_PARALLEL_LIMIT).toBe(12) + }) + }) + + describe('UI Component Integration (Main Fix Verification)', () => { + it('should render iteration panel with environment-configured max value', () => { + // Set environment variable to a different value + setupEnvironment('30') + + // Import Panel after setting environment + const Panel = require('@/app/components/workflow/nodes/iteration/panel').default + const { MAX_PARALLEL_LIMIT } = require('@/config') + + render( + , + ) + + // Behavior-focused assertion: UI max should equal MAX_PARALLEL_LIMIT + const numberInput = screen.getByTestId('number-input') + expect(numberInput).toHaveAttribute('data-max', String(MAX_PARALLEL_LIMIT)) + + const slider = screen.getByTestId('slider') + expect(slider).toHaveAttribute('data-max', String(MAX_PARALLEL_LIMIT)) + + // Verify the actual values + expect(MAX_PARALLEL_LIMIT).toBe(30) + expect(numberInput.getAttribute('data-max')).toBe('30') + expect(slider.getAttribute('data-max')).toBe('30') + }) + + it('should maintain UI consistency with different environment values', () => { + setupEnvironment('15') + const Panel = require('@/app/components/workflow/nodes/iteration/panel').default + const { MAX_PARALLEL_LIMIT } = require('@/config') + + render( + , + ) + + // Both input and slider should use the same max value from MAX_PARALLEL_LIMIT + const numberInput = screen.getByTestId('number-input') + const slider = screen.getByTestId('slider') + + expect(numberInput.getAttribute('data-max')).toBe(slider.getAttribute('data-max')) + expect(numberInput.getAttribute('data-max')).toBe(String(MAX_PARALLEL_LIMIT)) + }) + }) + + describe('Legacy Constant Verification (For Transition Period)', () => { + // Marked as transition/deprecation tests + it('should maintain MAX_ITERATION_PARALLEL_NUM for backward compatibility', () => { + const { MAX_ITERATION_PARALLEL_NUM } = require('@/app/components/workflow/constants') + expect(typeof MAX_ITERATION_PARALLEL_NUM).toBe('number') + expect(MAX_ITERATION_PARALLEL_NUM).toBe(10) // Hardcoded legacy value + }) + + it('should demonstrate MAX_PARALLEL_LIMIT vs legacy constant difference', () => { + setupEnvironment('50') + const { MAX_PARALLEL_LIMIT } = require('@/config') + const { MAX_ITERATION_PARALLEL_NUM } = require('@/app/components/workflow/constants') + + // MAX_PARALLEL_LIMIT is configurable, MAX_ITERATION_PARALLEL_NUM is not + expect(MAX_PARALLEL_LIMIT).toBe(50) + expect(MAX_ITERATION_PARALLEL_NUM).toBe(10) + expect(MAX_PARALLEL_LIMIT).not.toBe(MAX_ITERATION_PARALLEL_NUM) + }) + }) + + describe('Constants Validation', () => { + it('should validate that required constants exist and have correct types', () => { + const { MAX_PARALLEL_LIMIT } = require('@/config') + const { MIN_ITERATION_PARALLEL_NUM } = require('@/app/components/workflow/constants') + expect(typeof MAX_PARALLEL_LIMIT).toBe('number') + expect(typeof MIN_ITERATION_PARALLEL_NUM).toBe('number') + expect(MAX_PARALLEL_LIMIT).toBeGreaterThanOrEqual(MIN_ITERATION_PARALLEL_NUM) + }) + }) +}) diff --git a/web/app/components/workflow/constants.ts b/web/app/components/workflow/constants.ts index 0ef4dc9dea..5bf053e2c5 100644 --- a/web/app/components/workflow/constants.ts +++ b/web/app/components/workflow/constants.ts @@ -437,14 +437,6 @@ export const NODE_LAYOUT_HORIZONTAL_PADDING = 60 export const NODE_LAYOUT_VERTICAL_PADDING = 60 export const NODE_LAYOUT_MIN_DISTANCE = 100 -let maxParallelLimit = 10 - -if (process.env.NEXT_PUBLIC_MAX_PARALLEL_LIMIT && process.env.NEXT_PUBLIC_MAX_PARALLEL_LIMIT !== '') - maxParallelLimit = Number.parseInt(process.env.NEXT_PUBLIC_MAX_PARALLEL_LIMIT) -else if (globalThis.document?.body?.getAttribute('data-public-max-parallel-limit') && globalThis.document.body.getAttribute('data-public-max-parallel-limit') !== '') - maxParallelLimit = Number.parseInt(globalThis.document.body.getAttribute('data-public-max-parallel-limit') as string) - -export const PARALLEL_LIMIT = maxParallelLimit export const PARALLEL_DEPTH_LIMIT = 3 export const RETRIEVAL_OUTPUT_STRUCT = `{ diff --git a/web/app/components/workflow/hooks/use-workflow.ts b/web/app/components/workflow/hooks/use-workflow.ts index 8bc9d3436f..f9120f45b1 100644 --- a/web/app/components/workflow/hooks/use-workflow.ts +++ b/web/app/components/workflow/hooks/use-workflow.ts @@ -30,7 +30,6 @@ import { } from '../utils' import { PARALLEL_DEPTH_LIMIT, - PARALLEL_LIMIT, SUPPORT_OUTPUT_VARS_NODE, } from '../constants' import { CUSTOM_NOTE_NODE } from '../note-node/constants' @@ -48,6 +47,7 @@ import { CUSTOM_ITERATION_START_NODE } from '@/app/components/workflow/nodes/ite import { CUSTOM_LOOP_START_NODE } from '@/app/components/workflow/nodes/loop-start/constants' import { basePath } from '@/utils/var' import { canFindTool } from '@/utils' +import { MAX_PARALLEL_LIMIT } from '@/config' export const useIsChatMode = () => { const appDetail = useAppStore(s => s.appDetail) @@ -270,8 +270,6 @@ export const useWorkflow = () => { }) setNodes(newNodes) } - - // eslint-disable-next-line react-hooks/exhaustive-deps }, [store]) const isVarUsedInNodes = useCallback((varSelector: ValueSelector) => { @@ -310,9 +308,9 @@ export const useWorkflow = () => { edges, } = store.getState() const connectedEdges = edges.filter(edge => edge.source === nodeId && edge.sourceHandle === nodeHandle) - if (connectedEdges.length > PARALLEL_LIMIT - 1) { + if (connectedEdges.length > MAX_PARALLEL_LIMIT - 1) { const { setShowTips } = workflowStore.getState() - setShowTips(t('workflow.common.parallelTip.limit', { num: PARALLEL_LIMIT })) + setShowTips(t('workflow.common.parallelTip.limit', { num: MAX_PARALLEL_LIMIT })) return false } diff --git a/web/app/components/workflow/nodes/iteration/panel.tsx b/web/app/components/workflow/nodes/iteration/panel.tsx index 4b529f0785..23e93b0dd5 100644 --- a/web/app/components/workflow/nodes/iteration/panel.tsx +++ b/web/app/components/workflow/nodes/iteration/panel.tsx @@ -3,7 +3,7 @@ import React from 'react' import { useTranslation } from 'react-i18next' import VarReferencePicker from '../_base/components/variable/var-reference-picker' import Split from '../_base/components/split' -import { MAX_ITERATION_PARALLEL_NUM, MIN_ITERATION_PARALLEL_NUM } from '../../constants' +import { MIN_ITERATION_PARALLEL_NUM } from '../../constants' import type { IterationNodeType } from './types' import useConfig from './use-config' import { ErrorHandleMode, type NodePanelProps } from '@/app/components/workflow/types' @@ -12,6 +12,7 @@ import Switch from '@/app/components/base/switch' import Select from '@/app/components/base/select' import Slider from '@/app/components/base/slider' import Input from '@/app/components/base/input' +import { MAX_PARALLEL_LIMIT } from '@/config' const i18nPrefix = 'workflow.nodes.iteration' @@ -96,11 +97,11 @@ const Panel: FC> = ({ inputs.is_parallel && (
    {t(`${i18nPrefix}.MaxParallelismDesc`)}
    }>
    - { changeParallelNums(Number(e.target.value)) }} /> + { changeParallelNums(Number(e.target.value)) }} /> diff --git a/web/config/index.ts b/web/config/index.ts index 667723aaaf..4a8b07d6e4 100644 --- a/web/config/index.ts +++ b/web/config/index.ts @@ -13,12 +13,18 @@ const getBooleanConfig = (envVar: string | undefined, dataAttrKey: DatasetAttr, } const getNumberConfig = (envVar: string | undefined, dataAttrKey: DatasetAttr, defaultValue: number) => { - if (envVar) - return Number.parseInt(envVar) + if (envVar) { + const parsed = Number.parseInt(envVar) + if (!Number.isNaN(parsed) && parsed > 0) + return parsed + } const attrValue = globalThis.document?.body?.getAttribute(dataAttrKey) - if (attrValue) - return Number.parseInt(attrValue) + if (attrValue) { + const parsed = Number.parseInt(attrValue) + if (!Number.isNaN(parsed) && parsed > 0) + return parsed + } return defaultValue } @@ -265,6 +271,7 @@ export const FULL_DOC_PREVIEW_LENGTH = 50 export const JSON_SCHEMA_MAX_DEPTH = 10 export const MAX_TOOLS_NUM = getNumberConfig(process.env.NEXT_PUBLIC_MAX_TOOLS_NUM, DatasetAttr.DATA_PUBLIC_MAX_TOOLS_NUM, 10) +export const MAX_PARALLEL_LIMIT = getNumberConfig(process.env.NEXT_PUBLIC_MAX_PARALLEL_LIMIT, DatasetAttr.DATA_PUBLIC_MAX_PARALLEL_LIMIT, 10) export const TEXT_GENERATION_TIMEOUT_MS = getNumberConfig(process.env.NEXT_PUBLIC_TEXT_GENERATION_TIMEOUT_MS, DatasetAttr.DATA_PUBLIC_TEXT_GENERATION_TIMEOUT_MS, 60000) export const LOOP_NODE_MAX_COUNT = getNumberConfig(process.env.NEXT_PUBLIC_LOOP_NODE_MAX_COUNT, DatasetAttr.DATA_PUBLIC_LOOP_NODE_MAX_COUNT, 100) export const MAX_ITERATIONS_NUM = getNumberConfig(process.env.NEXT_PUBLIC_MAX_ITERATIONS_NUM, DatasetAttr.DATA_PUBLIC_MAX_ITERATIONS_NUM, 99) From ab7c2cf000b1567a8ed5c28a9ff8c06dae1e579c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E5=91=86=E8=90=8C=E9=97=B7=E6=B2=B9=E7=93=B6?= <253605712@qq.com> Date: Tue, 29 Jul 2025 21:40:03 +0800 Subject: [PATCH 060/415] minor fix: Object of type int64 is not JSON serializable (#23109) --- api/services/annotation_service.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/api/services/annotation_service.py b/api/services/annotation_service.py index 80dd63bf89..cfa917daf6 100644 --- a/api/services/annotation_service.py +++ b/api/services/annotation_service.py @@ -280,7 +280,7 @@ class AppAnnotationService: try: # Skip the first row - df = pd.read_csv(file) + df = pd.read_csv(file, dtype=str) result = [] for index, row in df.iterrows(): content = {"question": row.iloc[0], "answer": row.iloc[1]} From 72a2c3decf2b53d1c74c8f04ddabe4762b76827c Mon Sep 17 00:00:00 2001 From: baonudesifeizhai <85092850+baonudesifeizhai@users.noreply.github.com> Date: Tue, 29 Jul 2025 09:40:15 -0400 Subject: [PATCH 061/415] Fix/http node timeout validation#23077 (#23117) Co-authored-by: crazywoola <427733928@qq.com> --- .../nodes/http/components/timeout/index.tsx | 16 +++++++++++++--- 1 file changed, 13 insertions(+), 3 deletions(-) diff --git a/web/app/components/workflow/nodes/http/components/timeout/index.tsx b/web/app/components/workflow/nodes/http/components/timeout/index.tsx index b0fd3b229e..40ebab0e2a 100644 --- a/web/app/components/workflow/nodes/http/components/timeout/index.tsx +++ b/web/app/components/workflow/nodes/http/components/timeout/index.tsx @@ -20,7 +20,7 @@ const InputField: FC<{ description: string placeholder: string value?: number - onChange: (value: number) => void + onChange: (value: number | undefined) => void readOnly?: boolean min: number max: number @@ -35,8 +35,18 @@ const InputField: FC<{ type='number' value={value} onChange={(e) => { - const value = Math.max(min, Math.min(max, Number.parseInt(e.target.value, 10))) - onChange(value) + const inputValue = e.target.value + if (inputValue === '') { + // When user clears the input, set to undefined to let backend use default values + onChange(undefined) + } + else { + const parsedValue = Number.parseInt(inputValue, 10) + if (!Number.isNaN(parsedValue)) { + const value = Math.max(min, Math.min(max, parsedValue)) + onChange(value) + } + } }} placeholder={placeholder} readOnly={readOnly} From 0ea010d7eeba9badaef6cf1d6f4fd94fffcb1dcd Mon Sep 17 00:00:00 2001 From: lyzno1 <92089059+lyzno1@users.noreply.github.com> Date: Wed, 30 Jul 2025 10:33:24 +0800 Subject: [PATCH 062/415] fix: metadata API nullable validation consistency issue (#23133) Co-authored-by: autofix-ci[bot] <114827586+autofix-ci[bot]@users.noreply.github.com> --- api/controllers/console/datasets/metadata.py | 8 +- .../service_api/dataset/metadata.py | 8 +- .../services/test_metadata_bug_complete.py | 189 ++++++++++++++++++ .../services/test_metadata_nullable_bug.py | 108 ++++++++++ 4 files changed, 305 insertions(+), 8 deletions(-) create mode 100644 api/tests/unit_tests/services/test_metadata_bug_complete.py create mode 100644 api/tests/unit_tests/services/test_metadata_nullable_bug.py diff --git a/api/controllers/console/datasets/metadata.py b/api/controllers/console/datasets/metadata.py index b1a83aa371..65f76fb402 100644 --- a/api/controllers/console/datasets/metadata.py +++ b/api/controllers/console/datasets/metadata.py @@ -22,8 +22,8 @@ class DatasetMetadataCreateApi(Resource): @marshal_with(dataset_metadata_fields) def post(self, dataset_id): parser = reqparse.RequestParser() - parser.add_argument("type", type=str, required=True, nullable=True, location="json") - parser.add_argument("name", type=str, required=True, nullable=True, location="json") + parser.add_argument("type", type=str, required=True, nullable=False, location="json") + parser.add_argument("name", type=str, required=True, nullable=False, location="json") args = parser.parse_args() metadata_args = MetadataArgs(**args) @@ -56,7 +56,7 @@ class DatasetMetadataApi(Resource): @marshal_with(dataset_metadata_fields) def patch(self, dataset_id, metadata_id): parser = reqparse.RequestParser() - parser.add_argument("name", type=str, required=True, nullable=True, location="json") + parser.add_argument("name", type=str, required=True, nullable=False, location="json") args = parser.parse_args() dataset_id_str = str(dataset_id) @@ -127,7 +127,7 @@ class DocumentMetadataEditApi(Resource): DatasetService.check_dataset_permission(dataset, current_user) parser = reqparse.RequestParser() - parser.add_argument("operation_data", type=list, required=True, nullable=True, location="json") + parser.add_argument("operation_data", type=list, required=True, nullable=False, location="json") args = parser.parse_args() metadata_args = MetadataOperationData(**args) diff --git a/api/controllers/service_api/dataset/metadata.py b/api/controllers/service_api/dataset/metadata.py index 1968696ee5..6ba818c5fc 100644 --- a/api/controllers/service_api/dataset/metadata.py +++ b/api/controllers/service_api/dataset/metadata.py @@ -17,8 +17,8 @@ class DatasetMetadataCreateServiceApi(DatasetApiResource): @cloud_edition_billing_rate_limit_check("knowledge", "dataset") def post(self, tenant_id, dataset_id): parser = reqparse.RequestParser() - parser.add_argument("type", type=str, required=True, nullable=True, location="json") - parser.add_argument("name", type=str, required=True, nullable=True, location="json") + parser.add_argument("type", type=str, required=True, nullable=False, location="json") + parser.add_argument("name", type=str, required=True, nullable=False, location="json") args = parser.parse_args() metadata_args = MetadataArgs(**args) @@ -43,7 +43,7 @@ class DatasetMetadataServiceApi(DatasetApiResource): @cloud_edition_billing_rate_limit_check("knowledge", "dataset") def patch(self, tenant_id, dataset_id, metadata_id): parser = reqparse.RequestParser() - parser.add_argument("name", type=str, required=True, nullable=True, location="json") + parser.add_argument("name", type=str, required=True, nullable=False, location="json") args = parser.parse_args() dataset_id_str = str(dataset_id) @@ -101,7 +101,7 @@ class DocumentMetadataEditServiceApi(DatasetApiResource): DatasetService.check_dataset_permission(dataset, current_user) parser = reqparse.RequestParser() - parser.add_argument("operation_data", type=list, required=True, nullable=True, location="json") + parser.add_argument("operation_data", type=list, required=True, nullable=False, location="json") args = parser.parse_args() metadata_args = MetadataOperationData(**args) diff --git a/api/tests/unit_tests/services/test_metadata_bug_complete.py b/api/tests/unit_tests/services/test_metadata_bug_complete.py new file mode 100644 index 0000000000..c4c7579e83 --- /dev/null +++ b/api/tests/unit_tests/services/test_metadata_bug_complete.py @@ -0,0 +1,189 @@ +from unittest.mock import Mock, patch + +import pytest +from flask_restful import reqparse +from werkzeug.exceptions import BadRequest + +from services.entities.knowledge_entities.knowledge_entities import MetadataArgs +from services.metadata_service import MetadataService + + +class TestMetadataBugCompleteValidation: + """Complete test suite to verify the metadata nullable bug and its fix.""" + + def test_1_pydantic_layer_validation(self): + """Test Layer 1: Pydantic model validation correctly rejects None values.""" + # Pydantic should reject None values for required fields + with pytest.raises((ValueError, TypeError)): + MetadataArgs(type=None, name=None) + + with pytest.raises((ValueError, TypeError)): + MetadataArgs(type="string", name=None) + + with pytest.raises((ValueError, TypeError)): + MetadataArgs(type=None, name="test") + + # Valid values should work + valid_args = MetadataArgs(type="string", name="test_name") + assert valid_args.type == "string" + assert valid_args.name == "test_name" + + def test_2_business_logic_layer_crashes_on_none(self): + """Test Layer 2: Business logic crashes when None values slip through.""" + # Create mock that bypasses Pydantic validation + mock_metadata_args = Mock() + mock_metadata_args.name = None + mock_metadata_args.type = "string" + + with patch("services.metadata_service.current_user") as mock_user: + mock_user.current_tenant_id = "tenant-123" + mock_user.id = "user-456" + + # Should crash with TypeError + with pytest.raises(TypeError, match="object of type 'NoneType' has no len"): + MetadataService.create_metadata("dataset-123", mock_metadata_args) + + # Test update method as well + with patch("services.metadata_service.current_user") as mock_user: + mock_user.current_tenant_id = "tenant-123" + mock_user.id = "user-456" + + with pytest.raises(TypeError, match="object of type 'NoneType' has no len"): + MetadataService.update_metadata_name("dataset-123", "metadata-456", None) + + def test_3_database_constraints_verification(self): + """Test Layer 3: Verify database model has nullable=False constraints.""" + from sqlalchemy import inspect + + from models.dataset import DatasetMetadata + + # Get table info + mapper = inspect(DatasetMetadata) + + # Check that type and name columns are not nullable + type_column = mapper.columns["type"] + name_column = mapper.columns["name"] + + assert type_column.nullable is False, "type column should be nullable=False" + assert name_column.nullable is False, "name column should be nullable=False" + + def test_4_fixed_api_layer_rejects_null(self, app): + """Test Layer 4: Fixed API configuration properly rejects null values.""" + # Test Console API create endpoint (fixed) + parser = reqparse.RequestParser() + parser.add_argument("type", type=str, required=True, nullable=False, location="json") + parser.add_argument("name", type=str, required=True, nullable=False, location="json") + + with app.test_request_context(json={"type": None, "name": None}, content_type="application/json"): + with pytest.raises(BadRequest): + parser.parse_args() + + # Test with just name being null + with app.test_request_context(json={"type": "string", "name": None}, content_type="application/json"): + with pytest.raises(BadRequest): + parser.parse_args() + + # Test with just type being null + with app.test_request_context(json={"type": None, "name": "test"}, content_type="application/json"): + with pytest.raises(BadRequest): + parser.parse_args() + + def test_5_fixed_api_accepts_valid_values(self, app): + """Test that fixed API still accepts valid non-null values.""" + parser = reqparse.RequestParser() + parser.add_argument("type", type=str, required=True, nullable=False, location="json") + parser.add_argument("name", type=str, required=True, nullable=False, location="json") + + with app.test_request_context(json={"type": "string", "name": "valid_name"}, content_type="application/json"): + args = parser.parse_args() + assert args["type"] == "string" + assert args["name"] == "valid_name" + + def test_6_simulated_buggy_behavior(self, app): + """Test simulating the original buggy behavior with nullable=True.""" + # Simulate the old buggy configuration + buggy_parser = reqparse.RequestParser() + buggy_parser.add_argument("type", type=str, required=True, nullable=True, location="json") + buggy_parser.add_argument("name", type=str, required=True, nullable=True, location="json") + + with app.test_request_context(json={"type": None, "name": None}, content_type="application/json"): + # This would pass in the buggy version + args = buggy_parser.parse_args() + assert args["type"] is None + assert args["name"] is None + + # But would crash when trying to create MetadataArgs + with pytest.raises((ValueError, TypeError)): + MetadataArgs(**args) + + def test_7_end_to_end_validation_layers(self): + """Test all validation layers work together correctly.""" + # Layer 1: API should reject null at parameter level (with fix) + # Layer 2: Pydantic should reject null at model level + # Layer 3: Business logic expects non-null + # Layer 4: Database enforces non-null + + # Test that valid data flows through all layers + valid_data = {"type": "string", "name": "test_metadata"} + + # Should create valid Pydantic object + metadata_args = MetadataArgs(**valid_data) + assert metadata_args.type == "string" + assert metadata_args.name == "test_metadata" + + # Should not crash in business logic length check + assert len(metadata_args.name) <= 255 # This should not crash + assert len(metadata_args.type) > 0 # This should not crash + + def test_8_verify_specific_fix_locations(self): + """Verify that the specific locations mentioned in bug report are fixed.""" + # Read the actual files to verify fixes + import os + + # Console API create + console_create_file = "api/controllers/console/datasets/metadata.py" + if os.path.exists(console_create_file): + with open(console_create_file) as f: + content = f.read() + # Should contain nullable=False, not nullable=True + assert "nullable=True" not in content.split("class DatasetMetadataCreateApi")[1].split("class")[0] + + # Service API create + service_create_file = "api/controllers/service_api/dataset/metadata.py" + if os.path.exists(service_create_file): + with open(service_create_file) as f: + content = f.read() + # Should contain nullable=False, not nullable=True + create_api_section = content.split("class DatasetMetadataCreateServiceApi")[1].split("class")[0] + assert "nullable=True" not in create_api_section + + +class TestMetadataValidationSummary: + """Summary tests that demonstrate the complete validation architecture.""" + + def test_validation_layer_architecture(self): + """Document and test the 4-layer validation architecture.""" + # Layer 1: API Parameter Validation (Flask-RESTful reqparse) + # - Role: First line of defense, validates HTTP request parameters + # - Fixed: nullable=False ensures null values are rejected at API boundary + + # Layer 2: Pydantic Model Validation + # - Role: Validates data structure and types before business logic + # - Working: Required fields without Optional[] reject None values + + # Layer 3: Business Logic Validation + # - Role: Domain-specific validation (length checks, uniqueness, etc.) + # - Vulnerable: Direct len() calls crash on None values + + # Layer 4: Database Constraints + # - Role: Final data integrity enforcement + # - Working: nullable=False prevents None values in database + + # The bug was: Layer 1 allowed None, but Layers 2-4 expected non-None + # The fix: Make Layer 1 consistent with Layers 2-4 + + assert True # This test documents the architecture + + +if __name__ == "__main__": + pytest.main([__file__, "-v"]) diff --git a/api/tests/unit_tests/services/test_metadata_nullable_bug.py b/api/tests/unit_tests/services/test_metadata_nullable_bug.py new file mode 100644 index 0000000000..ef4d05c1d9 --- /dev/null +++ b/api/tests/unit_tests/services/test_metadata_nullable_bug.py @@ -0,0 +1,108 @@ +from unittest.mock import Mock, patch + +import pytest +from flask_restful import reqparse + +from services.entities.knowledge_entities.knowledge_entities import MetadataArgs +from services.metadata_service import MetadataService + + +class TestMetadataNullableBug: + """Test case to reproduce the metadata nullable validation bug.""" + + def test_metadata_args_with_none_values_should_fail(self): + """Test that MetadataArgs validation should reject None values.""" + # This test demonstrates the expected behavior - should fail validation + with pytest.raises((ValueError, TypeError)): + # This should fail because Pydantic expects non-None values + MetadataArgs(type=None, name=None) + + def test_metadata_service_create_with_none_name_crashes(self): + """Test that MetadataService.create_metadata crashes when name is None.""" + # Mock the MetadataArgs to bypass Pydantic validation + mock_metadata_args = Mock() + mock_metadata_args.name = None # This will cause len() to crash + mock_metadata_args.type = "string" + + with patch("services.metadata_service.current_user") as mock_user: + mock_user.current_tenant_id = "tenant-123" + mock_user.id = "user-456" + + # This should crash with TypeError when calling len(None) + with pytest.raises(TypeError, match="object of type 'NoneType' has no len"): + MetadataService.create_metadata("dataset-123", mock_metadata_args) + + def test_metadata_service_update_with_none_name_crashes(self): + """Test that MetadataService.update_metadata_name crashes when name is None.""" + with patch("services.metadata_service.current_user") as mock_user: + mock_user.current_tenant_id = "tenant-123" + mock_user.id = "user-456" + + # This should crash with TypeError when calling len(None) + with pytest.raises(TypeError, match="object of type 'NoneType' has no len"): + MetadataService.update_metadata_name("dataset-123", "metadata-456", None) + + def test_api_parser_accepts_null_values(self, app): + """Test that API parser configuration incorrectly accepts null values.""" + # Simulate the current API parser configuration + parser = reqparse.RequestParser() + parser.add_argument("type", type=str, required=True, nullable=True, location="json") + parser.add_argument("name", type=str, required=True, nullable=True, location="json") + + # Simulate request data with null values + with app.test_request_context(json={"type": None, "name": None}, content_type="application/json"): + # This should parse successfully due to nullable=True + args = parser.parse_args() + + # Verify that null values are accepted + assert args["type"] is None + assert args["name"] is None + + # This demonstrates the bug: API accepts None but business logic will crash + + def test_integration_bug_scenario(self, app): + """Test the complete bug scenario from API to service layer.""" + # Step 1: API parser accepts null values (current buggy behavior) + parser = reqparse.RequestParser() + parser.add_argument("type", type=str, required=True, nullable=True, location="json") + parser.add_argument("name", type=str, required=True, nullable=True, location="json") + + with app.test_request_context(json={"type": None, "name": None}, content_type="application/json"): + args = parser.parse_args() + + # Step 2: Try to create MetadataArgs with None values + # This should fail at Pydantic validation level + with pytest.raises((ValueError, TypeError)): + metadata_args = MetadataArgs(**args) + + # Step 3: If we bypass Pydantic (simulating the bug scenario) + # Move this outside the request context to avoid Flask-Login issues + mock_metadata_args = Mock() + mock_metadata_args.name = None # From args["name"] + mock_metadata_args.type = None # From args["type"] + + with patch("services.metadata_service.current_user") as mock_user: + mock_user.current_tenant_id = "tenant-123" + mock_user.id = "user-456" + + # Step 4: Service layer crashes on len(None) + with pytest.raises(TypeError, match="object of type 'NoneType' has no len"): + MetadataService.create_metadata("dataset-123", mock_metadata_args) + + def test_correct_nullable_false_configuration_works(self, app): + """Test that the correct nullable=False configuration works as expected.""" + # This tests the FIXED configuration + parser = reqparse.RequestParser() + parser.add_argument("type", type=str, required=True, nullable=False, location="json") + parser.add_argument("name", type=str, required=True, nullable=False, location="json") + + with app.test_request_context(json={"type": None, "name": None}, content_type="application/json"): + # This should fail with BadRequest due to nullable=False + from werkzeug.exceptions import BadRequest + + with pytest.raises(BadRequest): + parser.parse_args() + + +if __name__ == "__main__": + pytest.main([__file__, "-v"]) From f17ca26b10f6cf0d754fc82d57035496fc87d205 Mon Sep 17 00:00:00 2001 From: Yongtao Huang Date: Wed, 30 Jul 2025 10:34:24 +0800 Subject: [PATCH 063/415] Fix: add missing db.session.close() to ensure proper session cleanup (#23122) --- api/tasks/add_document_to_index_task.py | 2 ++ api/tasks/create_segment_to_index_task.py | 1 + api/tasks/document_indexing_sync_task.py | 2 ++ 3 files changed, 5 insertions(+) diff --git a/api/tasks/add_document_to_index_task.py b/api/tasks/add_document_to_index_task.py index a2105f8a9d..c5ee4ce3f9 100644 --- a/api/tasks/add_document_to_index_task.py +++ b/api/tasks/add_document_to_index_task.py @@ -32,6 +32,7 @@ def add_document_to_index_task(dataset_document_id: str): return if dataset_document.indexing_status != "completed": + db.session.close() return indexing_cache_key = f"document_{dataset_document.id}_indexing" @@ -112,3 +113,4 @@ def add_document_to_index_task(dataset_document_id: str): db.session.commit() finally: redis_client.delete(indexing_cache_key) + db.session.close() diff --git a/api/tasks/create_segment_to_index_task.py b/api/tasks/create_segment_to_index_task.py index a8839ffc17..543a512851 100644 --- a/api/tasks/create_segment_to_index_task.py +++ b/api/tasks/create_segment_to_index_task.py @@ -31,6 +31,7 @@ def create_segment_to_index_task(segment_id: str, keywords: Optional[list[str]] return if segment.status != "waiting": + db.session.close() return indexing_cache_key = f"segment_{segment.id}_indexing" diff --git a/api/tasks/document_indexing_sync_task.py b/api/tasks/document_indexing_sync_task.py index 56f330b964..993b2ac404 100644 --- a/api/tasks/document_indexing_sync_task.py +++ b/api/tasks/document_indexing_sync_task.py @@ -113,3 +113,5 @@ def document_indexing_sync_task(dataset_id: str, document_id: str): logging.info(click.style(str(ex), fg="yellow")) except Exception: logging.exception("document_indexing_sync_task failed, document_id: %s", document_id) + finally: + db.session.close() From ab163a5f75fb1857e486b496c3831f3a8dc3c768 Mon Sep 17 00:00:00 2001 From: Yongtao Huang Date: Wed, 30 Jul 2025 10:34:51 +0800 Subject: [PATCH 064/415] Chore: use `Workflow.VERSION_DRAFT` instead of hardcoded `draft` (#23136) Co-authored-by: autofix-ci[bot] <114827586+autofix-ci[bot]@users.noreply.github.com> --- api/core/tools/workflow_as_tool/tool.py | 2 +- api/services/workflow/workflow_converter.py | 2 +- api/services/workflow_service.py | 16 +++++++++------- 3 files changed, 11 insertions(+), 9 deletions(-) diff --git a/api/core/tools/workflow_as_tool/tool.py b/api/core/tools/workflow_as_tool/tool.py index 962b9f7a81..db6b84082f 100644 --- a/api/core/tools/workflow_as_tool/tool.py +++ b/api/core/tools/workflow_as_tool/tool.py @@ -142,7 +142,7 @@ class WorkflowTool(Tool): if not version: workflow = ( db.session.query(Workflow) - .where(Workflow.app_id == app_id, Workflow.version != "draft") + .where(Workflow.app_id == app_id, Workflow.version != Workflow.VERSION_DRAFT) .order_by(Workflow.created_at.desc()) .first() ) diff --git a/api/services/workflow/workflow_converter.py b/api/services/workflow/workflow_converter.py index abf6824d73..afcf1f7621 100644 --- a/api/services/workflow/workflow_converter.py +++ b/api/services/workflow/workflow_converter.py @@ -185,7 +185,7 @@ class WorkflowConverter: tenant_id=app_model.tenant_id, app_id=app_model.id, type=WorkflowType.from_app_mode(new_app_mode).value, - version="draft", + version=Workflow.VERSION_DRAFT, graph=json.dumps(graph), features=json.dumps(features), created_by=account_id, diff --git a/api/services/workflow_service.py b/api/services/workflow_service.py index e9f21fc5f1..8588144980 100644 --- a/api/services/workflow_service.py +++ b/api/services/workflow_service.py @@ -105,7 +105,9 @@ class WorkflowService: workflow = ( db.session.query(Workflow) .where( - Workflow.tenant_id == app_model.tenant_id, Workflow.app_id == app_model.id, Workflow.version == "draft" + Workflow.tenant_id == app_model.tenant_id, + Workflow.app_id == app_model.id, + Workflow.version == Workflow.VERSION_DRAFT, ) .first() ) @@ -219,7 +221,7 @@ class WorkflowService: tenant_id=app_model.tenant_id, app_id=app_model.id, type=WorkflowType.from_app_mode(app_model.mode).value, - version="draft", + version=Workflow.VERSION_DRAFT, graph=json.dumps(graph), features=json.dumps(features), created_by=account.id, @@ -257,7 +259,7 @@ class WorkflowService: draft_workflow_stmt = select(Workflow).where( Workflow.tenant_id == app_model.tenant_id, Workflow.app_id == app_model.id, - Workflow.version == "draft", + Workflow.version == Workflow.VERSION_DRAFT, ) draft_workflow = session.scalar(draft_workflow_stmt) if not draft_workflow: @@ -382,9 +384,9 @@ class WorkflowService: tenant_id=app_model.tenant_id, ) - eclosing_node_type_and_id = draft_workflow.get_enclosing_node_type_and_id(node_config) - if eclosing_node_type_and_id: - _, enclosing_node_id = eclosing_node_type_and_id + enclosing_node_type_and_id = draft_workflow.get_enclosing_node_type_and_id(node_config) + if enclosing_node_type_and_id: + _, enclosing_node_id = enclosing_node_type_and_id else: enclosing_node_id = None @@ -644,7 +646,7 @@ class WorkflowService: raise ValueError(f"Workflow with ID {workflow_id} not found") # Check if workflow is a draft version - if workflow.version == "draft": + if workflow.version == Workflow.VERSION_DRAFT: raise DraftWorkflowDeletionError("Cannot delete draft workflow versions") # Check if this workflow is currently referenced by an app From 0b44edaca97cfa4b96aeb0c308c748f0ffbab7c4 Mon Sep 17 00:00:00 2001 From: znn Date: Wed, 30 Jul 2025 08:06:03 +0530 Subject: [PATCH 065/415] request fail when no api key (#23135) --- api/core/workflow/nodes/http_request/executor.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/api/core/workflow/nodes/http_request/executor.py b/api/core/workflow/nodes/http_request/executor.py index 8ac1ae8526..fe103c7117 100644 --- a/api/core/workflow/nodes/http_request/executor.py +++ b/api/core/workflow/nodes/http_request/executor.py @@ -265,9 +265,9 @@ class Executor: if not authorization.config.header: authorization.config.header = "Authorization" - if self.auth.config.type == "bearer": + if self.auth.config.type == "bearer" and authorization.config.api_key: headers[authorization.config.header] = f"Bearer {authorization.config.api_key}" - elif self.auth.config.type == "basic": + elif self.auth.config.type == "basic" and authorization.config.api_key: credentials = authorization.config.api_key if ":" in credentials: encoded_credentials = base64.b64encode(credentials.encode("utf-8")).decode("utf-8") From a51998e4aa0c81730562d8ef22eb7b31f4258227 Mon Sep 17 00:00:00 2001 From: Yongtao Huang Date: Wed, 30 Jul 2025 10:37:06 +0800 Subject: [PATCH 066/415] Fix: prevent KeyError in validate_api_list by correcting logical check (#23126) --- api/services/external_knowledge_service.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/api/services/external_knowledge_service.py b/api/services/external_knowledge_service.py index b7af03e91f..2f1babba6f 100644 --- a/api/services/external_knowledge_service.py +++ b/api/services/external_knowledge_service.py @@ -46,9 +46,9 @@ class ExternalDatasetService: def validate_api_list(cls, api_settings: dict): if not api_settings: raise ValueError("api list is empty") - if "endpoint" not in api_settings and not api_settings["endpoint"]: + if not api_settings.get("endpoint"): raise ValueError("endpoint is required") - if "api_key" not in api_settings and not api_settings["api_key"]: + if not api_settings.get("api_key"): raise ValueError("api_key is required") @staticmethod From a3ef869db69ddb540fa029c7c243da3d3ec34c45 Mon Sep 17 00:00:00 2001 From: lyzno1 <92089059+lyzno1@users.noreply.github.com> Date: Wed, 30 Jul 2025 10:37:44 +0800 Subject: [PATCH 067/415] fix(i18n): clean up unused keys and fix nesting & placeholder issues (#23123) --- web/i18n-config/README.md | 2 +- web/i18n/de-DE/app-debug.ts | 1 - web/i18n/de-DE/app.ts | 19 --------- web/i18n/de-DE/billing.ts | 54 +++++++++++--------------- web/i18n/de-DE/common.ts | 1 - web/i18n/de-DE/dataset-creation.ts | 2 - web/i18n/de-DE/dataset-documents.ts | 2 - web/i18n/de-DE/dataset-hit-testing.ts | 1 - web/i18n/de-DE/login.ts | 1 - web/i18n/de-DE/run-log.ts | 1 - web/i18n/de-DE/tools.ts | 1 - web/i18n/de-DE/workflow.ts | 5 --- web/i18n/es-ES/app-debug.ts | 15 ------- web/i18n/es-ES/app.ts | 14 ------- web/i18n/es-ES/billing.ts | 54 +++++++++++--------------- web/i18n/es-ES/common.ts | 1 - web/i18n/es-ES/dataset-creation.ts | 2 - web/i18n/es-ES/dataset-documents.ts | 1 - web/i18n/es-ES/dataset-hit-testing.ts | 1 - web/i18n/es-ES/login.ts | 1 - web/i18n/es-ES/tools.ts | 1 - web/i18n/es-ES/workflow.ts | 10 +---- web/i18n/fa-IR/workflow.ts | 2 - web/i18n/fr-FR/workflow.ts | 2 - web/i18n/hi-IN/workflow.ts | 2 - web/i18n/it-IT/workflow.ts | 2 - web/i18n/ja-JP/app-annotation.ts | 3 -- web/i18n/ja-JP/app-debug.ts | 2 +- web/i18n/ja-JP/app.ts | 14 ------- web/i18n/ja-JP/common.ts | 1 - web/i18n/ja-JP/login.ts | 1 - web/i18n/ja-JP/tools.ts | 1 - web/i18n/ja-JP/workflow.ts | 3 -- web/i18n/ko-KR/app-debug.ts | 15 ------- web/i18n/ko-KR/app.ts | 21 +--------- web/i18n/ko-KR/billing.ts | 56 +++++++++++---------------- web/i18n/ko-KR/common.ts | 1 - web/i18n/ko-KR/dataset-creation.ts | 2 - web/i18n/ko-KR/dataset-documents.ts | 1 - web/i18n/ko-KR/dataset-hit-testing.ts | 1 - web/i18n/ko-KR/login.ts | 1 - web/i18n/ko-KR/tools.ts | 1 - web/i18n/ko-KR/workflow.ts | 7 ---- web/i18n/pl-PL/workflow.ts | 2 - web/i18n/pt-BR/workflow.ts | 2 - web/i18n/ro-RO/workflow.ts | 2 - web/i18n/ru-RU/workflow.ts | 2 - web/i18n/th-TH/workflow.ts | 1 - web/i18n/tr-TR/workflow.ts | 2 - web/i18n/uk-UA/workflow.ts | 2 - web/i18n/vi-VN/workflow.ts | 2 - web/i18n/zh-Hant/workflow.ts | 2 - 52 files changed, 70 insertions(+), 276 deletions(-) diff --git a/web/i18n-config/README.md b/web/i18n-config/README.md index dacda966dd..8a69b92c36 100644 --- a/web/i18n-config/README.md +++ b/web/i18n-config/README.md @@ -36,7 +36,7 @@ This directory contains the internationalization (i18n) files for this project. We use English as the default language. The i18n files are organized by language and then by module. For example, the English translation for the `app` module is in `en-US/app.ts`. -If you want to add a new language or modify an existing translation, you can create a new file for the language or modify the existing file. The file name should be the language code (e.g., `zh-CN` for Chinese) and the file extension should be `.ts`. +If you want to add a new language or modify an existing translation, you can create a new file for the language or modify the existing file. The file name should be the language code (e.g., `zh-Hans` for Chinese) and the file extension should be `.ts`. For example, if you want to add french translation, you can create a new folder `fr-FR` and add the translation files in it. diff --git a/web/i18n/de-DE/app-debug.ts b/web/i18n/de-DE/app-debug.ts index 1d7ebc3854..93511faf55 100644 --- a/web/i18n/de-DE/app-debug.ts +++ b/web/i18n/de-DE/app-debug.ts @@ -276,7 +276,6 @@ const translation = { queryNoBeEmpty: 'Anfrage muss im Prompt gesetzt sein', }, variableConfig: { - modalTitle: 'Feldeinstellungen', description: 'Einstellung für Variable {{varName}}', fieldType: 'Feldtyp', string: 'Kurztext', diff --git a/web/i18n/de-DE/app.ts b/web/i18n/de-DE/app.ts index 31221e8f0b..0013a89561 100644 --- a/web/i18n/de-DE/app.ts +++ b/web/i18n/de-DE/app.ts @@ -2,7 +2,6 @@ const translation = { createApp: 'Neue App erstellen', types: { all: 'Alle', - assistant: 'Assistent', completion: 'Vervollständigung', workflow: 'Arbeitsablauf', agent: 'Agent', @@ -11,8 +10,6 @@ const translation = { advanced: 'Chatflow', }, modes: { - completion: 'Textgenerator', - chat: 'Basisassistent', }, createFromConfigFile: 'App aus Konfigurationsdatei erstellen', deleteAppConfirmTitle: 'Diese App löschen?', @@ -24,11 +21,8 @@ const translation = { communityIntro: 'Diskutieren Sie mit Teammitgliedern, Mitwirkenden und Entwicklern auf verschiedenen Kanälen.', roadmap: 'Sehen Sie unseren Fahrplan', - appNamePlaceholder: 'Bitte geben Sie den Namen der App ein', newApp: { - startToCreate: 'Lassen Sie uns mit Ihrer neuen App beginnen', captionName: 'App-Symbol & Name', - captionAppType: 'Welchen Typ von App möchten Sie erstellen?', previewDemo: 'Vorschau-Demo', chatApp: 'Assistent', chatAppIntro: @@ -46,25 +40,12 @@ const translation = { appTypeRequired: 'Bitte wählen Sie einen App-Typ', appCreated: 'App erstellt', appCreateFailed: 'Erstellen der App fehlgeschlagen', - basic: 'Grundlegend', - chatbotType: 'Chatbot-Orchestrierungsmethode', - workflowDescription: 'Erstellen Sie eine Anwendung, die qualitativ hochwertigen Text auf der Grundlage von Workflow-Orchestrierungen mit einem hohen Maß an Anpassung generiert. Es ist für erfahrene Benutzer geeignet.', - advancedFor: 'Für Fortgeschrittene', startFromTemplate: 'Aus Vorlage erstellen', appNamePlaceholder: 'Geben Sie Ihrer App einen Namen', startFromBlank: 'Aus Leer erstellen', - basicTip: 'Für Anfänger können Sie später zu Chatflow wechseln', - basicDescription: 'Basic Orchestrate ermöglicht die Orchestrierung einer Chatbot-App mit einfachen Einstellungen, ohne die Möglichkeit, integrierte Eingabeaufforderungen zu ändern. Es ist für Anfänger geeignet.', workflowWarning: 'Derzeit in der Beta-Phase', - advancedDescription: 'Workflow Orchestrate orchestriert Chatbots in Form von Workflows und bietet ein hohes Maß an Individualisierung, einschließlich der Möglichkeit, integrierte Eingabeaufforderungen zu bearbeiten. Es ist für erfahrene Benutzer geeignet.', - basicFor: 'FÜR ANFÄNGER', - completionWarning: 'Diese Art von App wird nicht mehr unterstützt.', - chatbotDescription: 'Erstellen Sie eine chatbasierte Anwendung. Diese App verwendet ein Frage-und-Antwort-Format, das mehrere Runden kontinuierlicher Konversation ermöglicht.', captionDescription: 'Beschreibung', - advanced: 'Chatflow', useTemplate: 'Diese Vorlage verwenden', - agentDescription: 'Erstellen Sie einen intelligenten Agenten, der autonom Werkzeuge auswählen kann, um die Aufgaben zu erledigen', - completionDescription: 'Erstellen Sie eine Anwendung, die qualitativ hochwertigen Text auf der Grundlage von Eingabeaufforderungen generiert, z. B. zum Generieren von Artikeln, Zusammenfassungen, Übersetzungen und mehr.', appDescriptionPlaceholder: 'Geben Sie die Beschreibung der App ein', caution: 'Vorsicht', Confirm: 'Bestätigen', diff --git a/web/i18n/de-DE/billing.ts b/web/i18n/de-DE/billing.ts index f0a0f1990a..656b95c257 100644 --- a/web/i18n/de-DE/billing.ts +++ b/web/i18n/de-DE/billing.ts @@ -23,18 +23,13 @@ const translation = { contractSales: 'Vertrieb kontaktieren', contractOwner: 'Teammanager kontaktieren', startForFree: 'Kostenlos starten', - getStartedWith: 'Beginnen Sie mit ', contactSales: 'Vertrieb kontaktieren', talkToSales: 'Mit dem Vertrieb sprechen', modelProviders: 'Modellanbieter', - teamMembers: 'Teammitglieder', buildApps: 'Apps bauen', vectorSpace: 'Vektorraum', - vectorSpaceBillingTooltip: 'Jedes 1MB kann ungefähr 1,2 Millionen Zeichen an vektorisierten Daten speichern (geschätzt mit OpenAI Embeddings, variiert je nach Modell).', vectorSpaceTooltip: 'Vektorraum ist das Langzeitspeichersystem, das erforderlich ist, damit LLMs Ihre Daten verstehen können.', - documentsUploadQuota: 'Dokumenten-Upload-Kontingent', documentProcessingPriority: 'Priorität der Dokumentenverarbeitung', - documentProcessingPriorityTip: 'Für eine höhere Dokumentenverarbeitungspriorität, bitte Ihren Tarif upgraden.', documentProcessingPriorityUpgrade: 'Mehr Daten mit höherer Genauigkeit bei schnelleren Geschwindigkeiten verarbeiten.', priority: { 'standard': 'Standard', @@ -103,61 +98,52 @@ const translation = { sandbox: { name: 'Sandbox', description: '200 mal GPT kostenlos testen', - includesTitle: 'Beinhaltet:', for: 'Kostenlose Testversion der Kernfunktionen', }, professional: { name: 'Professionell', description: 'Für Einzelpersonen und kleine Teams, um mehr Leistung erschwinglich freizuschalten.', - includesTitle: 'Alles im kostenlosen Tarif, plus:', for: 'Für unabhängige Entwickler/kleine Teams', }, team: { name: 'Team', description: 'Zusammenarbeiten ohne Grenzen und Top-Leistung genießen.', - includesTitle: 'Alles im Professionell-Tarif, plus:', for: 'Für mittelgroße Teams', }, enterprise: { name: 'Unternehmen', description: 'Erhalten Sie volle Fähigkeiten und Unterstützung für großangelegte, missionskritische Systeme.', includesTitle: 'Alles im Team-Tarif, plus:', - features: { - 2: 'Exklusive Unternehmensfunktionen', - 8: 'Professioneller technischer Support', - 6: 'Erweiterte Sicherheits- und Kontrollsysteme', - 4: 'SSO', - 0: 'Enterprise-Grade Skalierbare Bereitstellungslösungen', - 3: 'Mehrere Arbeitsbereiche und Unternehmensverwaltung', - 1: 'Kommerzielle Lizenzgenehmigung', - 5: 'Verhandelte SLAs durch Dify-Partner', - 7: 'Updates und Wartung von Dify offiziell', - }, btnText: 'Vertrieb kontaktieren', price: 'Benutzerdefiniert', priceTip: 'Jährliche Abrechnung nur', for: 'Für große Teams', + features: [ + 'Skalierbare Bereitstellungslösungen in Unternehmensqualität', + 'Kommerzielle Lizenzierung', + 'Exklusive Enterprise-Funktionen', + 'Mehrere Arbeitsbereiche und Unternehmensverwaltung', + 'SSO (Single Sign-On)', + 'Vereinbarte SLAs mit Dify-Partnern', + 'Erweiterte Sicherheitsfunktionen und Kontrollen', + 'Offizielle Updates und Wartung durch Dify', + 'Professioneller technischer Support', + ], }, community: { - features: { - 2: 'Entspricht der Dify Open Source Lizenz', - 1: 'Einzelner Arbeitsbereich', - 0: 'Alle Kernfunktionen wurden im öffentlichen Repository veröffentlicht.', - }, description: 'Für Einzelbenutzer, kleine Teams oder nicht-kommerzielle Projekte', for: 'Für Einzelbenutzer, kleine Teams oder nicht-kommerzielle Projekte', btnText: 'Beginnen Sie mit der Gemeinschaft', price: 'Kostenlos', includesTitle: 'Kostenlose Funktionen:', name: 'Gemeinschaft', + features: [ + 'Alle Kernfunktionen im öffentlichen Repository veröffentlicht', + 'Einzelner Arbeitsbereich', + 'Entspricht der Dify Open-Source-Lizenz', + ], }, premium: { - features: { - 2: 'WebApp-Logo und Branding-Anpassung', - 0: 'Selbstverwaltete Zuverlässigkeit durch verschiedene Cloud-Anbieter', - 3: 'Priorisierte E-Mail- und Chat-Unterstützung', - 1: 'Einzelner Arbeitsbereich', - }, includesTitle: 'Alles aus der Community, plus:', name: 'Premium', priceTip: 'Basierend auf dem Cloud-Marktplatz', @@ -166,6 +152,12 @@ const translation = { comingSoon: 'Microsoft Azure- und Google Cloud-Support demnächst verfügbar', description: 'Für mittelgroße Organisationen und Teams', price: 'Skalierbar', + features: [ + 'Selbstverwaltete Zuverlässigkeit durch verschiedene Cloud-Anbieter', + 'Einzelner Arbeitsbereich', + 'Anpassung von WebApp-Logo und Branding', + 'Bevorzugter E-Mail- und Chat-Support', + ], }, }, vectorSpace: { @@ -173,8 +165,6 @@ const translation = { fullSolution: 'Upgraden Sie Ihren Tarif, um mehr Speicherplatz zu erhalten.', }, apps: { - fullTipLine1: 'Upgraden Sie Ihren Tarif, um', - fullTipLine2: 'mehr Apps zu bauen.', contactUs: 'Kontaktieren Sie uns', fullTip1: 'Upgrade, um mehr Apps zu erstellen', fullTip2des: 'Es wird empfohlen, inaktive Anwendungen zu bereinigen, um Speicherplatz freizugeben, oder uns zu kontaktieren.', diff --git a/web/i18n/de-DE/common.ts b/web/i18n/de-DE/common.ts index b8efe31ebc..a533cbf175 100644 --- a/web/i18n/de-DE/common.ts +++ b/web/i18n/de-DE/common.ts @@ -197,7 +197,6 @@ const translation = { showAppLength: '{{length}} Apps anzeigen', delete: 'Konto löschen', deleteTip: 'Wenn Sie Ihr Konto löschen, werden alle Ihre Daten dauerhaft gelöscht und können nicht wiederhergestellt werden.', - deleteConfirmTip: 'Zur Bestätigung senden Sie bitte Folgendes von Ihrer registrierten E-Mail-Adresse an ', myAccount: 'Mein Konto', studio: 'Dify Studio', account: 'Konto', diff --git a/web/i18n/de-DE/dataset-creation.ts b/web/i18n/de-DE/dataset-creation.ts index a34533806b..a26feb314a 100644 --- a/web/i18n/de-DE/dataset-creation.ts +++ b/web/i18n/de-DE/dataset-creation.ts @@ -1,8 +1,6 @@ const translation = { steps: { header: { - creation: 'Wissen erstellen', - update: 'Daten hinzufügen', fallbackRoute: 'Wissen', }, one: 'Datenquelle wählen', diff --git a/web/i18n/de-DE/dataset-documents.ts b/web/i18n/de-DE/dataset-documents.ts index 70f1f29cf7..438bcb708d 100644 --- a/web/i18n/de-DE/dataset-documents.ts +++ b/web/i18n/de-DE/dataset-documents.ts @@ -146,7 +146,6 @@ const translation = { journalConferenceName: 'Zeitschrift/Konferenzname', volumeIssuePage: 'Band/Ausgabe/Seite', DOI: 'DOI', - topicKeywords: 'Themen/Schlüsselwörter', abstract: 'Zusammenfassung', topicsKeywords: 'Themen/Stichworte', }, @@ -343,7 +342,6 @@ const translation = { keywords: 'Schlüsselwörter', addKeyWord: 'Schlüsselwort hinzufügen', keywordError: 'Die maximale Länge des Schlüsselworts beträgt 20', - characters: 'Zeichen', hitCount: 'Abrufanzahl', vectorHash: 'Vektor-Hash: ', questionPlaceholder: 'Frage hier hinzufügen', diff --git a/web/i18n/de-DE/dataset-hit-testing.ts b/web/i18n/de-DE/dataset-hit-testing.ts index 840a3c81f1..eb8c80c31d 100644 --- a/web/i18n/de-DE/dataset-hit-testing.ts +++ b/web/i18n/de-DE/dataset-hit-testing.ts @@ -2,7 +2,6 @@ const translation = { title: 'Abruf-Test', desc: 'Testen Sie die Treffereffektivität des Wissens anhand des gegebenen Abfragetextes.', dateTimeFormat: 'MM/DD/YYYY hh:mm A', - recents: 'Kürzlich', table: { header: { source: 'Quelle', diff --git a/web/i18n/de-DE/login.ts b/web/i18n/de-DE/login.ts index 42af65d0f0..7ef0e2420a 100644 --- a/web/i18n/de-DE/login.ts +++ b/web/i18n/de-DE/login.ts @@ -70,7 +70,6 @@ const translation = { activated: 'Jetzt anmelden', adminInitPassword: 'Admin-Initialpasswort', validate: 'Validieren', - sso: 'Mit SSO fortfahren', checkCode: { didNotReceiveCode: 'Sie haben den Code nicht erhalten?', verificationCodePlaceholder: 'Geben Sie den 6-stelligen Code ein', diff --git a/web/i18n/de-DE/run-log.ts b/web/i18n/de-DE/run-log.ts index a9617b6a6a..873054973f 100644 --- a/web/i18n/de-DE/run-log.ts +++ b/web/i18n/de-DE/run-log.ts @@ -21,7 +21,6 @@ const translation = { resultEmpty: { title: 'Dieser Lauf gibt nur das JSON-Format aus', tipLeft: 'Bitte gehen Sie zum ', - Link: 'Detailpanel', tipRight: 'ansehen.', link: 'Gruppe Detail', }, diff --git a/web/i18n/de-DE/tools.ts b/web/i18n/de-DE/tools.ts index 4e63cdd315..d684e3bd77 100644 --- a/web/i18n/de-DE/tools.ts +++ b/web/i18n/de-DE/tools.ts @@ -54,7 +54,6 @@ const translation = { keyTooltip: 'Http Header Key, Sie können es bei "Authorization" belassen, wenn Sie nicht wissen, was es ist, oder auf einen benutzerdefinierten Wert setzen', types: { none: 'Keine', - api_key: 'API-Key', apiKeyPlaceholder: 'HTTP-Headername für API-Key', apiValuePlaceholder: 'API-Key eingeben', api_key_header: 'Kopfzeile', diff --git a/web/i18n/de-DE/workflow.ts b/web/i18n/de-DE/workflow.ts index 121f5da1a2..72f9642995 100644 --- a/web/i18n/de-DE/workflow.ts +++ b/web/i18n/de-DE/workflow.ts @@ -104,10 +104,8 @@ const translation = { loadMore: 'Weitere Workflows laden', noHistory: 'Keine Geschichte', exportSVG: 'Als SVG exportieren', - noExist: 'Keine solche Variable', versionHistory: 'Versionsverlauf', publishUpdate: 'Update veröffentlichen', - referenceVar: 'Referenzvariable', exportImage: 'Bild exportieren', exportJPEG: 'Als JPEG exportieren', exitVersions: 'Ausgangsversionen', @@ -222,7 +220,6 @@ const translation = { tabs: { 'tools': 'Werkzeuge', 'allTool': 'Alle', - 'builtInTool': 'Eingebaut', 'customTool': 'Benutzerdefiniert', 'workflowTool': 'Arbeitsablauf', 'question-understand': 'Fragen verstehen', @@ -587,7 +584,6 @@ const translation = { 'not empty': 'ist nicht leer', 'null': 'ist null', 'not null': 'ist nicht null', - 'regex match': 'Regex-Übereinstimmung', 'not exists': 'existiert nicht', 'in': 'in', 'all of': 'alle', @@ -610,7 +606,6 @@ const translation = { }, select: 'Auswählen', addSubVariable: 'Untervariable', - condition: 'Bedingung', }, variableAssigner: { title: 'Variablen zuweisen', diff --git a/web/i18n/es-ES/app-debug.ts b/web/i18n/es-ES/app-debug.ts index 78b3329403..dbdc32c36b 100644 --- a/web/i18n/es-ES/app-debug.ts +++ b/web/i18n/es-ES/app-debug.ts @@ -227,21 +227,6 @@ const translation = { }, }, automatic: { - title: 'Orquestación automatizada de aplicaciones', - description: 'Describe tu escenario, Dify orquestará una aplicación para ti.', - intendedAudience: '¿Quién es el público objetivo?', - intendedAudiencePlaceHolder: 'p.ej. Estudiante', - solveProblem: '¿Qué problemas esperan que la IA pueda resolver para ellos?', - solveProblemPlaceHolder: 'p.ej. Extraer ideas y resumir información de informes y artículos largos', - generate: 'Generar', - audiencesRequired: 'Audiencia requerida', - problemRequired: 'Problema requerido', - resTitle: 'Hemos orquestado la siguiente aplicación para ti.', - apply: 'Aplicar esta orquestación', - noData: 'Describe tu caso de uso a la izquierda, la vista previa de la orquestación se mostrará aquí.', - loading: 'Orquestando la aplicación para ti...', - overwriteTitle: '¿Sobrescribir configuración existente?', - overwriteMessage: 'Aplicar esta orquestación sobrescribirá la configuración existente.', }, resetConfig: { title: '¿Confirmar restablecimiento?', diff --git a/web/i18n/es-ES/app.ts b/web/i18n/es-ES/app.ts index 55e14df838..d8f6a2cec4 100644 --- a/web/i18n/es-ES/app.ts +++ b/web/i18n/es-ES/app.ts @@ -27,21 +27,7 @@ const translation = { newApp: { startFromBlank: 'Crear desde cero', startFromTemplate: 'Crear desde plantilla', - captionAppType: '¿Qué tipo de app quieres crear?', - chatbotDescription: 'Crea una aplicación basada en chat. Esta app utiliza un formato de pregunta y respuesta, permitiendo múltiples rondas de conversación continua.', - completionDescription: 'Crea una aplicación que genera texto de alta calidad basado en prompts, como la generación de artículos, resúmenes, traducciones y más.', - completionWarning: 'Este tipo de app ya no será compatible.', - agentDescription: 'Crea un Agente inteligente que puede elegir herramientas de forma autónoma para completar tareas', - workflowDescription: 'Crea una aplicación que genera texto de alta calidad basado en flujos de trabajo con un alto grado de personalización. Es adecuado para usuarios experimentados.', workflowWarning: 'Actualmente en beta', - chatbotType: 'Método de orquestación del Chatbot', - basic: 'Básico', - basicTip: 'Para principiantes, se puede cambiar a Chatflow más adelante', - basicFor: 'PARA PRINCIPIANTES', - basicDescription: 'La Orquestación Básica permite la orquestación de una app de Chatbot utilizando configuraciones simples, sin la capacidad de modificar los prompts incorporados. Es adecuado para principiantes.', - advanced: 'Chatflow', - advancedFor: 'Para usuarios avanzados', - advancedDescription: 'La Orquestación de Flujo de Trabajo orquesta Chatbots en forma de flujos de trabajo, ofreciendo un alto grado de personalización, incluida la capacidad de editar los prompts incorporados. Es adecuado para usuarios experimentados.', captionName: 'Icono y nombre de la app', appNamePlaceholder: 'Asigna un nombre a tu app', captionDescription: 'Descripción', diff --git a/web/i18n/es-ES/billing.ts b/web/i18n/es-ES/billing.ts index 3f83dafd01..5f5bd42bff 100644 --- a/web/i18n/es-ES/billing.ts +++ b/web/i18n/es-ES/billing.ts @@ -23,19 +23,14 @@ const translation = { contractSales: 'Contactar ventas', contractOwner: 'Contactar al administrador del equipo', startForFree: 'Empezar gratis', - getStartedWith: 'Empezar con ', contactSales: 'Contactar Ventas', talkToSales: 'Hablar con Ventas', modelProviders: 'Proveedores de Modelos', - teamMembers: 'Miembros del Equipo', annotationQuota: 'Cuota de Anotación', buildApps: 'Crear Aplicaciones', vectorSpace: 'Espacio Vectorial', - vectorSpaceBillingTooltip: 'Cada 1MB puede almacenar aproximadamente 1.2 millones de caracteres de datos vectorizados (estimado utilizando OpenAI Embeddings, varía según los modelos).', vectorSpaceTooltip: 'El Espacio Vectorial es el sistema de memoria a largo plazo necesario para que los LLMs comprendan tus datos.', - documentsUploadQuota: 'Cuota de Carga de Documentos', documentProcessingPriority: 'Prioridad de Procesamiento de Documentos', - documentProcessingPriorityTip: 'Para una mayor prioridad de procesamiento de documentos, por favor actualiza tu plan.', documentProcessingPriorityUpgrade: 'Procesa más datos con mayor precisión y velocidad.', priority: { 'standard': 'Estándar', @@ -103,61 +98,52 @@ const translation = { sandbox: { name: 'Sandbox', description: 'Prueba gratuita de 200 veces GPT', - includesTitle: 'Incluye:', for: 'Prueba gratuita de capacidades básicas', }, professional: { name: 'Profesional', description: 'Para individuos y pequeños equipos que desean desbloquear más poder de manera asequible.', - includesTitle: 'Todo en el plan gratuito, más:', for: 'Para desarrolladores independientes/equipos pequeños', }, team: { name: 'Equipo', description: 'Colabora sin límites y disfruta de un rendimiento de primera categoría.', - includesTitle: 'Todo en el plan Profesional, más:', for: 'Para equipos de tamaño mediano', }, enterprise: { name: 'Empresa', description: 'Obtén capacidades completas y soporte para sistemas críticos a gran escala.', includesTitle: 'Todo en el plan Equipo, más:', - features: { - 0: 'Soluciones de implementación escalables de nivel empresarial', - 7: 'Actualizaciones y Mantenimiento por Dify Oficialmente', - 8: 'Soporte Técnico Profesional', - 3: 'Múltiples Espacios de Trabajo y Gestión Empresarial', - 1: 'Autorización de Licencia Comercial', - 2: 'Características Exclusivas de la Empresa', - 5: 'SLA negociados por Dify Partners', - 4: 'SSO', - 6: 'Seguridad y Controles Avanzados', - }, btnText: 'Contactar ventas', for: 'Para equipos de gran tamaño', price: 'Personalizado', priceTip: 'Facturación Anual Solo', + features: [ + 'Soluciones de implementación escalables a nivel empresarial', + 'Autorización de licencia comercial', + 'Funciones exclusivas para empresas', + 'Múltiples espacios de trabajo y gestión empresarial', + 'SSO (inicio de sesión único)', + 'SLAs negociados con socios de Dify', + 'Seguridad y controles avanzados', + 'Actualizaciones y mantenimiento oficiales por parte de Dify', + 'Soporte técnico profesional', + ], }, community: { - features: { - 0: 'Todas las características principales se lanzaron bajo el repositorio público', - 2: 'Cumple con la Licencia de Código Abierto de Dify', - 1: 'Espacio de trabajo único', - }, includesTitle: 'Características gratuitas:', for: 'Para usuarios individuales, pequeños equipos o proyectos no comerciales', price: 'Gratis', btnText: 'Comienza con la Comunidad', name: 'Comunidad', description: 'Para usuarios individuales, pequeños equipos o proyectos no comerciales', + features: [ + 'Todas las funciones principales publicadas en el repositorio público', + 'Espacio de trabajo único', + 'Cumple con la licencia de código abierto de Dify', + ], }, premium: { - features: { - 0: 'Confiabilidad autogestionada por varios proveedores de nube', - 1: 'Espacio de trabajo único', - 3: 'Soporte prioritario por correo electrónico y chat', - 2: 'Personalización de logotipos y marcas de WebApp', - }, description: 'Para organizaciones y equipos de tamaño mediano', comingSoon: 'Soporte de Microsoft Azure y Google Cloud disponible próximamente', btnText: 'Obtén Premium en', @@ -166,6 +152,12 @@ const translation = { includesTitle: 'Todo de Community, además:', name: 'Premium', for: 'Para organizaciones y equipos de tamaño mediano', + features: [ + 'Fiabilidad autogestionada mediante varios proveedores de nube', + 'Espacio de trabajo único', + 'Personalización del logotipo y la marca de la aplicación web', + 'Soporte prioritario por correo electrónico y chat', + ], }, }, vectorSpace: { @@ -173,8 +165,6 @@ const translation = { fullSolution: 'Actualiza tu plan para obtener más espacio.', }, apps: { - fullTipLine1: 'Actualiza tu plan para', - fullTipLine2: 'crear más aplicaciones.', fullTip1des: 'Has alcanzado el límite de aplicaciones de construcción en este plan', fullTip2des: 'Se recomienda limpiar las aplicaciones inactivas para liberar espacio de uso, o contactarnos.', fullTip1: 'Actualiza para crear más aplicaciones', diff --git a/web/i18n/es-ES/common.ts b/web/i18n/es-ES/common.ts index a904bd82b9..586319a8b7 100644 --- a/web/i18n/es-ES/common.ts +++ b/web/i18n/es-ES/common.ts @@ -201,7 +201,6 @@ const translation = { showAppLength: 'Mostrar {{length}} apps', delete: 'Eliminar cuenta', deleteTip: 'Eliminar tu cuenta borrará permanentemente todos tus datos y no se podrán recuperar.', - deleteConfirmTip: 'Para confirmar, por favor envía lo siguiente desde tu correo electrónico registrado a ', account: 'Cuenta', myAccount: 'Mi Cuenta', studio: 'Estudio Dify', diff --git a/web/i18n/es-ES/dataset-creation.ts b/web/i18n/es-ES/dataset-creation.ts index 7bb62e1ea3..c361884051 100644 --- a/web/i18n/es-ES/dataset-creation.ts +++ b/web/i18n/es-ES/dataset-creation.ts @@ -1,8 +1,6 @@ const translation = { steps: { header: { - creation: 'Crear conocimiento', - update: 'Agregar datos', fallbackRoute: 'Conocimiento', }, one: 'Elegir fuente de datos', diff --git a/web/i18n/es-ES/dataset-documents.ts b/web/i18n/es-ES/dataset-documents.ts index c22ad66800..3775873b40 100644 --- a/web/i18n/es-ES/dataset-documents.ts +++ b/web/i18n/es-ES/dataset-documents.ts @@ -342,7 +342,6 @@ const translation = { keywords: 'Palabras clave', addKeyWord: 'Agregar palabra clave', keywordError: 'La longitud máxima de la palabra clave es 20', - characters: 'caracteres', hitCount: 'Cantidad de recuperación', vectorHash: 'Hash de vector: ', questionPlaceholder: 'agregar pregunta aquí', diff --git a/web/i18n/es-ES/dataset-hit-testing.ts b/web/i18n/es-ES/dataset-hit-testing.ts index e8faebc5ea..c9fba24947 100644 --- a/web/i18n/es-ES/dataset-hit-testing.ts +++ b/web/i18n/es-ES/dataset-hit-testing.ts @@ -2,7 +2,6 @@ const translation = { title: 'Prueba de recuperación', desc: 'Prueba del efecto de impacto del conocimiento basado en el texto de consulta proporcionado.', dateTimeFormat: 'MM/DD/YYYY hh:mm A', - recents: 'Recientes', table: { header: { source: 'Fuente', diff --git a/web/i18n/es-ES/login.ts b/web/i18n/es-ES/login.ts index 9601bffa6a..fda14f3708 100644 --- a/web/i18n/es-ES/login.ts +++ b/web/i18n/es-ES/login.ts @@ -9,7 +9,6 @@ const translation = { namePlaceholder: 'Tu nombre de usuario', forget: '¿Olvidaste tu contraseña?', signBtn: 'Iniciar sesión', - sso: 'Continuar con SSO', installBtn: 'Configurar', setAdminAccount: 'Configurando una cuenta de administrador', setAdminAccountDesc: 'Privilegios máximos para la cuenta de administrador, que se puede utilizar para crear aplicaciones y administrar proveedores de LLM, etc.', diff --git a/web/i18n/es-ES/tools.ts b/web/i18n/es-ES/tools.ts index 25cc1309e9..afb6dfa1e3 100644 --- a/web/i18n/es-ES/tools.ts +++ b/web/i18n/es-ES/tools.ts @@ -82,7 +82,6 @@ const translation = { keyTooltip: 'Clave del encabezado HTTP, puedes dejarla como "Authorization" si no tienes idea de qué es o configurarla con un valor personalizado', types: { none: 'Ninguno', - api_key: 'Clave API', apiKeyPlaceholder: 'Nombre del encabezado HTTP para la Clave API', apiValuePlaceholder: 'Ingresa la Clave API', api_key_header: 'Encabezado', diff --git a/web/i18n/es-ES/workflow.ts b/web/i18n/es-ES/workflow.ts index d57a0a40f2..459121a168 100644 --- a/web/i18n/es-ES/workflow.ts +++ b/web/i18n/es-ES/workflow.ts @@ -108,9 +108,7 @@ const translation = { exitVersions: 'Versiones de salida', exportJPEG: 'Exportar como JPEG', exportPNG: 'Exportar como PNG', - referenceVar: 'Variable de referencia', publishUpdate: 'Publicar actualización', - noExist: 'No existe tal variable', exportImage: 'Exportar imagen', needAnswerNode: 'Se debe agregar el nodo de respuesta', needEndNode: 'Se debe agregar el nodo Final', @@ -222,7 +220,6 @@ const translation = { tabs: { 'tools': 'Herramientas', 'allTool': 'Todos', - 'builtInTool': 'Incorporadas', 'customTool': 'Personalizadas', 'workflowTool': 'Flujo de trabajo', 'question-understand': 'Entender pregunta', @@ -587,7 +584,6 @@ const translation = { 'not empty': 'no está vacío', 'null': 'es nulo', 'not null': 'no es nulo', - 'regex match': 'Coincidencia de expresiones regulares', 'not in': 'no en', 'in': 'en', 'exists': 'Existe', @@ -610,7 +606,6 @@ const translation = { }, select: 'Escoger', addSubVariable: 'Sub Variable', - condition: 'Condición', }, variableAssigner: { title: 'Asignar variables', @@ -771,9 +766,6 @@ const translation = { showAuthor: 'Mostrar autor', }, }, - tracing: { - stopBy: 'Detenido por {{user}}', - }, docExtractor: { outputVars: { text: 'Texto extraído', @@ -905,7 +897,7 @@ const translation = { }, }, tracing: { - stopBy: 'Pásate por {{usuario}}', + stopBy: 'Pásate por {{user}}', }, variableReference: { noAvailableVars: 'No hay variables disponibles', diff --git a/web/i18n/fa-IR/workflow.ts b/web/i18n/fa-IR/workflow.ts index f95253e73d..2f08183151 100644 --- a/web/i18n/fa-IR/workflow.ts +++ b/web/i18n/fa-IR/workflow.ts @@ -222,7 +222,6 @@ const translation = { tabs: { 'tools': 'ابزارها', 'allTool': 'همه', - 'builtInTool': 'درون‌ساخت', 'customTool': 'سفارشی', 'workflowTool': 'جریان کار', 'question-understand': 'درک سوال', @@ -587,7 +586,6 @@ const translation = { 'not empty': 'خالی نیست', 'null': 'خالی', 'not null': 'خالی نیست', - 'regex match': 'مسابقه regex', 'in': 'در', 'not exists': 'وجود ندارد', 'all of': 'همه از', diff --git a/web/i18n/fr-FR/workflow.ts b/web/i18n/fr-FR/workflow.ts index 884e3e9772..5e53a8b4ae 100644 --- a/web/i18n/fr-FR/workflow.ts +++ b/web/i18n/fr-FR/workflow.ts @@ -222,7 +222,6 @@ const translation = { tabs: { 'tools': 'Outils', 'allTool': 'Tous', - 'builtInTool': 'Intégré', 'customTool': 'Personnalisé', 'workflowTool': 'Flux de travail', 'question-understand': 'Compréhension des questions', @@ -587,7 +586,6 @@ const translation = { 'not empty': 'n\'est pas vide', 'null': 'est nul', 'not null': 'n\'est pas nul', - 'regex match': 'correspondance regex', 'in': 'dans', 'not in': 'pas dans', 'exists': 'Existe', diff --git a/web/i18n/hi-IN/workflow.ts b/web/i18n/hi-IN/workflow.ts index d613c87f6a..95ccead15f 100644 --- a/web/i18n/hi-IN/workflow.ts +++ b/web/i18n/hi-IN/workflow.ts @@ -225,7 +225,6 @@ const translation = { tabs: { 'tools': 'टूल्स', 'allTool': 'सभी', - 'builtInTool': 'अंतर्निहित', 'customTool': 'कस्टम', 'workflowTool': 'कार्यप्रवाह', 'question-understand': 'प्रश्न समझ', @@ -602,7 +601,6 @@ const translation = { 'not empty': 'खाली नहीं है', 'null': 'शून्य है', 'not null': 'शून्य नहीं है', - 'regex match': 'रेगेक्स मैच', 'in': 'में', 'all of': 'के सभी', 'not exists': 'मौजूद नहीं है', diff --git a/web/i18n/it-IT/workflow.ts b/web/i18n/it-IT/workflow.ts index 196e6f761a..ca934428a6 100644 --- a/web/i18n/it-IT/workflow.ts +++ b/web/i18n/it-IT/workflow.ts @@ -227,7 +227,6 @@ const translation = { tabs: { 'tools': 'Strumenti', 'allTool': 'Tutti', - 'builtInTool': 'Integrato', 'customTool': 'Personalizzato', 'workflowTool': 'Flusso di lavoro', 'question-understand': 'Comprensione Domanda', @@ -606,7 +605,6 @@ const translation = { 'not empty': 'non è vuoto', 'null': 'è nullo', 'not null': 'non è nullo', - 'regex match': 'Corrispondenza regex', 'in': 'in', 'all of': 'tutto di', 'not in': 'non in', diff --git a/web/i18n/ja-JP/app-annotation.ts b/web/i18n/ja-JP/app-annotation.ts index 7dbdfe018f..801be7c672 100644 --- a/web/i18n/ja-JP/app-annotation.ts +++ b/web/i18n/ja-JP/app-annotation.ts @@ -9,8 +9,6 @@ const translation = { table: { header: { question: '質問', - match: 'マッチ', - response: '応答', answer: '回答', createdAt: '作成日時', hits: 'ヒット数', @@ -71,7 +69,6 @@ const translation = { noHitHistory: 'ヒット履歴はありません', }, hitHistoryTable: { - question: '質問', query: 'クエリ', match: '一致', response: '応答', diff --git a/web/i18n/ja-JP/app-debug.ts b/web/i18n/ja-JP/app-debug.ts index d13a64213a..66e06950f5 100644 --- a/web/i18n/ja-JP/app-debug.ts +++ b/web/i18n/ja-JP/app-debug.ts @@ -254,7 +254,6 @@ const translation = { noDataLine1: '左側に使用例を記入してください,', noDataLine2: 'オーケストレーションのプレビューがこちらに表示されます。', apply: '適用', - noData: '左側にユースケースを入力すると、こちらでプレビューができます。', loading: 'アプリケーションを処理中です', overwriteTitle: '既存の設定を上書きしますか?', overwriteMessage: 'このプロンプトを適用すると、既存の設定が上書きされます。', @@ -365,6 +364,7 @@ const translation = { 'varName': '変数名', 'labelName': 'ラベル名', 'inputPlaceholder': '入力してください', + 'content': '内容', 'required': '必須', 'hide': '非表示', 'file': { diff --git a/web/i18n/ja-JP/app.ts b/web/i18n/ja-JP/app.ts index f68835c7e7..d05bf6c353 100644 --- a/web/i18n/ja-JP/app.ts +++ b/web/i18n/ja-JP/app.ts @@ -34,21 +34,7 @@ const translation = { newApp: { startFromBlank: '最初から作成', startFromTemplate: 'テンプレートから作成', - captionAppType: 'どのタイプのアプリを作成しますか?', - chatbotDescription: 'チャット形式のアプリケーションを構築します。このアプリは質問と回答の形式を使用し、複数のラウンドの継続的な会話を可能にします。', - completionDescription: 'プロンプトに基づいて高品質のテキストを生成するアプリケーションを構築します。記事、要約、翻訳などを生成します。', - completionWarning: 'この種類のアプリはもうサポートされなくなります。', - agentDescription: 'タスクを自動的に完了するためのツールを選択できるインテリジェント エージェントを構築します', - workflowDescription: '高度なカスタマイズが可能なワークフローに基づいて高品質のテキストを生成するアプリケーションを構築します。経験豊富なユーザー向けです。', workflowWarning: '現在ベータ版です', - chatbotType: 'チャットボットのオーケストレーション方法', - basic: '基本', - basicTip: '初心者向け。後で「チャットフロー」に切り替えることができます', - basicFor: '初心者向け', - basicDescription: '基本オーケストレートは、組み込みのプロンプトを変更する機能がなく、簡単な設定を使用してチャットボット アプリをオーケストレートします。初心者向けです。', - advanced: 'チャットフロー', - advancedFor: '上級ユーザー向け', - advancedDescription: 'ワークフロー オーケストレートは、ワークフロー形式でチャットボットをオーケストレートし、組み込みのプロンプトを編集する機能を含む高度なカスタマイズを提供します。経験豊富なユーザー向けです。', captionName: 'アプリのアイコンと名前', appNamePlaceholder: 'アプリ名を入力してください', captionDescription: '説明', diff --git a/web/i18n/ja-JP/common.ts b/web/i18n/ja-JP/common.ts index bd2dd22cf0..d0a6b64d6e 100644 --- a/web/i18n/ja-JP/common.ts +++ b/web/i18n/ja-JP/common.ts @@ -215,7 +215,6 @@ const translation = { showAppLength: '{{length}}アプリを表示', delete: 'アカウントを削除', deleteTip: 'アカウントを削除すると、すべてのデータが完全に消去され、復元できなくなります。', - deleteConfirmTip: '確認のため、登録したメールから次の内容をに送信してください ', account: 'アカウント', myAccount: 'マイアカウント', studio: 'スタジオ', diff --git a/web/i18n/ja-JP/login.ts b/web/i18n/ja-JP/login.ts index b37700eba2..833bedf719 100644 --- a/web/i18n/ja-JP/login.ts +++ b/web/i18n/ja-JP/login.ts @@ -9,7 +9,6 @@ const translation = { namePlaceholder: 'ユーザー名を入力してください', forget: 'パスワードをお忘れですか?', signBtn: 'サインイン', - sso: 'SSO に続ける', installBtn: 'セットアップ', setAdminAccount: '管理者アカウントの設定', setAdminAccountDesc: 'アプリケーションの作成や LLM プロバイダの管理など、管理者アカウントの最大権限を設定します。', diff --git a/web/i18n/ja-JP/tools.ts b/web/i18n/ja-JP/tools.ts index 305cfc30cd..5eebc54fc0 100644 --- a/web/i18n/ja-JP/tools.ts +++ b/web/i18n/ja-JP/tools.ts @@ -82,7 +82,6 @@ const translation = { keyTooltip: 'HTTP ヘッダーキー。アイデアがない場合は "Authorization" として残しておいてもかまいません。またはカスタム値に設定できます。', types: { none: 'なし', - api_key: 'API キー', apiKeyPlaceholder: 'API キーの HTTP ヘッダー名', apiValuePlaceholder: 'API キーを入力してください', api_key_query: 'クエリパラメータ', diff --git a/web/i18n/ja-JP/workflow.ts b/web/i18n/ja-JP/workflow.ts index 035bba61a6..483adb402c 100644 --- a/web/i18n/ja-JP/workflow.ts +++ b/web/i18n/ja-JP/workflow.ts @@ -213,7 +213,6 @@ const translation = { startRun: '実行開始', running: '実行中', testRunIteration: 'テスト実行(イテレーション)', - testRunLoop: 'テスト実行(ループ)', back: '戻る', iteration: 'イテレーション', loop: 'ループ', @@ -592,7 +591,6 @@ const translation = { 'not empty': '空でない', 'null': 'null', 'not null': 'null でない', - 'regex match': '正規表現マッチ', 'in': '含まれている', 'not in': '含まれていない', 'all of': 'すべての', @@ -619,7 +617,6 @@ const translation = { variableAssigner: { title: '変数を代入する', outputType: '出力タイプ', - outputVarType: '出力変数のタイプ', varNotSet: '変数が設定されていません', noVarTip: '代入された変数を追加してください', type: { diff --git a/web/i18n/ko-KR/app-debug.ts b/web/i18n/ko-KR/app-debug.ts index f9bc9978d8..aade904a6b 100644 --- a/web/i18n/ko-KR/app-debug.ts +++ b/web/i18n/ko-KR/app-debug.ts @@ -227,21 +227,6 @@ const translation = { }, }, automatic: { - title: '자동 어플리케이션 오케스트레이션', - description: '시나리오를 설명하세요. Dify 가 어플리케이션을 자동으로 오케스트레이션 합니다.', - intendedAudience: '누가 대상이 되는지 설명하세요.', - intendedAudiencePlaceHolder: '예: 학생', - solveProblem: '어떤 문제를 AI 가 해결할 것으로 예상하나요?', - solveProblemPlaceHolder: '예: 학업 성적 평가', - generate: '생성', - audiencesRequired: '대상이 필요합니다', - problemRequired: '문제가 필요합니다', - resTitle: '다음 어플리케이션을 자동으로 오케스트레이션 했습니다.', - apply: '이 오케스트레이션을 적용하기', - noData: '왼쪽에 사용 예시를 기술하고, 오케스트레이션 미리보기가 여기에 나타납니다.', - loading: '어플리케이션 오케스트레이션을 실행 중입니다...', - overwriteTitle: '기존 구성을 덮어쓰시겠습니까?', - overwriteMessage: '이 오케스트레이션을 적용하면 기존 구성이 덮어쓰여집니다.', }, resetConfig: { title: '리셋을 확인하시겠습니까?', diff --git a/web/i18n/ko-KR/app.ts b/web/i18n/ko-KR/app.ts index c113947961..f0d666301a 100644 --- a/web/i18n/ko-KR/app.ts +++ b/web/i18n/ko-KR/app.ts @@ -26,29 +26,10 @@ const translation = { newApp: { startFromBlank: '빈 상태로 시작', startFromTemplate: '템플릿에서 시작', - captionAppType: '어떤 종류의 앱을 만들어 보시겠어요?', - chatbotDescription: - '대화형 어플리케이션을 만듭니다. 질문과 답변 형식을 사용하여 다단계 대화를 지원합니다.', - completionDescription: - '프롬프트를 기반으로 품질 높은 텍스트를 생성하는 어플리케이션을 만듭니다. 기사, 요약, 번역 등을 생성할 수 있습니다.', - completionWarning: '이 종류의 앱은 더 이상 지원되지 않습니다.', - agentDescription: '작업을 자동으로 완료하는 지능형 에이전트를 만듭니다.', - workflowDescription: - '고도로 사용자 지정 가능한 워크플로우에 기반한 고품질 텍스트 생성 어플리케이션을 만듭니다. 경험 있는 사용자를 위한 것입니다.', - workflowWarning: '현재 베타 버전입니다.', - chatbotType: '챗봇 오케스트레이션 방식', - basic: '기본', - basicTip: '초보자용. 나중에 Chatflow 로 전환할 수 있습니다.', - basicFor: '초보자용', - basicDescription: - '기본 오케스트레이션은 내장된 프롬프트를 수정할 수 없고 간단한 설정을 사용하여 챗봇 앱을 오케스트레이션합니다. 초보자용입니다.', - advanced: 'Chatflow', - advancedFor: '고급 사용자용', - advancedDescription: - '워크플로우 오케스트레이션은 워크플로우 형식으로 챗봇을 오케스트레이션하며 내장된 프롬프트를 편집할 수 있는 고급 사용자 정의 기능을 제공합니다. 경험이 많은 사용자용입니다.', captionName: '앱 아이콘과 이름', appNamePlaceholder: '앱 이름을 입력하세요', captionDescription: '설명', + workflowWarning: '현재 베타 버전입니다', appDescriptionPlaceholder: '앱 설명을 입력하세요', useTemplate: '이 템플릿 사용', previewDemo: '데모 미리보기', diff --git a/web/i18n/ko-KR/billing.ts b/web/i18n/ko-KR/billing.ts index fbb2609adc..ba746ae338 100644 --- a/web/i18n/ko-KR/billing.ts +++ b/web/i18n/ko-KR/billing.ts @@ -23,20 +23,14 @@ const translation = { contractSales: '영업팀에 문의하기', contractOwner: '팀 관리자에게 문의하기', startForFree: '무료로 시작하기', - getStartedWith: '시작하기 ', contactSales: '영업팀에 문의하기', talkToSales: '영업팀과 상담하기', modelProviders: '모델 제공자', - teamMembers: '팀 멤버', buildApps: '앱 만들기', vectorSpace: '벡터 공간', - vectorSpaceBillingTooltip: - '1MB 당 약 120 만 글자의 벡터화된 데이터를 저장할 수 있습니다 (OpenAI Embeddings 을 기반으로 추정되며 모델에 따라 다릅니다).', vectorSpaceTooltip: '벡터 공간은 LLM 이 데이터를 이해하는 데 필요한 장기 기억 시스템입니다.', documentProcessingPriority: '문서 처리 우선순위', - documentProcessingPriorityTip: - '더 높은 문서 처리 우선순위를 원하시면 요금제를 업그레이드하세요.', documentProcessingPriorityUpgrade: '더 높은 정확성과 빠른 속도로 데이터를 처리합니다.', priority: { @@ -85,7 +79,6 @@ const translation = { 'Dify 의 지식베이스 처리 기능을 호출하는 API 호출 수를 나타냅니다.', receiptInfo: '팀 소유자 및 팀 관리자만 구독 및 청구 정보를 볼 수 있습니다', annotationQuota: 'Annotation Quota(주석 할당량)', - documentsUploadQuota: '문서 업로드 할당량', freeTrialTipPrefix: '요금제에 가입하고 ', comparePlanAndFeatures: '계획 및 기능 비교', documents: '{{count,number}} 지식 문서', @@ -114,20 +107,17 @@ const translation = { sandbox: { name: '샌드박스', description: 'GPT 무료 체험 200 회', - includesTitle: '포함된 항목:', for: '핵심 기능 무료 체험', }, professional: { name: '프로페셔널', description: '개인 및 소규모 팀을 위해 더 많은 파워를 저렴한 가격에 제공합니다.', - includesTitle: '무료 플랜에 추가로 포함된 항목:', for: '1인 개발자/소규모 팀을 위한', }, team: { name: '팀', description: '제한 없이 협업하고 최고의 성능을 누리세요.', - includesTitle: '프로페셔널 플랜에 추가로 포함된 항목:', for: '중간 규모 팀을 위한', }, enterprise: { @@ -135,42 +125,36 @@ const translation = { description: '대규모 미션 크리티컬 시스템을 위한 완전한 기능과 지원을 제공합니다.', includesTitle: '팀 플랜에 추가로 포함된 항목:', - features: { - 2: '독점 기업 기능', - 1: '상업적 라이선스 승인', - 3: '다중 작업 공간 및 기업 관리', - 4: 'SSO', - 5: 'Dify 파트너에 의해 협상된 SLA', - 6: '고급 보안 및 제어', - 0: '기업급 확장 가능한 배포 솔루션', - 7: '디피 공식 업데이트 및 유지 관리', - 8: '전문 기술 지원', - }, price: '맞춤형', btnText: '판매 문의하기', for: '대규모 팀을 위해', priceTip: '연간 청구 전용', + features: [ + '엔터프라이즈급 확장 가능한 배포 솔루션', + '상업용 라이선스 인증', + '전용 엔터프라이즈 기능', + '다중 워크스페이스 및 엔터프라이즈 관리', + 'SSO(싱글 사인온)', + 'Dify 파트너와의 협상을 통한 SLA', + '고급 보안 및 제어 기능', + 'Dify의 공식 업데이트 및 유지 관리', + '전문 기술 지원', + ], }, community: { - features: { - 0: '모든 핵심 기능이 공개 저장소에 릴리스됨', - 2: 'Dify 오픈 소스 라이선스를 준수합니다.', - 1: '단일 작업 공간', - }, btnText: '커뮤니티 시작하기', description: '개인 사용자, 소규모 팀 또는 비상업적 프로젝트를 위한', name: '커뮤니티', price: '무료', includesTitle: '무료 기능:', for: '개인 사용자, 소규모 팀 또는 비상업적 프로젝트를 위한', + features: [ + '모든 핵심 기능이 공개 저장소에 공개됨', + '단일 워크스페이스', + 'Dify 오픈소스 라이선스를 준수함', + ], }, premium: { - features: { - 1: '단일 작업 공간', - 2: '웹앱 로고 및 브랜딩 맞춤화', - 3: '우선 이메일 및 채팅 지원', - 0: '다양한 클라우드 제공업체에 의한 자율 관리 신뢰성', - }, btnText: '프리미엄 받기', priceTip: '클라우드 마켓플레이스를 기반으로', name: '프리미엄', @@ -179,6 +163,12 @@ const translation = { price: '확장 가능', for: '중규모 조직 및 팀을 위한', includesTitle: '커뮤니티의 모든 것, 여기에 추가로:', + features: [ + '다양한 클라우드 제공업체를 통한 자가 관리 신뢰성', + '단일 워크스페이스', + '웹앱 로고 및 브랜딩 커스터마이징', + '우선 이메일 및 채팅 지원', + ], }, }, vectorSpace: { @@ -186,8 +176,6 @@ const translation = { fullSolution: '더 많은 공간을 얻으려면 요금제를 업그레이드하세요.', }, apps: { - fullTipLine1: '더 많은 앱을 생성하려면,', - fullTipLine2: '요금제를 업그레이드하세요.', contactUs: '문의하기', fullTip1: '업그레이드하여 더 많은 앱을 만들기', fullTip2: '계획 한도에 도달했습니다.', diff --git a/web/i18n/ko-KR/common.ts b/web/i18n/ko-KR/common.ts index 06f8f19ab3..e01ebbcf23 100644 --- a/web/i18n/ko-KR/common.ts +++ b/web/i18n/ko-KR/common.ts @@ -193,7 +193,6 @@ const translation = { showAppLength: '{{length}}개의 앱 표시', delete: '계정 삭제', deleteTip: '계정을 삭제하면 모든 데이터가 영구적으로 지워지며 복구할 수 없습니다.', - deleteConfirmTip: '확인하려면 등록된 이메일에서 다음 내용을 로 보내주세요 ', myAccount: '내 계정', studio: '디파이 스튜디오', account: '계정', diff --git a/web/i18n/ko-KR/dataset-creation.ts b/web/i18n/ko-KR/dataset-creation.ts index 9449d40550..1c03b0ae7d 100644 --- a/web/i18n/ko-KR/dataset-creation.ts +++ b/web/i18n/ko-KR/dataset-creation.ts @@ -1,8 +1,6 @@ const translation = { steps: { header: { - creation: '지식 생성', - update: '데이터 추가', fallbackRoute: '지식', }, one: '데이터 소스 선택', diff --git a/web/i18n/ko-KR/dataset-documents.ts b/web/i18n/ko-KR/dataset-documents.ts index d65b3b6fee..e026144f17 100644 --- a/web/i18n/ko-KR/dataset-documents.ts +++ b/web/i18n/ko-KR/dataset-documents.ts @@ -341,7 +341,6 @@ const translation = { keywords: '키워드', addKeyWord: '키워드 추가', keywordError: '키워드 최대 길이는 20 자입니다', - characters: '문자', hitCount: '검색 횟수', vectorHash: '벡터 해시: ', questionPlaceholder: '질문을 입력하세요', diff --git a/web/i18n/ko-KR/dataset-hit-testing.ts b/web/i18n/ko-KR/dataset-hit-testing.ts index 17ab7db08a..07e205fd4f 100644 --- a/web/i18n/ko-KR/dataset-hit-testing.ts +++ b/web/i18n/ko-KR/dataset-hit-testing.ts @@ -2,7 +2,6 @@ const translation = { title: '검색 테스트', desc: '주어진 쿼리 텍스트에 기반하여 지식의 검색 효과를 테스트합니다.', dateTimeFormat: 'YYYY/MM/DD HH:mm', - recents: '최근 결과', table: { header: { source: '소스', diff --git a/web/i18n/ko-KR/login.ts b/web/i18n/ko-KR/login.ts index d0a6925ee4..51b68967c2 100644 --- a/web/i18n/ko-KR/login.ts +++ b/web/i18n/ko-KR/login.ts @@ -70,7 +70,6 @@ const translation = { activated: '지금 로그인하세요', adminInitPassword: '관리자 초기화 비밀번호', validate: '확인', - sso: 'SSO 로 계속하기', checkCode: { verify: '확인', verificationCode: '인증 코드', diff --git a/web/i18n/ko-KR/tools.ts b/web/i18n/ko-KR/tools.ts index 9ff3fe1ece..d1a1d709c0 100644 --- a/web/i18n/ko-KR/tools.ts +++ b/web/i18n/ko-KR/tools.ts @@ -82,7 +82,6 @@ const translation = { keyTooltip: 'HTTP 헤더 키입니다. 생각이 없으면 "Authorization"으로 남겨둘 수 있습니다. 또는 사용자 정의 값을 설정할 수 있습니다.', types: { none: '없음', - api_key: 'API 키', apiKeyPlaceholder: 'API 키의 HTTP 헤더 이름', apiValuePlaceholder: 'API 키를 입력하세요', api_key_query: '쿼리 매개변수', diff --git a/web/i18n/ko-KR/workflow.ts b/web/i18n/ko-KR/workflow.ts index a65925f254..7a7902dfdc 100644 --- a/web/i18n/ko-KR/workflow.ts +++ b/web/i18n/ko-KR/workflow.ts @@ -111,11 +111,9 @@ const translation = { exportJPEG: 'JPEG 로 내보내기', exitVersions: '종료 버전', exportImage: '이미지 내보내기', - noExist: '해당 변수가 없습니다.', exportSVG: 'SVG 로 내보내기', versionHistory: '버전 기록', exportPNG: 'PNG 로 내보내기', - referenceVar: '참조 변수', addBlock: '노드 추가', needAnswerNode: '답변 노드를 추가해야 합니다.', needEndNode: '종단 노드를 추가해야 합니다.', @@ -231,7 +229,6 @@ const translation = { tabs: { 'tools': '도구', 'allTool': '전체', - 'builtInTool': '내장', 'customTool': '사용자 정의', 'workflowTool': '워크플로우', 'question-understand': '질문 이해', @@ -617,7 +614,6 @@ const translation = { 'not empty': '비어 있지 않음', 'null': 'null 임', 'not null': 'null 이 아님', - 'regex match': '정규식 일치', 'in': '안으로', 'exists': '존재', 'all of': '모두의', @@ -640,7 +636,6 @@ const translation = { }, select: '고르다', addSubVariable: '하위 변수', - condition: '조건', }, variableAssigner: { title: '변수 할당', @@ -761,8 +756,6 @@ const translation = { reasoningMode: '추론 모드', reasoningModeTip: '모델의 함수 호출 또는 프롬프트에 대한 지시 응답 능력을 기반으로 적절한 추론 모드를 선택할 수 있습니다.', - isSuccess: '성공 여부. 성공 시 값은 1 이고, 실패 시 값은 0 입니다.', - errorReason: '오류 원인', }, iteration: { deleteTitle: '반복 노드를 삭제하시겠습니까?', diff --git a/web/i18n/pl-PL/workflow.ts b/web/i18n/pl-PL/workflow.ts index a29ec9b8f2..56b7536879 100644 --- a/web/i18n/pl-PL/workflow.ts +++ b/web/i18n/pl-PL/workflow.ts @@ -222,7 +222,6 @@ const translation = { tabs: { 'tools': 'Narzędzia', 'allTool': 'Wszystkie', - 'builtInTool': 'Wbudowane', 'customTool': 'Niestandardowe', 'workflowTool': 'Przepływ pracy', 'question-understand': 'Zrozumienie pytania', @@ -587,7 +586,6 @@ const translation = { 'not empty': 'nie jest pusty', 'null': 'jest null', 'not null': 'nie jest null', - 'regex match': 'Dopasowanie wyrażenia regularnego', 'in': 'w', 'not exists': 'nie istnieje', 'exists': 'Istnieje', diff --git a/web/i18n/pt-BR/workflow.ts b/web/i18n/pt-BR/workflow.ts index ec870d0e17..d5820bd611 100644 --- a/web/i18n/pt-BR/workflow.ts +++ b/web/i18n/pt-BR/workflow.ts @@ -222,7 +222,6 @@ const translation = { tabs: { 'tools': 'Ferramentas', 'allTool': 'Todos', - 'builtInTool': 'Integrado', 'customTool': 'Personalizado', 'workflowTool': 'Fluxo de trabalho', 'question-understand': 'Compreensão de perguntas', @@ -587,7 +586,6 @@ const translation = { 'not empty': 'não está vazio', 'null': 'é nulo', 'not null': 'não é nulo', - 'regex match': 'partida regex', 'in': 'em', 'not in': 'não em', 'exists': 'Existe', diff --git a/web/i18n/ro-RO/workflow.ts b/web/i18n/ro-RO/workflow.ts index 5612f5d1fc..4a24f7dc00 100644 --- a/web/i18n/ro-RO/workflow.ts +++ b/web/i18n/ro-RO/workflow.ts @@ -222,7 +222,6 @@ const translation = { tabs: { 'tools': 'Instrumente', 'allTool': 'Toate', - 'builtInTool': 'Integrat', 'customTool': 'Personalizat', 'workflowTool': 'Flux de lucru', 'question-understand': 'Înțelegerea întrebărilor', @@ -587,7 +586,6 @@ const translation = { 'not empty': 'nu este gol', 'null': 'este null', 'not null': 'nu este null', - 'regex match': 'potrivire regex', 'in': 'în', 'not in': 'nu în', 'exists': 'Există', diff --git a/web/i18n/ru-RU/workflow.ts b/web/i18n/ru-RU/workflow.ts index 8ab0f04c8e..284a88c5b2 100644 --- a/web/i18n/ru-RU/workflow.ts +++ b/web/i18n/ru-RU/workflow.ts @@ -223,7 +223,6 @@ const translation = { 'searchTool': 'Поиск инструмента', 'tools': 'Инструменты', 'allTool': 'Все', - 'builtInTool': 'Встроенные', 'customTool': 'Пользовательские', 'workflowTool': 'Рабочий процесс', 'question-understand': 'Понимание вопроса', @@ -587,7 +586,6 @@ const translation = { 'not empty': 'не пусто', 'null': 'null', 'not null': 'не null', - 'regex match': 'Совпадение с регулярным выражением', 'all of': 'все', 'not in': 'не в', 'not exists': 'не существует', diff --git a/web/i18n/th-TH/workflow.ts b/web/i18n/th-TH/workflow.ts index 875f347cbb..45b61b011e 100644 --- a/web/i18n/th-TH/workflow.ts +++ b/web/i18n/th-TH/workflow.ts @@ -223,7 +223,6 @@ const translation = { 'searchTool': 'เครื่องมือค้นหา', 'tools': 'เครื่อง มือ', 'allTool': 'ทั้งหมด', - 'builtInTool': 'ในตัว', 'customTool': 'ธรรมเนียม', 'workflowTool': 'เวิร์กโฟลว์', 'question-understand': 'คําถาม: เข้าใจ', diff --git a/web/i18n/tr-TR/workflow.ts b/web/i18n/tr-TR/workflow.ts index 9572217062..8fac474b26 100644 --- a/web/i18n/tr-TR/workflow.ts +++ b/web/i18n/tr-TR/workflow.ts @@ -222,7 +222,6 @@ const translation = { tabs: { 'tools': 'Araçlar', 'allTool': 'Hepsi', - 'builtInTool': 'Yerleşik', 'customTool': 'Özel', 'workflowTool': 'Workflow', 'question-understand': 'Soruyu Anlama', @@ -588,7 +587,6 @@ const translation = { 'not empty': 'boş değil', 'null': 'null', 'not null': 'null değil', - 'regex match': 'normal ifade maçı', 'in': 'içinde', 'not exists': 'mevcut değil', 'all of': 'Tümü', diff --git a/web/i18n/uk-UA/workflow.ts b/web/i18n/uk-UA/workflow.ts index 65dfab68ad..f5cf52d8db 100644 --- a/web/i18n/uk-UA/workflow.ts +++ b/web/i18n/uk-UA/workflow.ts @@ -222,7 +222,6 @@ const translation = { tabs: { 'tools': 'Інструменти', 'allTool': 'Усі', - 'builtInTool': 'Вбудовані', 'customTool': 'Користувацькі', 'workflowTool': 'Робочий потік', 'question-understand': 'Розуміння питань', @@ -587,7 +586,6 @@ const translation = { 'not empty': 'не порожній', 'null': 'є null', 'not null': 'не є null', - 'regex match': 'Регулярний вираз збігу', 'in': 'В', 'all of': 'Всі з', 'exists': 'Існує', diff --git a/web/i18n/vi-VN/workflow.ts b/web/i18n/vi-VN/workflow.ts index ebe06807b1..77f22613b4 100644 --- a/web/i18n/vi-VN/workflow.ts +++ b/web/i18n/vi-VN/workflow.ts @@ -222,7 +222,6 @@ const translation = { tabs: { 'tools': 'Công cụ', 'allTool': 'Tất cả', - 'builtInTool': 'Tích hợp sẵn', 'customTool': 'Tùy chỉnh', 'workflowTool': 'Quy trình làm việc', 'question-understand': 'Hiểu câu hỏi', @@ -587,7 +586,6 @@ const translation = { 'not empty': 'không trống', 'null': 'là null', 'not null': 'không là null', - 'regex match': 'Trận đấu Regex', 'exists': 'Tồn tại', 'not exists': 'không tồn tại', 'not in': 'không có trong', diff --git a/web/i18n/zh-Hant/workflow.ts b/web/i18n/zh-Hant/workflow.ts index bcdfbb81d3..f522e990b0 100644 --- a/web/i18n/zh-Hant/workflow.ts +++ b/web/i18n/zh-Hant/workflow.ts @@ -222,7 +222,6 @@ const translation = { 'blocks': '節點', 'tools': '工具', 'allTool': '全部', - 'builtInTool': '內置', 'customTool': '自定義', 'workflowTool': '工作流', 'question-understand': '問題理解', @@ -585,7 +584,6 @@ const translation = { 'not empty': '不為空', 'null': '空', 'not null': '不為空', - 'regex match': '正則表達式匹配', 'all of': '全部', 'exists': '存在', 'in': '在', From eee576355b06a3cf2ba7cbaada4a4c49d2341448 Mon Sep 17 00:00:00 2001 From: rhochman Date: Wed, 30 Jul 2025 10:12:16 +0700 Subject: [PATCH 068/415] Fix: Support for Elasticsearch Cloud Connector (#23017) Co-authored-by: autofix-ci[bot] <114827586+autofix-ci[bot]@users.noreply.github.com> Co-authored-by: crazywoola <100913391+crazywoola@users.noreply.github.com> Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com> --- .../middleware/vdb/elasticsearch_config.py | 52 ++++- .../vdb/elasticsearch/elasticsearch_vector.py | 180 +++++++++++++++--- .../vdb/elasticsearch/test_elasticsearch.py | 4 +- docker/.env.example | 11 ++ docker/docker-compose.yaml | 8 + 5 files changed, 221 insertions(+), 34 deletions(-) diff --git a/api/configs/middleware/vdb/elasticsearch_config.py b/api/configs/middleware/vdb/elasticsearch_config.py index df8182985d..8c4b333d45 100644 --- a/api/configs/middleware/vdb/elasticsearch_config.py +++ b/api/configs/middleware/vdb/elasticsearch_config.py @@ -1,12 +1,13 @@ from typing import Optional -from pydantic import Field, PositiveInt +from pydantic import Field, PositiveInt, model_validator from pydantic_settings import BaseSettings class ElasticsearchConfig(BaseSettings): """ - Configuration settings for Elasticsearch + Configuration settings for both self-managed and Elastic Cloud deployments. + Can load from environment variables or .env files. """ ELASTICSEARCH_HOST: Optional[str] = Field( @@ -28,3 +29,50 @@ class ElasticsearchConfig(BaseSettings): description="Password for authenticating with Elasticsearch (default is 'elastic')", default="elastic", ) + + # Elastic Cloud (optional) + ELASTICSEARCH_USE_CLOUD: Optional[bool] = Field( + description="Set to True to use Elastic Cloud instead of self-hosted Elasticsearch", default=False + ) + ELASTICSEARCH_CLOUD_URL: Optional[str] = Field( + description="Full URL for Elastic Cloud deployment (e.g., 'https://example.es.region.aws.found.io:443')", + default=None, + ) + ELASTICSEARCH_API_KEY: Optional[str] = Field( + description="API key for authenticating with Elastic Cloud", default=None + ) + + # Common options + ELASTICSEARCH_CA_CERTS: Optional[str] = Field( + description="Path to CA certificate file for SSL verification", default=None + ) + ELASTICSEARCH_VERIFY_CERTS: bool = Field( + description="Whether to verify SSL certificates (default is False)", default=False + ) + ELASTICSEARCH_REQUEST_TIMEOUT: int = Field( + description="Request timeout in milliseconds (default is 100000)", default=100000 + ) + ELASTICSEARCH_RETRY_ON_TIMEOUT: bool = Field( + description="Whether to retry requests on timeout (default is True)", default=True + ) + ELASTICSEARCH_MAX_RETRIES: int = Field( + description="Maximum number of retry attempts (default is 10000)", default=10000 + ) + + @model_validator(mode="after") + def validate_elasticsearch_config(self): + """Validate Elasticsearch configuration based on deployment type.""" + if self.ELASTICSEARCH_USE_CLOUD: + if not self.ELASTICSEARCH_CLOUD_URL: + raise ValueError("ELASTICSEARCH_CLOUD_URL is required when using Elastic Cloud") + if not self.ELASTICSEARCH_API_KEY: + raise ValueError("ELASTICSEARCH_API_KEY is required when using Elastic Cloud") + else: + if not self.ELASTICSEARCH_HOST: + raise ValueError("ELASTICSEARCH_HOST is required for self-hosted Elasticsearch") + if not self.ELASTICSEARCH_USERNAME: + raise ValueError("ELASTICSEARCH_USERNAME is required for self-hosted Elasticsearch") + if not self.ELASTICSEARCH_PASSWORD: + raise ValueError("ELASTICSEARCH_PASSWORD is required for self-hosted Elasticsearch") + + return self diff --git a/api/core/rag/datasource/vdb/elasticsearch/elasticsearch_vector.py b/api/core/rag/datasource/vdb/elasticsearch/elasticsearch_vector.py index 832485b236..9dea050dc3 100644 --- a/api/core/rag/datasource/vdb/elasticsearch/elasticsearch_vector.py +++ b/api/core/rag/datasource/vdb/elasticsearch/elasticsearch_vector.py @@ -22,22 +22,50 @@ logger = logging.getLogger(__name__) class ElasticSearchConfig(BaseModel): - host: str - port: int - username: str - password: str + # Regular Elasticsearch config + host: Optional[str] = None + port: Optional[int] = None + username: Optional[str] = None + password: Optional[str] = None + + # Elastic Cloud specific config + cloud_url: Optional[str] = None # Cloud URL for Elasticsearch Cloud + api_key: Optional[str] = None + + # Common config + use_cloud: bool = False + ca_certs: Optional[str] = None + verify_certs: bool = False + request_timeout: int = 100000 + retry_on_timeout: bool = True + max_retries: int = 10000 @model_validator(mode="before") @classmethod def validate_config(cls, values: dict) -> dict: - if not values["host"]: - raise ValueError("config HOST is required") - if not values["port"]: - raise ValueError("config PORT is required") - if not values["username"]: - raise ValueError("config USERNAME is required") - if not values["password"]: - raise ValueError("config PASSWORD is required") + use_cloud = values.get("use_cloud", False) + cloud_url = values.get("cloud_url") + + if use_cloud: + # Cloud configuration validation - requires cloud_url and api_key + if not cloud_url: + raise ValueError("cloud_url is required for Elastic Cloud") + + api_key = values.get("api_key") + if not api_key: + raise ValueError("api_key is required for Elastic Cloud") + + else: + # Regular Elasticsearch validation + if not values.get("host"): + raise ValueError("config HOST is required for regular Elasticsearch") + if not values.get("port"): + raise ValueError("config PORT is required for regular Elasticsearch") + if not values.get("username"): + raise ValueError("config USERNAME is required for regular Elasticsearch") + if not values.get("password"): + raise ValueError("config PASSWORD is required for regular Elasticsearch") + return values @@ -50,21 +78,69 @@ class ElasticSearchVector(BaseVector): self._attributes = attributes def _init_client(self, config: ElasticSearchConfig) -> Elasticsearch: + """ + Initialize Elasticsearch client for both regular Elasticsearch and Elastic Cloud. + """ try: - parsed_url = urlparse(config.host) - if parsed_url.scheme in {"http", "https"}: - hosts = f"{config.host}:{config.port}" + # Check if using Elastic Cloud + client_config: dict[str, Any] + if config.use_cloud and config.cloud_url: + client_config = { + "request_timeout": config.request_timeout, + "retry_on_timeout": config.retry_on_timeout, + "max_retries": config.max_retries, + "verify_certs": config.verify_certs, + } + + # Parse cloud URL and configure hosts + parsed_url = urlparse(config.cloud_url) + host = f"{parsed_url.scheme}://{parsed_url.hostname}" + if parsed_url.port: + host += f":{parsed_url.port}" + + client_config["hosts"] = [host] + + # API key authentication for cloud + client_config["api_key"] = config.api_key + + # SSL settings + if config.ca_certs: + client_config["ca_certs"] = config.ca_certs + else: - hosts = f"http://{config.host}:{config.port}" - client = Elasticsearch( - hosts=hosts, - basic_auth=(config.username, config.password), - request_timeout=100000, - retry_on_timeout=True, - max_retries=10000, - ) - except requests.exceptions.ConnectionError: - raise ConnectionError("Vector database connection error") + # Regular Elasticsearch configuration + parsed_url = urlparse(config.host or "") + if parsed_url.scheme in {"http", "https"}: + hosts = f"{config.host}:{config.port}" + use_https = parsed_url.scheme == "https" + else: + hosts = f"http://{config.host}:{config.port}" + use_https = False + + client_config = { + "hosts": [hosts], + "basic_auth": (config.username, config.password), + "request_timeout": config.request_timeout, + "retry_on_timeout": config.retry_on_timeout, + "max_retries": config.max_retries, + } + + # Only add SSL settings if using HTTPS + if use_https: + client_config["verify_certs"] = config.verify_certs + if config.ca_certs: + client_config["ca_certs"] = config.ca_certs + + client = Elasticsearch(**client_config) + + # Test connection + if not client.ping(): + raise ConnectionError("Failed to connect to Elasticsearch") + + except requests.exceptions.ConnectionError as e: + raise ConnectionError(f"Vector database connection error: {str(e)}") + except Exception as e: + raise ConnectionError(f"Elasticsearch client initialization failed: {str(e)}") return client @@ -209,7 +285,11 @@ class ElasticSearchVector(BaseVector): }, } } + self._client.indices.create(index=self._collection_name, mappings=mappings) + logger.info("Created index %s with dimension %s", self._collection_name, dim) + else: + logger.info("Collection %s already exists.", self._collection_name) redis_client.set(collection_exist_cache_key, 1, ex=3600) @@ -225,13 +305,51 @@ class ElasticSearchVectorFactory(AbstractVectorFactory): dataset.index_struct = json.dumps(self.gen_index_struct_dict(VectorType.ELASTICSEARCH, collection_name)) config = current_app.config + + # Check if ELASTICSEARCH_USE_CLOUD is explicitly set to false (boolean) + use_cloud_env = config.get("ELASTICSEARCH_USE_CLOUD", False) + + if use_cloud_env is False: + # Use regular Elasticsearch with config values + config_dict = { + "use_cloud": False, + "host": config.get("ELASTICSEARCH_HOST", "elasticsearch"), + "port": config.get("ELASTICSEARCH_PORT", 9200), + "username": config.get("ELASTICSEARCH_USERNAME", "elastic"), + "password": config.get("ELASTICSEARCH_PASSWORD", "elastic"), + } + else: + # Check for cloud configuration + cloud_url = config.get("ELASTICSEARCH_CLOUD_URL") + if cloud_url: + config_dict = { + "use_cloud": True, + "cloud_url": cloud_url, + "api_key": config.get("ELASTICSEARCH_API_KEY"), + } + else: + # Fallback to regular Elasticsearch + config_dict = { + "use_cloud": False, + "host": config.get("ELASTICSEARCH_HOST", "localhost"), + "port": config.get("ELASTICSEARCH_PORT", 9200), + "username": config.get("ELASTICSEARCH_USERNAME", "elastic"), + "password": config.get("ELASTICSEARCH_PASSWORD", ""), + } + + # Common configuration + config_dict.update( + { + "ca_certs": str(config.get("ELASTICSEARCH_CA_CERTS")) if config.get("ELASTICSEARCH_CA_CERTS") else None, + "verify_certs": bool(config.get("ELASTICSEARCH_VERIFY_CERTS", False)), + "request_timeout": int(config.get("ELASTICSEARCH_REQUEST_TIMEOUT", 100000)), + "retry_on_timeout": bool(config.get("ELASTICSEARCH_RETRY_ON_TIMEOUT", True)), + "max_retries": int(config.get("ELASTICSEARCH_MAX_RETRIES", 10000)), + } + ) + return ElasticSearchVector( index_name=collection_name, - config=ElasticSearchConfig( - host=config.get("ELASTICSEARCH_HOST", "localhost"), - port=config.get("ELASTICSEARCH_PORT", 9200), - username=config.get("ELASTICSEARCH_USERNAME", ""), - password=config.get("ELASTICSEARCH_PASSWORD", ""), - ), + config=ElasticSearchConfig(**config_dict), attributes=[], ) diff --git a/api/tests/integration_tests/vdb/elasticsearch/test_elasticsearch.py b/api/tests/integration_tests/vdb/elasticsearch/test_elasticsearch.py index 2a0c1bb038..a5ff5b9e82 100644 --- a/api/tests/integration_tests/vdb/elasticsearch/test_elasticsearch.py +++ b/api/tests/integration_tests/vdb/elasticsearch/test_elasticsearch.py @@ -11,7 +11,9 @@ class ElasticSearchVectorTest(AbstractVectorTest): self.attributes = ["doc_id", "dataset_id", "document_id", "doc_hash"] self.vector = ElasticSearchVector( index_name=self.collection_name.lower(), - config=ElasticSearchConfig(host="http://localhost", port="9200", username="elastic", password="elastic"), + config=ElasticSearchConfig( + use_cloud=False, host="http://localhost", port="9200", username="elastic", password="elastic" + ), attributes=self.attributes, ) diff --git a/docker/.env.example b/docker/.env.example index 9d15ba53d3..7ecdf899fe 100644 --- a/docker/.env.example +++ b/docker/.env.example @@ -583,6 +583,17 @@ ELASTICSEARCH_USERNAME=elastic ELASTICSEARCH_PASSWORD=elastic KIBANA_PORT=5601 +# Using ElasticSearch Cloud Serverless, or not. +ELASTICSEARCH_USE_CLOUD=false +ELASTICSEARCH_CLOUD_URL=YOUR-ELASTICSEARCH_CLOUD_URL +ELASTICSEARCH_API_KEY=YOUR-ELASTICSEARCH_API_KEY + +ELASTICSEARCH_VERIFY_CERTS=False +ELASTICSEARCH_CA_CERTS= +ELASTICSEARCH_REQUEST_TIMEOUT=100000 +ELASTICSEARCH_RETRY_ON_TIMEOUT=True +ELASTICSEARCH_MAX_RETRIES=10 + # baidu vector configurations, only available when VECTOR_STORE is `baidu` BAIDU_VECTOR_DB_ENDPOINT=http://127.0.0.1:5287 BAIDU_VECTOR_DB_CONNECTION_TIMEOUT_MS=30000 diff --git a/docker/docker-compose.yaml b/docker/docker-compose.yaml index 9e0f78eb07..ae83aa758d 100644 --- a/docker/docker-compose.yaml +++ b/docker/docker-compose.yaml @@ -261,6 +261,14 @@ x-shared-env: &shared-api-worker-env ELASTICSEARCH_USERNAME: ${ELASTICSEARCH_USERNAME:-elastic} ELASTICSEARCH_PASSWORD: ${ELASTICSEARCH_PASSWORD:-elastic} KIBANA_PORT: ${KIBANA_PORT:-5601} + ELASTICSEARCH_USE_CLOUD: ${ELASTICSEARCH_USE_CLOUD:-false} + ELASTICSEARCH_CLOUD_URL: ${ELASTICSEARCH_CLOUD_URL:-YOUR-ELASTICSEARCH_CLOUD_URL} + ELASTICSEARCH_API_KEY: ${ELASTICSEARCH_API_KEY:-YOUR-ELASTICSEARCH_API_KEY} + ELASTICSEARCH_VERIFY_CERTS: ${ELASTICSEARCH_VERIFY_CERTS:-False} + ELASTICSEARCH_CA_CERTS: ${ELASTICSEARCH_CA_CERTS:-} + ELASTICSEARCH_REQUEST_TIMEOUT: ${ELASTICSEARCH_REQUEST_TIMEOUT:-100000} + ELASTICSEARCH_RETRY_ON_TIMEOUT: ${ELASTICSEARCH_RETRY_ON_TIMEOUT:-True} + ELASTICSEARCH_MAX_RETRIES: ${ELASTICSEARCH_MAX_RETRIES:-10} BAIDU_VECTOR_DB_ENDPOINT: ${BAIDU_VECTOR_DB_ENDPOINT:-http://127.0.0.1:5287} BAIDU_VECTOR_DB_CONNECTION_TIMEOUT_MS: ${BAIDU_VECTOR_DB_CONNECTION_TIMEOUT_MS:-30000} BAIDU_VECTOR_DB_ACCOUNT: ${BAIDU_VECTOR_DB_ACCOUNT:-root} From c05c5953a839b74d4b83efc544606add3e65fb9e Mon Sep 17 00:00:00 2001 From: Joel Date: Wed, 30 Jul 2025 11:15:06 +0800 Subject: [PATCH 069/415] fix: disabled auto update but still show in plugin detail (#23150) --- .../components/plugins/plugin-detail-panel/detail-header.tsx | 2 ++ 1 file changed, 2 insertions(+) diff --git a/web/app/components/plugins/plugin-detail-panel/detail-header.tsx b/web/app/components/plugins/plugin-detail-panel/detail-header.tsx index e5d1458140..9f20a0e228 100644 --- a/web/app/components/plugins/plugin-detail-panel/detail-header.tsx +++ b/web/app/components/plugins/plugin-detail-panel/detail-header.tsx @@ -124,6 +124,8 @@ const DetailHeader = ({ const isAutoUpgradeEnabled = useMemo(() => { if (!autoUpgradeInfo || !isFromMarketplace) return false + if(autoUpgradeInfo.strategy_setting === 'disabled') + return false if(autoUpgradeInfo.upgrade_mode === AUTO_UPDATE_MODE.update_all) return true if(autoUpgradeInfo.upgrade_mode === AUTO_UPDATE_MODE.partial && autoUpgradeInfo.include_plugins.includes(plugin_id)) From 4499cda186144a67dda349548acec54682553d6f Mon Sep 17 00:00:00 2001 From: GuanMu Date: Wed, 30 Jul 2025 13:40:48 +0800 Subject: [PATCH 070/415] Feat annotations panel (#22968) --- api/controllers/console/app/annotation.py | 21 ++++- api/services/annotation_service.py | 48 +++++++++++ .../app/annotation/batch-action.tsx | 79 +++++++++++++++++++ web/app/components/app/annotation/index.tsx | 29 ++++++- web/app/components/app/annotation/list.tsx | 68 +++++++++++++++- web/i18n/en-US/app-annotation.ts | 10 +++ web/i18n/zh-Hans/app-annotation.ts | 10 +++ web/service/annotation.ts | 5 ++ 8 files changed, 262 insertions(+), 8 deletions(-) create mode 100644 web/app/components/app/annotation/batch-action.tsx diff --git a/api/controllers/console/app/annotation.py b/api/controllers/console/app/annotation.py index c2ba880405..2af7136f14 100644 --- a/api/controllers/console/app/annotation.py +++ b/api/controllers/console/app/annotation.py @@ -131,8 +131,24 @@ class AnnotationListApi(Resource): raise Forbidden() app_id = str(app_id) - AppAnnotationService.clear_all_annotations(app_id) - return {"result": "success"}, 204 + + # Use request.args.getlist to get annotation_ids array directly + annotation_ids = request.args.getlist("annotation_id") + + # If annotation_ids are provided, handle batch deletion + if annotation_ids: + if not annotation_ids: + return { + "code": "bad_request", + "message": "annotation_ids are required if the parameter is provided.", + }, 400 + + result = AppAnnotationService.delete_app_annotations_in_batch(app_id, annotation_ids) + return result, 204 + # If no annotation_ids are provided, handle clearing all annotations + else: + AppAnnotationService.clear_all_annotations(app_id) + return {"result": "success"}, 204 class AnnotationExportApi(Resource): @@ -278,6 +294,7 @@ api.add_resource( ) api.add_resource(AnnotationListApi, "/apps//annotations") api.add_resource(AnnotationExportApi, "/apps//annotations/export") +api.add_resource(AnnotationCreateApi, "/apps//annotations") api.add_resource(AnnotationUpdateDeleteApi, "/apps//annotations/") api.add_resource(AnnotationBatchImportApi, "/apps//annotations/batch-import") api.add_resource(AnnotationBatchImportStatusApi, "/apps//annotations/batch-import-status/") diff --git a/api/services/annotation_service.py b/api/services/annotation_service.py index cfa917daf6..b7a047914e 100644 --- a/api/services/annotation_service.py +++ b/api/services/annotation_service.py @@ -266,6 +266,54 @@ class AppAnnotationService: annotation.id, app_id, current_user.current_tenant_id, app_annotation_setting.collection_binding_id ) + @classmethod + def delete_app_annotations_in_batch(cls, app_id: str, annotation_ids: list[str]): + # get app info + app = ( + db.session.query(App) + .where(App.id == app_id, App.tenant_id == current_user.current_tenant_id, App.status == "normal") + .first() + ) + + if not app: + raise NotFound("App not found") + + # Fetch annotations and their settings in a single query + annotations_to_delete = ( + db.session.query(MessageAnnotation, AppAnnotationSetting) + .outerjoin(AppAnnotationSetting, MessageAnnotation.app_id == AppAnnotationSetting.app_id) + .filter(MessageAnnotation.id.in_(annotation_ids)) + .all() + ) + + if not annotations_to_delete: + return {"deleted_count": 0} + + # Step 1: Extract IDs for bulk operations + annotation_ids_to_delete = [annotation.id for annotation, _ in annotations_to_delete] + + # Step 2: Bulk delete hit histories in a single query + db.session.query(AppAnnotationHitHistory).filter( + AppAnnotationHitHistory.annotation_id.in_(annotation_ids_to_delete) + ).delete(synchronize_session=False) + + # Step 3: Trigger async tasks for search index deletion + for annotation, annotation_setting in annotations_to_delete: + if annotation_setting: + delete_annotation_index_task.delay( + annotation.id, app_id, current_user.current_tenant_id, annotation_setting.collection_binding_id + ) + + # Step 4: Bulk delete annotations in a single query + deleted_count = ( + db.session.query(MessageAnnotation) + .filter(MessageAnnotation.id.in_(annotation_ids_to_delete)) + .delete(synchronize_session=False) + ) + + db.session.commit() + return {"deleted_count": deleted_count} + @classmethod def batch_import_app_annotations(cls, app_id, file: FileStorage) -> dict: # get app info diff --git a/web/app/components/app/annotation/batch-action.tsx b/web/app/components/app/annotation/batch-action.tsx new file mode 100644 index 0000000000..6e80d0c4c8 --- /dev/null +++ b/web/app/components/app/annotation/batch-action.tsx @@ -0,0 +1,79 @@ +import React, { type FC } from 'react' +import { RiDeleteBinLine } from '@remixicon/react' +import { useTranslation } from 'react-i18next' +import { useBoolean } from 'ahooks' +import Divider from '@/app/components/base/divider' +import classNames from '@/utils/classnames' +import Confirm from '@/app/components/base/confirm' + +const i18nPrefix = 'appAnnotation.batchAction' + +type IBatchActionProps = { + className?: string + selectedIds: string[] + onBatchDelete: () => Promise + onCancel: () => void +} + +const BatchAction: FC = ({ + className, + selectedIds, + onBatchDelete, + onCancel, +}) => { + const { t } = useTranslation() + const [isShowDeleteConfirm, { + setTrue: showDeleteConfirm, + setFalse: hideDeleteConfirm, + }] = useBoolean(false) + const [isDeleting, { + setTrue: setIsDeleting, + setFalse: setIsNotDeleting, + }] = useBoolean(false) + + const handleBatchDelete = async () => { + setIsDeleting() + await onBatchDelete() + hideDeleteConfirm() + setIsNotDeleting() + } + return ( +
    +
    +
    + + {selectedIds.length} + + {t(`${i18nPrefix}.selected`)} +
    + +
    + + +
    + + + +
    + { + isShowDeleteConfirm && ( + + ) + } +
    + ) +} + +export default React.memo(BatchAction) diff --git a/web/app/components/app/annotation/index.tsx b/web/app/components/app/annotation/index.tsx index 04bce1947b..0b0691eb7d 100644 --- a/web/app/components/app/annotation/index.tsx +++ b/web/app/components/app/annotation/index.tsx @@ -26,6 +26,7 @@ import { useProviderContext } from '@/context/provider-context' import AnnotationFullModal from '@/app/components/billing/annotation-full/modal' import type { App } from '@/types/app' import cn from '@/utils/classnames' +import { delAnnotations } from '@/service/annotation' type Props = { appDetail: App @@ -50,7 +51,9 @@ const Annotation: FC = (props) => { const [controlUpdateList, setControlUpdateList] = useState(Date.now()) const [currItem, setCurrItem] = useState(null) const [isShowViewModal, setIsShowViewModal] = useState(false) + const [selectedIds, setSelectedIds] = useState([]) const debouncedQueryParams = useDebounce(queryParams, { wait: 500 }) + const [isBatchDeleting, setIsBatchDeleting] = useState(false) const fetchAnnotationConfig = async () => { const res = await doFetchAnnotationConfig(appDetail.id) @@ -60,7 +63,6 @@ const Annotation: FC = (props) => { useEffect(() => { if (isChatApp) fetchAnnotationConfig() - // eslint-disable-next-line react-hooks/exhaustive-deps }, []) const ensureJobCompleted = async (jobId: string, status: AnnotationEnableStatus) => { @@ -89,7 +91,6 @@ const Annotation: FC = (props) => { useEffect(() => { fetchList(currPage + 1) - // eslint-disable-next-line react-hooks/exhaustive-deps }, [currPage, limit, debouncedQueryParams]) const handleAdd = async (payload: AnnotationItemBasic) => { @@ -106,6 +107,25 @@ const Annotation: FC = (props) => { setControlUpdateList(Date.now()) } + const handleBatchDelete = async () => { + if (isBatchDeleting) + return + setIsBatchDeleting(true) + try { + await delAnnotations(appDetail.id, selectedIds) + Toast.notify({ message: t('common.api.actionSuccess'), type: 'success' }) + fetchList() + setControlUpdateList(Date.now()) + setSelectedIds([]) + } + catch (e: any) { + Toast.notify({ type: 'error', message: e.message || t('common.api.actionFailed') }) + } + finally { + setIsBatchDeleting(false) + } + } + const handleView = (item: AnnotationItem) => { setCurrItem(item) setIsShowViewModal(true) @@ -189,6 +209,11 @@ const Annotation: FC = (props) => { list={list} onRemove={handleRemove} onView={handleView} + selectedIds={selectedIds} + onSelectedIdsChange={setSelectedIds} + onBatchDelete={handleBatchDelete} + onCancel={() => setSelectedIds([])} + isBatchDeleting={isBatchDeleting} /> :
    } diff --git a/web/app/components/app/annotation/list.tsx b/web/app/components/app/annotation/list.tsx index 319f09983f..6705ac5768 100644 --- a/web/app/components/app/annotation/list.tsx +++ b/web/app/components/app/annotation/list.tsx @@ -1,6 +1,6 @@ 'use client' import type { FC } from 'react' -import React from 'react' +import React, { useCallback, useMemo } from 'react' import { useTranslation } from 'react-i18next' import { RiDeleteBinLine, RiEditLine } from '@remixicon/react' import type { AnnotationItem } from './type' @@ -8,28 +8,67 @@ import RemoveAnnotationConfirmModal from './remove-annotation-confirm-modal' import ActionButton from '@/app/components/base/action-button' import useTimestamp from '@/hooks/use-timestamp' import cn from '@/utils/classnames' +import Checkbox from '@/app/components/base/checkbox' +import BatchAction from './batch-action' type Props = { list: AnnotationItem[] - onRemove: (id: string) => void onView: (item: AnnotationItem) => void + onRemove: (id: string) => void + selectedIds: string[] + onSelectedIdsChange: (selectedIds: string[]) => void + onBatchDelete: () => Promise + onCancel: () => void + isBatchDeleting?: boolean } const List: FC = ({ list, onView, onRemove, + selectedIds, + onSelectedIdsChange, + onBatchDelete, + onCancel, + isBatchDeleting, }) => { const { t } = useTranslation() const { formatTime } = useTimestamp() const [currId, setCurrId] = React.useState(null) const [showConfirmDelete, setShowConfirmDelete] = React.useState(false) + + const isAllSelected = useMemo(() => { + return list.length > 0 && list.every(item => selectedIds.includes(item.id)) + }, [list, selectedIds]) + + const isSomeSelected = useMemo(() => { + return list.some(item => selectedIds.includes(item.id)) + }, [list, selectedIds]) + + const handleSelectAll = useCallback(() => { + const currentPageIds = list.map(item => item.id) + const otherPageIds = selectedIds.filter(id => !currentPageIds.includes(id)) + + if (isAllSelected) + onSelectedIdsChange(otherPageIds) + else + onSelectedIdsChange([...otherPageIds, ...currentPageIds]) + }, [isAllSelected, list, selectedIds, onSelectedIdsChange]) + return ( -
    +
    - + + @@ -47,6 +86,18 @@ const List: FC = ({ } } > +
    {t('appAnnotation.table.header.question')} + + {t('appAnnotation.table.header.question')} {t('appAnnotation.table.header.answer')} {t('appAnnotation.table.header.createdAt')} {t('appAnnotation.table.header.hits')} e.stopPropagation()}> + { + if (selectedIds.includes(item.id)) + onSelectedIdsChange(selectedIds.filter(id => id !== item.id)) + else + onSelectedIdsChange([...selectedIds, item.id]) + }} + /> + = ({ setShowConfirmDelete(false) }} /> + {selectedIds.length > 0 && ( + + )} ) } diff --git a/web/i18n/en-US/app-annotation.ts b/web/i18n/en-US/app-annotation.ts index c0a8008d9a..f7cd24dc37 100644 --- a/web/i18n/en-US/app-annotation.ts +++ b/web/i18n/en-US/app-annotation.ts @@ -57,6 +57,16 @@ const translation = { error: 'Import Error', ok: 'OK', }, + list: { + delete: { + title: 'Are you sure Delete?', + }, + }, + batchAction: { + selected: 'Selected', + delete: 'Delete', + cancel: 'Cancel', + }, errorMessage: { answerRequired: 'Answer is required', queryRequired: 'Question is required', diff --git a/web/i18n/zh-Hans/app-annotation.ts b/web/i18n/zh-Hans/app-annotation.ts index cb2d3be0cd..d92dff8e62 100644 --- a/web/i18n/zh-Hans/app-annotation.ts +++ b/web/i18n/zh-Hans/app-annotation.ts @@ -57,6 +57,16 @@ const translation = { error: '导入出错', ok: '确定', }, + list: { + delete: { + title: '确定删除吗?', + }, + }, + batchAction: { + selected: '已选择', + delete: '删除', + cancel: '取消', + }, errorMessage: { answerRequired: '回复不能为空', queryRequired: '提问不能为空', diff --git a/web/service/annotation.ts b/web/service/annotation.ts index 9f025f8eb9..58efb7b976 100644 --- a/web/service/annotation.ts +++ b/web/service/annotation.ts @@ -60,6 +60,11 @@ export const delAnnotation = (appId: string, annotationId: string) => { return del(`apps/${appId}/annotations/${annotationId}`) } +export const delAnnotations = (appId: string, annotationIds: string[]) => { + const params = annotationIds.map(id => `annotation_id=${id}`).join('&') + return del(`/apps/${appId}/annotations?${params}`) +} + export const fetchHitHistoryList = (appId: string, annotationId: string, params: Record) => { return get(`apps/${appId}/annotations/${annotationId}/hit-histories`, { params }) } From 11ec62ca709e5236e5bb1ad5cd2f650a884138d5 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E5=91=86=E8=90=8C=E9=97=B7=E6=B2=B9=E7=93=B6?= <253605712@qq.com> Date: Wed, 30 Jul 2025 15:21:50 +0800 Subject: [PATCH 071/415] fix: element of Array[string] and Array[number] and size attribution (#23074) Co-authored-by: crazywoola <427733928@qq.com> --- .../components/filter-condition.tsx | 102 ++++++++++-------- web/app/components/workflow/nodes/utils.ts | 10 ++ 2 files changed, 70 insertions(+), 42 deletions(-) diff --git a/web/app/components/workflow/nodes/list-operator/components/filter-condition.tsx b/web/app/components/workflow/nodes/list-operator/components/filter-condition.tsx index a7ea6d78e7..ac303acfd7 100644 --- a/web/app/components/workflow/nodes/list-operator/components/filter-condition.tsx +++ b/web/app/components/workflow/nodes/list-operator/components/filter-condition.tsx @@ -15,18 +15,21 @@ import cn from '@/utils/classnames' import { VarType } from '../../../types' const optionNameI18NPrefix = 'workflow.nodes.ifElse.optionName' +import { getConditionValueAsString } from '@/app/components/workflow/nodes/utils' const VAR_INPUT_SUPPORTED_KEYS: Record = { name: VarType.string, url: VarType.string, extension: VarType.string, mime_type: VarType.string, - related_id: VarType.number, + related_id: VarType.string, + size: VarType.number, } type Props = { condition: Condition onChange: (condition: Condition) => void + varType: VarType hasSubVariable: boolean readOnly: boolean nodeId: string @@ -34,6 +37,7 @@ type Props = { const FilterCondition: FC = ({ condition = { key: '', comparison_operator: ComparisonOperator.equal, value: '' }, + varType, onChange, hasSubVariable, readOnly, @@ -42,7 +46,7 @@ const FilterCondition: FC = ({ const { t } = useTranslation() const [isFocus, setIsFocus] = useState(false) - const expectedVarType = VAR_INPUT_SUPPORTED_KEYS[condition.key] + const expectedVarType = condition.key ? VAR_INPUT_SUPPORTED_KEYS[condition.key] : varType const supportVariableInput = !!expectedVarType const { availableVars, availableNodesWithParent } = useAvailableVarList(nodeId, { @@ -93,6 +97,59 @@ const FilterCondition: FC = ({ }) }, [onChange, expectedVarType]) + // Extract input rendering logic to avoid nested ternary + let inputElement: React.ReactNode = null + if (!comparisonOperatorNotRequireValue(condition.comparison_operator)) { + if (isSelect) { + inputElement = ( + + ) + } + else { + inputElement = ( + handleChange('value')(e.target.value)} + readOnly={readOnly} + /> + ) + } + } + return (
    {hasSubVariable && ( @@ -111,46 +168,7 @@ const FilterCondition: FC = ({ file={hasSubVariable ? { key: condition.key } : undefined} disabled={readOnly} /> - {!comparisonOperatorNotRequireValue(condition.comparison_operator) && ( - <> - {isSelect ? ( - - ) : ( - handleChange('value')(e.target.value)} - readOnly={readOnly} - /> - )} - - )} + {inputElement}
    ) diff --git a/web/app/components/workflow/nodes/utils.ts b/web/app/components/workflow/nodes/utils.ts index 262dde62e7..9e7b1ada6b 100644 --- a/web/app/components/workflow/nodes/utils.ts +++ b/web/app/components/workflow/nodes/utils.ts @@ -28,3 +28,13 @@ export const findVariableWhenOnLLMVision = (valueSelector: ValueSelector, availa formType, } } + +export const getConditionValueAsString = (condition: { value: any }) => { + if (Array.isArray(condition.value)) + return condition.value[0] ?? '' + + if (typeof condition.value === 'number') + return String(condition.value) + + return condition.value ?? '' +} From 28478cdc4147da511c87a4dce60b37afb9299858 Mon Sep 17 00:00:00 2001 From: kenwoodjw Date: Wed, 30 Jul 2025 16:13:45 +0800 Subject: [PATCH 072/415] feat: support metadata condition filter string array (#23111) Signed-off-by: kenwoodjw --- api/core/app/app_config/entities.py | 2 ++ api/core/rag/entities/metadata_entities.py | 2 ++ .../nodes/knowledge_retrieval/entities.py | 2 ++ .../knowledge_retrieval_node.py | 22 +++++++++++++++++++ .../metadata/condition-list/utils.ts | 2 ++ web/i18n/zh-Hans/workflow.ts | 4 ++-- 6 files changed, 32 insertions(+), 2 deletions(-) diff --git a/api/core/app/app_config/entities.py b/api/core/app/app_config/entities.py index 75bd2f677a..0df0aa59b2 100644 --- a/api/core/app/app_config/entities.py +++ b/api/core/app/app_config/entities.py @@ -148,6 +148,8 @@ SupportedComparisonOperator = Literal[ "is not", "empty", "not empty", + "in", + "not in", # for number "=", "≠", diff --git a/api/core/rag/entities/metadata_entities.py b/api/core/rag/entities/metadata_entities.py index 6ef932ad22..1f054bccdb 100644 --- a/api/core/rag/entities/metadata_entities.py +++ b/api/core/rag/entities/metadata_entities.py @@ -13,6 +13,8 @@ SupportedComparisonOperator = Literal[ "is not", "empty", "not empty", + "in", + "not in", # for number "=", "≠", diff --git a/api/core/workflow/nodes/knowledge_retrieval/entities.py b/api/core/workflow/nodes/knowledge_retrieval/entities.py index f1767bdf9e..b71271abeb 100644 --- a/api/core/workflow/nodes/knowledge_retrieval/entities.py +++ b/api/core/workflow/nodes/knowledge_retrieval/entities.py @@ -74,6 +74,8 @@ SupportedComparisonOperator = Literal[ "is not", "empty", "not empty", + "in", + "not in", # for number "=", "≠", diff --git a/api/core/workflow/nodes/knowledge_retrieval/knowledge_retrieval_node.py b/api/core/workflow/nodes/knowledge_retrieval/knowledge_retrieval_node.py index e041e217ca..7303b68501 100644 --- a/api/core/workflow/nodes/knowledge_retrieval/knowledge_retrieval_node.py +++ b/api/core/workflow/nodes/knowledge_retrieval/knowledge_retrieval_node.py @@ -602,6 +602,28 @@ class KnowledgeRetrievalNode(BaseNode): **{key: metadata_name, key_value: f"%{value}"} ) ) + case "in": + if isinstance(value, str): + escaped_values = [v.strip().replace("'", "''") for v in str(value).split(",")] + escaped_value_str = ",".join(escaped_values) + else: + escaped_value_str = str(value) + filters.append( + (text(f"documents.doc_metadata ->> :{key} = any(string_to_array(:{key_value},','))")).params( + **{key: metadata_name, key_value: escaped_value_str} + ) + ) + case "not in": + if isinstance(value, str): + escaped_values = [v.strip().replace("'", "''") for v in str(value).split(",")] + escaped_value_str = ",".join(escaped_values) + else: + escaped_value_str = str(value) + filters.append( + (text(f"documents.doc_metadata ->> :{key} != all(string_to_array(:{key_value},','))")).params( + **{key: metadata_name, key_value: escaped_value_str} + ) + ) case "=" | "is": if isinstance(value, str): filters.append(Document.doc_metadata[metadata_name] == f'"{value}"') diff --git a/web/app/components/workflow/nodes/knowledge-retrieval/components/metadata/condition-list/utils.ts b/web/app/components/workflow/nodes/knowledge-retrieval/components/metadata/condition-list/utils.ts index 6397023991..10ee1aff1f 100644 --- a/web/app/components/workflow/nodes/knowledge-retrieval/components/metadata/condition-list/utils.ts +++ b/web/app/components/workflow/nodes/knowledge-retrieval/components/metadata/condition-list/utils.ts @@ -32,6 +32,8 @@ export const getOperators = (type?: MetadataFilteringVariableType) => { ComparisonOperator.endWith, ComparisonOperator.empty, ComparisonOperator.notEmpty, + ComparisonOperator.in, + ComparisonOperator.notIn, ] case MetadataFilteringVariableType.number: return [ diff --git a/web/i18n/zh-Hans/workflow.ts b/web/i18n/zh-Hans/workflow.ts index 1f0300ae2a..b1c28c4666 100644 --- a/web/i18n/zh-Hans/workflow.ts +++ b/web/i18n/zh-Hans/workflow.ts @@ -591,8 +591,8 @@ const translation = { 'not empty': '不为空', 'null': '空', 'not null': '不为空', - 'in': '是', - 'not in': '不是', + 'in': '在', + 'not in': '不在', 'all of': '全部是', 'exists': '存在', 'not exists': '不存在', From bbdeb15501b1d122d4e87261b26baa123bc01c3e Mon Sep 17 00:00:00 2001 From: Sn0rt Date: Wed, 30 Jul 2025 16:39:54 +0800 Subject: [PATCH 073/415] fix: Support URL-encoded passwords with special characters in CELERY_BROKER_URL (#23163) Signed-off-by: Sn0rt --- api/schedule/queue_monitor_task.py | 19 +++--- .../unit_tests/configs/test_dify_config.py | 59 +++++++++++++++++++ 2 files changed, 67 insertions(+), 11 deletions(-) diff --git a/api/schedule/queue_monitor_task.py b/api/schedule/queue_monitor_task.py index a05e1358ed..4d517e5498 100644 --- a/api/schedule/queue_monitor_task.py +++ b/api/schedule/queue_monitor_task.py @@ -1,8 +1,8 @@ import logging from datetime import datetime -from urllib.parse import urlparse import click +from kombu.utils.url import parse_url # type: ignore from redis import Redis import app @@ -10,16 +10,13 @@ from configs import dify_config from extensions.ext_database import db from libs.email_i18n import EmailType, get_email_i18n_service -# Create a dedicated Redis connection (using the same configuration as Celery) -celery_broker_url = dify_config.CELERY_BROKER_URL - -parsed = urlparse(celery_broker_url) -host = parsed.hostname or "localhost" -port = parsed.port or 6379 -password = parsed.password or None -redis_db = parsed.path.strip("/") or "1" # type: ignore - -celery_redis = Redis(host=host, port=port, password=password, db=redis_db) +redis_config = parse_url(dify_config.CELERY_BROKER_URL) +celery_redis = Redis( + host=redis_config["hostname"], + port=redis_config["port"], + password=redis_config["password"], + db=int(redis_config["virtual_host"]) if redis_config["virtual_host"] else 1, +) @app.celery.task(queue="monitor") diff --git a/api/tests/unit_tests/configs/test_dify_config.py b/api/tests/unit_tests/configs/test_dify_config.py index e9d4ee1935..0ae6a09f5b 100644 --- a/api/tests/unit_tests/configs/test_dify_config.py +++ b/api/tests/unit_tests/configs/test_dify_config.py @@ -1,5 +1,6 @@ import os +import pytest from flask import Flask from packaging.version import Version from yarl import URL @@ -137,3 +138,61 @@ def test_db_extras_options_merging(monkeypatch): options = engine_options["connect_args"]["options"] assert "search_path=myschema" in options assert "timezone=UTC" in options + + +@pytest.mark.parametrize( + ("broker_url", "expected_host", "expected_port", "expected_username", "expected_password", "expected_db"), + [ + ("redis://localhost:6379/1", "localhost", 6379, None, None, "1"), + ("redis://:password@localhost:6379/1", "localhost", 6379, None, "password", "1"), + ("redis://:mypass%23123@localhost:6379/1", "localhost", 6379, None, "mypass#123", "1"), + ("redis://user:pass%40word@redis-host:6380/2", "redis-host", 6380, "user", "pass@word", "2"), + ("redis://admin:complex%23pass%40word@127.0.0.1:6379/0", "127.0.0.1", 6379, "admin", "complex#pass@word", "0"), + ( + "redis://user%40domain:secret%23123@redis.example.com:6380/3", + "redis.example.com", + 6380, + "user@domain", + "secret#123", + "3", + ), + # Password containing %23 substring (double encoding scenario) + ("redis://:mypass%2523@localhost:6379/1", "localhost", 6379, None, "mypass%23", "1"), + # Username and password both containing encoded characters + ("redis://user%2525%40:pass%2523@localhost:6379/1", "localhost", 6379, "user%25@", "pass%23", "1"), + ], +) +def test_celery_broker_url_with_special_chars_password( + monkeypatch, broker_url, expected_host, expected_port, expected_username, expected_password, expected_db +): + """Test that CELERY_BROKER_URL with various formats are handled correctly.""" + from kombu.utils.url import parse_url + + # clear system environment variables + os.environ.clear() + + # Set up basic required environment variables (following existing pattern) + monkeypatch.setenv("CONSOLE_API_URL", "https://example.com") + monkeypatch.setenv("CONSOLE_WEB_URL", "https://example.com") + monkeypatch.setenv("DB_USERNAME", "postgres") + monkeypatch.setenv("DB_PASSWORD", "postgres") + monkeypatch.setenv("DB_HOST", "localhost") + monkeypatch.setenv("DB_PORT", "5432") + monkeypatch.setenv("DB_DATABASE", "dify") + + # Set the CELERY_BROKER_URL to test + monkeypatch.setenv("CELERY_BROKER_URL", broker_url) + + # Create config and verify the URL is stored correctly + config = DifyConfig() + assert broker_url == config.CELERY_BROKER_URL + + # Test actual parsing behavior using kombu's parse_url (same as production) + redis_config = parse_url(config.CELERY_BROKER_URL) + + # Verify the parsing results match expectations (using kombu's field names) + assert redis_config["hostname"] == expected_host + assert redis_config["port"] == expected_port + assert redis_config["userid"] == expected_username # kombu uses 'userid' not 'username' + assert redis_config["password"] == expected_password + assert redis_config["virtual_host"] == expected_db # kombu uses 'virtual_host' not 'db' From 070379a900347001f2bce8630e059493ce9e9004 Mon Sep 17 00:00:00 2001 From: NeatGuyCoding <15627489+NeatGuyCoding@users.noreply.github.com> Date: Wed, 30 Jul 2025 17:04:31 +0800 Subject: [PATCH 074/415] minor fix: fix wrong check of annotation_ids (#23164) --- api/controllers/console/app/annotation.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/api/controllers/console/app/annotation.py b/api/controllers/console/app/annotation.py index 2af7136f14..472c694c36 100644 --- a/api/controllers/console/app/annotation.py +++ b/api/controllers/console/app/annotation.py @@ -137,7 +137,8 @@ class AnnotationListApi(Resource): # If annotation_ids are provided, handle batch deletion if annotation_ids: - if not annotation_ids: + # Check if any annotation_ids contain empty strings or invalid values + if not all(annotation_id.strip() for annotation_id in annotation_ids if annotation_id): return { "code": "bad_request", "message": "annotation_ids are required if the parameter is provided.", From 07cff1ed2c6259623fe2b50b780d10182b36987d Mon Sep 17 00:00:00 2001 From: NeatGuyCoding <15627489+NeatGuyCoding@users.noreply.github.com> Date: Wed, 30 Jul 2025 17:05:02 +0800 Subject: [PATCH 075/415] minor fix: fix flask api resources only accept one resource for same url (#23168) --- api/controllers/console/app/annotation.py | 41 +++++++++++------------ 1 file changed, 19 insertions(+), 22 deletions(-) diff --git a/api/controllers/console/app/annotation.py b/api/controllers/console/app/annotation.py index 472c694c36..007b1f6d3d 100644 --- a/api/controllers/console/app/annotation.py +++ b/api/controllers/console/app/annotation.py @@ -100,7 +100,7 @@ class AnnotationReplyActionStatusApi(Resource): return {"job_id": job_id, "job_status": job_status, "error_msg": error_msg}, 200 -class AnnotationListApi(Resource): +class AnnotationApi(Resource): @setup_required @login_required @account_initialization_required @@ -123,6 +123,23 @@ class AnnotationListApi(Resource): } return response, 200 + @setup_required + @login_required + @account_initialization_required + @cloud_edition_billing_resource_check("annotation") + @marshal_with(annotation_fields) + def post(self, app_id): + if not current_user.is_editor: + raise Forbidden() + + app_id = str(app_id) + parser = reqparse.RequestParser() + parser.add_argument("question", required=True, type=str, location="json") + parser.add_argument("answer", required=True, type=str, location="json") + args = parser.parse_args() + annotation = AppAnnotationService.insert_app_annotation_directly(args, app_id) + return annotation + @setup_required @login_required @account_initialization_required @@ -166,25 +183,6 @@ class AnnotationExportApi(Resource): return response, 200 -class AnnotationCreateApi(Resource): - @setup_required - @login_required - @account_initialization_required - @cloud_edition_billing_resource_check("annotation") - @marshal_with(annotation_fields) - def post(self, app_id): - if not current_user.is_editor: - raise Forbidden() - - app_id = str(app_id) - parser = reqparse.RequestParser() - parser.add_argument("question", required=True, type=str, location="json") - parser.add_argument("answer", required=True, type=str, location="json") - args = parser.parse_args() - annotation = AppAnnotationService.insert_app_annotation_directly(args, app_id) - return annotation - - class AnnotationUpdateDeleteApi(Resource): @setup_required @login_required @@ -293,9 +291,8 @@ api.add_resource(AnnotationReplyActionApi, "/apps//annotation-reply api.add_resource( AnnotationReplyActionStatusApi, "/apps//annotation-reply//status/" ) -api.add_resource(AnnotationListApi, "/apps//annotations") +api.add_resource(AnnotationApi, "/apps//annotations") api.add_resource(AnnotationExportApi, "/apps//annotations/export") -api.add_resource(AnnotationCreateApi, "/apps//annotations") api.add_resource(AnnotationUpdateDeleteApi, "/apps//annotations/") api.add_resource(AnnotationBatchImportApi, "/apps//annotations/batch-import") api.add_resource(AnnotationBatchImportStatusApi, "/apps//annotations/batch-import-status/") From 4e2129d74f298ae3d1d91a917e0b0032be2a2cb2 Mon Sep 17 00:00:00 2001 From: Will Date: Wed, 30 Jul 2025 18:00:15 +0800 Subject: [PATCH 076/415] fix: Error processing trace tasks (#23170) --- api/core/ops/ops_trace_manager.py | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/api/core/ops/ops_trace_manager.py b/api/core/ops/ops_trace_manager.py index a607c76beb..b769934a9b 100644 --- a/api/core/ops/ops_trace_manager.py +++ b/api/core/ops/ops_trace_manager.py @@ -407,7 +407,6 @@ class TraceTask: def __init__( self, trace_type: Any, - trace_id: Optional[str] = None, message_id: Optional[str] = None, workflow_execution: Optional[WorkflowExecution] = None, conversation_id: Optional[str] = None, @@ -423,7 +422,7 @@ class TraceTask: self.timer = timer self.file_base_url = os.getenv("FILES_URL", "http://127.0.0.1:5001") self.app_id = None - + self.trace_id = None self.kwargs = kwargs external_trace_id = kwargs.get("external_trace_id") if external_trace_id: From 270dd955d01b2fc9aa9e2a9fd9d48f68224bd68c Mon Sep 17 00:00:00 2001 From: lyzno1 <92089059+lyzno1@users.noreply.github.com> Date: Wed, 30 Jul 2025 18:00:41 +0800 Subject: [PATCH 077/415] chore(i18n): sync missing keys in zh-Hans and ja-JP (#23175) --- web/i18n/ja-JP/app-annotation.ts | 10 ++++++++++ web/i18n/zh-Hans/time.ts | 2 +- 2 files changed, 11 insertions(+), 1 deletion(-) diff --git a/web/i18n/ja-JP/app-annotation.ts b/web/i18n/ja-JP/app-annotation.ts index 801be7c672..6d7edf7077 100644 --- a/web/i18n/ja-JP/app-annotation.ts +++ b/web/i18n/ja-JP/app-annotation.ts @@ -83,6 +83,16 @@ const translation = { configConfirmBtn: '保存', }, embeddingModelSwitchTip: '注釈テキストのベクトル化モデルです。モデルを切り替えると再埋め込みが行われ、追加のコストが発生します。', + list: { + delete: { + title: '本当に削除しますか?', + }, + }, + batchAction: { + cancel: 'キャンセル', + delete: '削除する', + selected: '選択された', + }, } export default translation diff --git a/web/i18n/zh-Hans/time.ts b/web/i18n/zh-Hans/time.ts index 8a223d9dd1..a7a1c6e574 100644 --- a/web/i18n/zh-Hans/time.ts +++ b/web/i18n/zh-Hans/time.ts @@ -26,11 +26,11 @@ const translation = { now: '此刻', ok: '确定', cancel: '取消', + pickDate: '选择日期', }, title: { pickTime: '选择时间', }, - pickDate: '选择日期', defaultPlaceholder: '请选择时间...', } From 8c6d87f08a6e1a555459346172465901a83416cf Mon Sep 17 00:00:00 2001 From: Wu Tianwei <30284043+WTW0313@users.noreply.github.com> Date: Wed, 30 Jul 2025 21:31:23 +0800 Subject: [PATCH 078/415] chore: Update vulnerable eslint dependencies (#23192) --- web/package.json | 7 +- web/pnpm-lock.yaml | 567 ++++++++++++++++++++++----------------------- 2 files changed, 283 insertions(+), 291 deletions(-) diff --git a/web/package.json b/web/package.json index d93788a368..a03334e4a4 100644 --- a/web/package.json +++ b/web/package.json @@ -160,7 +160,7 @@ "@faker-js/faker": "^9.0.3", "@happy-dom/jest-environment": "^17.4.4", "@next/bundle-analyzer": "^15.4.1", - "@next/eslint-plugin-next": "~15.4.4", + "@next/eslint-plugin-next": "~15.4.5", "@rgrove/parse-xml": "^4.1.0", "@storybook/addon-essentials": "8.5.0", "@storybook/addon-interactions": "8.5.0", @@ -196,8 +196,8 @@ "bing-translate-api": "^4.0.2", "code-inspector-plugin": "^0.18.1", "cross-env": "^7.0.3", - "eslint": "^9.20.1", - "eslint-config-next": "~15.4.4", + "eslint": "^9.32.0", + "eslint-config-next": "~15.4.5", "eslint-plugin-oxlint": "^1.6.0", "eslint-plugin-react-hooks": "^5.1.0", "eslint-plugin-react-refresh": "^0.4.19", @@ -234,6 +234,7 @@ }, "pnpm": { "overrides": { + "@eslint/plugin-kit@<0.3.4": "0.3.4", "esbuild@<0.25.0": "0.25.0", "pbkdf2@<3.1.3": "3.1.3", "vite@<6.2.7": "6.2.7", diff --git a/web/pnpm-lock.yaml b/web/pnpm-lock.yaml index 58153b9fc1..1e36696a8a 100644 --- a/web/pnpm-lock.yaml +++ b/web/pnpm-lock.yaml @@ -8,6 +8,7 @@ overrides: '@types/react': ~19.1.8 '@types/react-dom': ~19.1.6 string-width: 4.2.3 + '@eslint/plugin-kit@<0.3.4': 0.3.4 esbuild@<0.25.0: 0.25.0 pbkdf2@<3.1.3: 3.1.3 vite@<6.2.7: 6.2.7 @@ -58,7 +59,7 @@ importers: version: 1.2.1 '@eslint/compat': specifier: ^1.2.4 - version: 1.3.1(eslint@9.31.0(jiti@1.21.7)) + version: 1.3.1(eslint@9.32.0(jiti@1.21.7)) '@floating-ui/react': specifier: ^0.26.25 version: 0.26.28(react-dom@19.1.0(react@19.1.0))(react@19.1.0) @@ -380,13 +381,13 @@ importers: devDependencies: '@antfu/eslint-config': specifier: ^5.0.0 - version: 5.0.0(@eslint-react/eslint-plugin@1.52.3(eslint@9.31.0(jiti@1.21.7))(ts-api-utils@2.1.0(typescript@5.8.3))(typescript@5.8.3))(@next/eslint-plugin-next@15.4.4)(@vue/compiler-sfc@3.5.17)(eslint-plugin-react-hooks@5.2.0(eslint@9.31.0(jiti@1.21.7)))(eslint-plugin-react-refresh@0.4.20(eslint@9.31.0(jiti@1.21.7)))(eslint@9.31.0(jiti@1.21.7))(typescript@5.8.3) + version: 5.0.0(@eslint-react/eslint-plugin@1.52.3(eslint@9.32.0(jiti@1.21.7))(ts-api-utils@2.1.0(typescript@5.8.3))(typescript@5.8.3))(@next/eslint-plugin-next@15.4.5)(@vue/compiler-sfc@3.5.17)(eslint-plugin-react-hooks@5.2.0(eslint@9.32.0(jiti@1.21.7)))(eslint-plugin-react-refresh@0.4.20(eslint@9.32.0(jiti@1.21.7)))(eslint@9.32.0(jiti@1.21.7))(typescript@5.8.3) '@chromatic-com/storybook': specifier: ^3.1.0 version: 3.2.7(react@19.1.0)(storybook@8.5.0) '@eslint-react/eslint-plugin': specifier: ^1.15.0 - version: 1.52.3(eslint@9.31.0(jiti@1.21.7))(ts-api-utils@2.1.0(typescript@5.8.3))(typescript@5.8.3) + version: 1.52.3(eslint@9.32.0(jiti@1.21.7))(ts-api-utils@2.1.0(typescript@5.8.3))(typescript@5.8.3) '@eslint/eslintrc': specifier: ^3.1.0 version: 3.3.1 @@ -403,8 +404,8 @@ importers: specifier: ^15.4.1 version: 15.4.1 '@next/eslint-plugin-next': - specifier: ~15.4.4 - version: 15.4.4 + specifier: ~15.4.5 + version: 15.4.5 '@rgrove/parse-xml': specifier: ^4.1.0 version: 4.2.0 @@ -511,26 +512,26 @@ importers: specifier: ^7.0.3 version: 7.0.3 eslint: - specifier: ^9.20.1 - version: 9.31.0(jiti@1.21.7) + specifier: ^9.32.0 + version: 9.32.0(jiti@1.21.7) eslint-config-next: - specifier: ~15.4.4 - version: 15.4.4(eslint@9.31.0(jiti@1.21.7))(typescript@5.8.3) + specifier: ~15.4.5 + version: 15.4.5(eslint@9.32.0(jiti@1.21.7))(typescript@5.8.3) eslint-plugin-oxlint: specifier: ^1.6.0 version: 1.6.0 eslint-plugin-react-hooks: specifier: ^5.1.0 - version: 5.2.0(eslint@9.31.0(jiti@1.21.7)) + version: 5.2.0(eslint@9.32.0(jiti@1.21.7)) eslint-plugin-react-refresh: specifier: ^0.4.19 - version: 0.4.20(eslint@9.31.0(jiti@1.21.7)) + version: 0.4.20(eslint@9.32.0(jiti@1.21.7)) eslint-plugin-sonarjs: specifier: ^3.0.2 - version: 3.0.4(eslint@9.31.0(jiti@1.21.7)) + version: 3.0.4(eslint@9.32.0(jiti@1.21.7)) eslint-plugin-storybook: specifier: ^0.11.2 - version: 0.11.6(eslint@9.31.0(jiti@1.21.7))(typescript@5.8.3) + version: 0.11.6(eslint@9.32.0(jiti@1.21.7))(typescript@5.8.3) eslint-plugin-tailwindcss: specifier: ^3.18.0 version: 3.18.2(tailwindcss@3.4.17(ts-node@10.9.2(@types/node@18.15.0)(typescript@5.8.3))) @@ -572,7 +573,7 @@ importers: version: 5.8.3 typescript-eslint: specifier: ^8.38.0 - version: 8.38.0(eslint@9.31.0(jiti@1.21.7))(typescript@5.8.3) + version: 8.38.0(eslint@9.32.0(jiti@1.21.7))(typescript@5.8.3) uglify-js: specifier: ^3.19.3 version: 3.19.3 @@ -1578,6 +1579,10 @@ packages: resolution: {integrity: sha512-LOm5OVt7D4qiKCqoiPbA7LWmI+tbw1VbTUowBcUMgQSuM6poJufkFkYDcQpo5KfgD39TnNySV26QjOh7VFpSyw==} engines: {node: ^18.18.0 || ^20.9.0 || >=21.1.0} + '@eslint/js@9.32.0': + resolution: {integrity: sha512-BBpRFZK3eX6uMLKz8WxFOBIFFcGFJ/g8XuwjTHCqHROSIsopI+ddn/d5Cfh36+7+e5edVS8dbSHnBNhrLEX0zg==} + engines: {node: ^18.18.0 || ^20.9.0 || >=21.1.0} + '@eslint/markdown@7.1.0': resolution: {integrity: sha512-Y+X1B1j+/zupKDVJfkKc8uYMjQkGzfnd8lt7vK3y8x9Br6H5dBuhAfFrQ6ff7HAMm/1BwgecyEiRFkYCWPRxmA==} engines: {node: ^18.18.0 || ^20.9.0 || >=21.1.0} @@ -1586,10 +1591,6 @@ packages: resolution: {integrity: sha512-RBMg5FRL0I0gs51M/guSAj5/e14VQ4tpZnQNWwuDT66P14I43ItmPfIZRhO9fUVIPOAQXU47atlywZ/czoqFPA==} engines: {node: ^18.18.0 || ^20.9.0 || >=21.1.0} - '@eslint/plugin-kit@0.3.3': - resolution: {integrity: sha512-1+WqvgNMhmlAambTvT3KPtCl/Ibr68VldY2XY40SL1CE0ZXiakFR/cbTspaF5HsnpDMvcYYoJHfl4980NBjGag==} - engines: {node: ^18.18.0 || ^20.9.0 || >=21.1.0} - '@eslint/plugin-kit@0.3.4': resolution: {integrity: sha512-Ul5l+lHEcw3L5+k8POx6r74mxEYKG5kOb6Xpy2gCRW6zweT6TEhAf8vhxGgjhqrd/VO/Dirhsb+1hNpD1ue9hw==} engines: {node: ^18.18.0 || ^20.9.0 || >=21.1.0} @@ -2110,8 +2111,8 @@ packages: '@next/env@15.3.5': resolution: {integrity: sha512-7g06v8BUVtN2njAX/r8gheoVffhiKFVt4nx74Tt6G4Hqw9HCLYQVx/GkH2qHvPtAHZaUNZ0VXAa0pQP6v1wk7g==} - '@next/eslint-plugin-next@15.4.4': - resolution: {integrity: sha512-1FDsyN//ai3Jd97SEd7scw5h1yLdzDACGOPRofr2GD3sEFsBylEEoL0MHSerd4n2dq9Zm/mFMqi4+NRMOreOKA==} + '@next/eslint-plugin-next@15.4.5': + resolution: {integrity: sha512-YhbrlbEt0m4jJnXHMY/cCUDBAWgd5SaTa5mJjzOt82QwflAFfW/h3+COp2TfVSzhmscIZ5sg2WXt3MLziqCSCw==} '@next/mdx@15.3.5': resolution: {integrity: sha512-/2rRCgPKNp2ttQscU13auI+cYYACdPa80Okgi/1+NNJJeWn9yVxwGnqZc3SX30T889bZbLqcY4oUjqYGAygL4g==} @@ -2726,9 +2727,6 @@ packages: '@storybook/csf@0.1.12': resolution: {integrity: sha512-9/exVhabisyIVL0VxTCxo01Tdm8wefIXKXfltAPTSr8cbLn5JAxGQ6QV3mjdecLGEOucfoVhAKtJfVHxEK1iqw==} - '@storybook/csf@0.1.13': - resolution: {integrity: sha512-7xOOwCLGB3ebM87eemep89MYRFTko+D8qE7EdAAq74lgdqRR5cOUtYWJLjO2dLtP94nqoOdHJo6MdLLKzg412Q==} - '@storybook/global@5.0.0': resolution: {integrity: sha512-FcOqPAXACP0I3oJ/ws6/rrPT9WGhu915Cg8D02a9YxLo0DE9zI+a9A5gRGvmQ09fiWPukqI8ZAEoQEdWUKMQdQ==} @@ -4691,8 +4689,8 @@ packages: peerDependencies: eslint: ^9.5.0 - eslint-config-next@15.4.4: - resolution: {integrity: sha512-sK/lWLUVF5om18O5w76Jt3F8uzu/LP5mVa6TprCMWkjWHUmByq80iHGHcdH7k1dLiJlj+DRIWf98d5piwRsSuA==} + eslint-config-next@15.4.5: + resolution: {integrity: sha512-IMijiXaZ43qFB+Gcpnb374ipTKD8JIyVNR+6VsifFQ/LHyx+A9wgcgSIhCX5PYSjwOoSYD5LtNHKlM5uc23eww==} peerDependencies: eslint: ^7.23.0 || ^8.0.0 || ^9.0.0 typescript: '>=3.3.1' @@ -4997,8 +4995,8 @@ packages: resolution: {integrity: sha512-Uhdk5sfqcee/9H/rCOJikYz67o0a2Tw2hGRPOG2Y1R2dg7brRe1uG0yaNQDHu+TO/uQPF/5eCapvYSmHUjt7JQ==} engines: {node: ^18.18.0 || ^20.9.0 || >=21.1.0} - eslint@9.31.0: - resolution: {integrity: sha512-QldCVh/ztyKJJZLr4jXNUByx3gR+TDYZCRXEktiZoUR3PGy4qCmSbkxcIle8GEwGpb5JBZazlaJ/CxLidXdEbQ==} + eslint@9.32.0: + resolution: {integrity: sha512-LSehfdpgMeWcTZkWZVIJl+tkZ2nuSkyyB9C27MZqFWXuph7DvaowgcTvKqxvpLW1JZIk8PN7hFY3Rj9LQ7m7lg==} engines: {node: ^18.18.0 || ^20.9.0 || >=21.1.0} hasBin: true peerDependencies: @@ -8285,50 +8283,50 @@ snapshots: '@jridgewell/gen-mapping': 0.3.12 '@jridgewell/trace-mapping': 0.3.29 - '@antfu/eslint-config@5.0.0(@eslint-react/eslint-plugin@1.52.3(eslint@9.31.0(jiti@1.21.7))(ts-api-utils@2.1.0(typescript@5.8.3))(typescript@5.8.3))(@next/eslint-plugin-next@15.4.4)(@vue/compiler-sfc@3.5.17)(eslint-plugin-react-hooks@5.2.0(eslint@9.31.0(jiti@1.21.7)))(eslint-plugin-react-refresh@0.4.20(eslint@9.31.0(jiti@1.21.7)))(eslint@9.31.0(jiti@1.21.7))(typescript@5.8.3)': + '@antfu/eslint-config@5.0.0(@eslint-react/eslint-plugin@1.52.3(eslint@9.32.0(jiti@1.21.7))(ts-api-utils@2.1.0(typescript@5.8.3))(typescript@5.8.3))(@next/eslint-plugin-next@15.4.5)(@vue/compiler-sfc@3.5.17)(eslint-plugin-react-hooks@5.2.0(eslint@9.32.0(jiti@1.21.7)))(eslint-plugin-react-refresh@0.4.20(eslint@9.32.0(jiti@1.21.7)))(eslint@9.32.0(jiti@1.21.7))(typescript@5.8.3)': dependencies: '@antfu/install-pkg': 1.1.0 '@clack/prompts': 0.11.0 - '@eslint-community/eslint-plugin-eslint-comments': 4.5.0(eslint@9.31.0(jiti@1.21.7)) + '@eslint-community/eslint-plugin-eslint-comments': 4.5.0(eslint@9.32.0(jiti@1.21.7)) '@eslint/markdown': 7.1.0 - '@stylistic/eslint-plugin': 5.2.2(eslint@9.31.0(jiti@1.21.7)) - '@typescript-eslint/eslint-plugin': 8.38.0(@typescript-eslint/parser@8.38.0(eslint@9.31.0(jiti@1.21.7))(typescript@5.8.3))(eslint@9.31.0(jiti@1.21.7))(typescript@5.8.3) - '@typescript-eslint/parser': 8.38.0(eslint@9.31.0(jiti@1.21.7))(typescript@5.8.3) - '@vitest/eslint-plugin': 1.3.4(eslint@9.31.0(jiti@1.21.7))(typescript@5.8.3) + '@stylistic/eslint-plugin': 5.2.2(eslint@9.32.0(jiti@1.21.7)) + '@typescript-eslint/eslint-plugin': 8.38.0(@typescript-eslint/parser@8.38.0(eslint@9.32.0(jiti@1.21.7))(typescript@5.8.3))(eslint@9.32.0(jiti@1.21.7))(typescript@5.8.3) + '@typescript-eslint/parser': 8.38.0(eslint@9.32.0(jiti@1.21.7))(typescript@5.8.3) + '@vitest/eslint-plugin': 1.3.4(eslint@9.32.0(jiti@1.21.7))(typescript@5.8.3) ansis: 4.1.0 cac: 6.7.14 - eslint: 9.31.0(jiti@1.21.7) - eslint-config-flat-gitignore: 2.1.0(eslint@9.31.0(jiti@1.21.7)) + eslint: 9.32.0(jiti@1.21.7) + eslint-config-flat-gitignore: 2.1.0(eslint@9.32.0(jiti@1.21.7)) eslint-flat-config-utils: 2.1.0 - eslint-merge-processors: 2.0.0(eslint@9.31.0(jiti@1.21.7)) - eslint-plugin-antfu: 3.1.1(eslint@9.31.0(jiti@1.21.7)) - eslint-plugin-command: 3.3.1(eslint@9.31.0(jiti@1.21.7)) - eslint-plugin-import-lite: 0.3.0(eslint@9.31.0(jiti@1.21.7))(typescript@5.8.3) - eslint-plugin-jsdoc: 51.4.1(eslint@9.31.0(jiti@1.21.7)) - eslint-plugin-jsonc: 2.20.1(eslint@9.31.0(jiti@1.21.7)) - eslint-plugin-n: 17.21.0(eslint@9.31.0(jiti@1.21.7))(typescript@5.8.3) + eslint-merge-processors: 2.0.0(eslint@9.32.0(jiti@1.21.7)) + eslint-plugin-antfu: 3.1.1(eslint@9.32.0(jiti@1.21.7)) + eslint-plugin-command: 3.3.1(eslint@9.32.0(jiti@1.21.7)) + eslint-plugin-import-lite: 0.3.0(eslint@9.32.0(jiti@1.21.7))(typescript@5.8.3) + eslint-plugin-jsdoc: 51.4.1(eslint@9.32.0(jiti@1.21.7)) + eslint-plugin-jsonc: 2.20.1(eslint@9.32.0(jiti@1.21.7)) + eslint-plugin-n: 17.21.0(eslint@9.32.0(jiti@1.21.7))(typescript@5.8.3) eslint-plugin-no-only-tests: 3.3.0 - eslint-plugin-perfectionist: 4.15.0(eslint@9.31.0(jiti@1.21.7))(typescript@5.8.3) - eslint-plugin-pnpm: 1.1.0(eslint@9.31.0(jiti@1.21.7)) - eslint-plugin-regexp: 2.9.0(eslint@9.31.0(jiti@1.21.7)) - eslint-plugin-toml: 0.12.0(eslint@9.31.0(jiti@1.21.7)) - eslint-plugin-unicorn: 60.0.0(eslint@9.31.0(jiti@1.21.7)) - eslint-plugin-unused-imports: 4.1.4(@typescript-eslint/eslint-plugin@8.38.0(@typescript-eslint/parser@8.38.0(eslint@9.31.0(jiti@1.21.7))(typescript@5.8.3))(eslint@9.31.0(jiti@1.21.7))(typescript@5.8.3))(eslint@9.31.0(jiti@1.21.7)) - eslint-plugin-vue: 10.3.0(@typescript-eslint/parser@8.38.0(eslint@9.31.0(jiti@1.21.7))(typescript@5.8.3))(eslint@9.31.0(jiti@1.21.7))(vue-eslint-parser@10.2.0(eslint@9.31.0(jiti@1.21.7))) - eslint-plugin-yml: 1.18.0(eslint@9.31.0(jiti@1.21.7)) - eslint-processor-vue-blocks: 2.0.0(@vue/compiler-sfc@3.5.17)(eslint@9.31.0(jiti@1.21.7)) + eslint-plugin-perfectionist: 4.15.0(eslint@9.32.0(jiti@1.21.7))(typescript@5.8.3) + eslint-plugin-pnpm: 1.1.0(eslint@9.32.0(jiti@1.21.7)) + eslint-plugin-regexp: 2.9.0(eslint@9.32.0(jiti@1.21.7)) + eslint-plugin-toml: 0.12.0(eslint@9.32.0(jiti@1.21.7)) + eslint-plugin-unicorn: 60.0.0(eslint@9.32.0(jiti@1.21.7)) + eslint-plugin-unused-imports: 4.1.4(@typescript-eslint/eslint-plugin@8.38.0(@typescript-eslint/parser@8.38.0(eslint@9.32.0(jiti@1.21.7))(typescript@5.8.3))(eslint@9.32.0(jiti@1.21.7))(typescript@5.8.3))(eslint@9.32.0(jiti@1.21.7)) + eslint-plugin-vue: 10.3.0(@typescript-eslint/parser@8.38.0(eslint@9.32.0(jiti@1.21.7))(typescript@5.8.3))(eslint@9.32.0(jiti@1.21.7))(vue-eslint-parser@10.2.0(eslint@9.32.0(jiti@1.21.7))) + eslint-plugin-yml: 1.18.0(eslint@9.32.0(jiti@1.21.7)) + eslint-processor-vue-blocks: 2.0.0(@vue/compiler-sfc@3.5.17)(eslint@9.32.0(jiti@1.21.7)) globals: 16.3.0 jsonc-eslint-parser: 2.4.0 local-pkg: 1.1.1 parse-gitignore: 2.0.0 toml-eslint-parser: 0.10.0 - vue-eslint-parser: 10.2.0(eslint@9.31.0(jiti@1.21.7)) + vue-eslint-parser: 10.2.0(eslint@9.32.0(jiti@1.21.7)) yaml-eslint-parser: 1.3.0 optionalDependencies: - '@eslint-react/eslint-plugin': 1.52.3(eslint@9.31.0(jiti@1.21.7))(ts-api-utils@2.1.0(typescript@5.8.3))(typescript@5.8.3) - '@next/eslint-plugin-next': 15.4.4 - eslint-plugin-react-hooks: 5.2.0(eslint@9.31.0(jiti@1.21.7)) - eslint-plugin-react-refresh: 0.4.20(eslint@9.31.0(jiti@1.21.7)) + '@eslint-react/eslint-plugin': 1.52.3(eslint@9.32.0(jiti@1.21.7))(ts-api-utils@2.1.0(typescript@5.8.3))(typescript@5.8.3) + '@next/eslint-plugin-next': 15.4.5 + eslint-plugin-react-hooks: 5.2.0(eslint@9.32.0(jiti@1.21.7)) + eslint-plugin-react-refresh: 0.4.20(eslint@9.32.0(jiti@1.21.7)) transitivePeerDependencies: - '@eslint/json' - '@vue/compiler-sfc' @@ -9323,25 +9321,25 @@ snapshots: '@esbuild/win32-x64@0.25.0': optional: true - '@eslint-community/eslint-plugin-eslint-comments@4.5.0(eslint@9.31.0(jiti@1.21.7))': + '@eslint-community/eslint-plugin-eslint-comments@4.5.0(eslint@9.32.0(jiti@1.21.7))': dependencies: escape-string-regexp: 4.0.0 - eslint: 9.31.0(jiti@1.21.7) + eslint: 9.32.0(jiti@1.21.7) ignore: 5.3.2 - '@eslint-community/eslint-utils@4.7.0(eslint@9.31.0(jiti@1.21.7))': + '@eslint-community/eslint-utils@4.7.0(eslint@9.32.0(jiti@1.21.7))': dependencies: - eslint: 9.31.0(jiti@1.21.7) + eslint: 9.32.0(jiti@1.21.7) eslint-visitor-keys: 3.4.3 '@eslint-community/regexpp@4.12.1': {} - '@eslint-react/ast@1.52.3(eslint@9.31.0(jiti@1.21.7))(typescript@5.8.3)': + '@eslint-react/ast@1.52.3(eslint@9.32.0(jiti@1.21.7))(typescript@5.8.3)': dependencies: '@eslint-react/eff': 1.52.3 '@typescript-eslint/types': 8.37.0 '@typescript-eslint/typescript-estree': 8.37.0(typescript@5.8.3) - '@typescript-eslint/utils': 8.37.0(eslint@9.31.0(jiti@1.21.7))(typescript@5.8.3) + '@typescript-eslint/utils': 8.37.0(eslint@9.32.0(jiti@1.21.7))(typescript@5.8.3) string-ts: 2.2.1 ts-pattern: 5.7.1 transitivePeerDependencies: @@ -9349,17 +9347,17 @@ snapshots: - supports-color - typescript - '@eslint-react/core@1.52.3(eslint@9.31.0(jiti@1.21.7))(typescript@5.8.3)': + '@eslint-react/core@1.52.3(eslint@9.32.0(jiti@1.21.7))(typescript@5.8.3)': dependencies: - '@eslint-react/ast': 1.52.3(eslint@9.31.0(jiti@1.21.7))(typescript@5.8.3) + '@eslint-react/ast': 1.52.3(eslint@9.32.0(jiti@1.21.7))(typescript@5.8.3) '@eslint-react/eff': 1.52.3 - '@eslint-react/kit': 1.52.3(eslint@9.31.0(jiti@1.21.7))(typescript@5.8.3) - '@eslint-react/shared': 1.52.3(eslint@9.31.0(jiti@1.21.7))(typescript@5.8.3) - '@eslint-react/var': 1.52.3(eslint@9.31.0(jiti@1.21.7))(typescript@5.8.3) + '@eslint-react/kit': 1.52.3(eslint@9.32.0(jiti@1.21.7))(typescript@5.8.3) + '@eslint-react/shared': 1.52.3(eslint@9.32.0(jiti@1.21.7))(typescript@5.8.3) + '@eslint-react/var': 1.52.3(eslint@9.32.0(jiti@1.21.7))(typescript@5.8.3) '@typescript-eslint/scope-manager': 8.37.0 - '@typescript-eslint/type-utils': 8.37.0(eslint@9.31.0(jiti@1.21.7))(typescript@5.8.3) + '@typescript-eslint/type-utils': 8.37.0(eslint@9.32.0(jiti@1.21.7))(typescript@5.8.3) '@typescript-eslint/types': 8.37.0 - '@typescript-eslint/utils': 8.37.0(eslint@9.31.0(jiti@1.21.7))(typescript@5.8.3) + '@typescript-eslint/utils': 8.37.0(eslint@9.32.0(jiti@1.21.7))(typescript@5.8.3) birecord: 0.1.1 ts-pattern: 5.7.1 transitivePeerDependencies: @@ -9369,32 +9367,32 @@ snapshots: '@eslint-react/eff@1.52.3': {} - '@eslint-react/eslint-plugin@1.52.3(eslint@9.31.0(jiti@1.21.7))(ts-api-utils@2.1.0(typescript@5.8.3))(typescript@5.8.3)': + '@eslint-react/eslint-plugin@1.52.3(eslint@9.32.0(jiti@1.21.7))(ts-api-utils@2.1.0(typescript@5.8.3))(typescript@5.8.3)': dependencies: '@eslint-react/eff': 1.52.3 - '@eslint-react/kit': 1.52.3(eslint@9.31.0(jiti@1.21.7))(typescript@5.8.3) - '@eslint-react/shared': 1.52.3(eslint@9.31.0(jiti@1.21.7))(typescript@5.8.3) + '@eslint-react/kit': 1.52.3(eslint@9.32.0(jiti@1.21.7))(typescript@5.8.3) + '@eslint-react/shared': 1.52.3(eslint@9.32.0(jiti@1.21.7))(typescript@5.8.3) '@typescript-eslint/scope-manager': 8.37.0 - '@typescript-eslint/type-utils': 8.37.0(eslint@9.31.0(jiti@1.21.7))(typescript@5.8.3) + '@typescript-eslint/type-utils': 8.37.0(eslint@9.32.0(jiti@1.21.7))(typescript@5.8.3) '@typescript-eslint/types': 8.37.0 - '@typescript-eslint/utils': 8.37.0(eslint@9.31.0(jiti@1.21.7))(typescript@5.8.3) - eslint: 9.31.0(jiti@1.21.7) - eslint-plugin-react-debug: 1.52.3(eslint@9.31.0(jiti@1.21.7))(typescript@5.8.3) - eslint-plugin-react-dom: 1.52.3(eslint@9.31.0(jiti@1.21.7))(typescript@5.8.3) - eslint-plugin-react-hooks-extra: 1.52.3(eslint@9.31.0(jiti@1.21.7))(typescript@5.8.3) - eslint-plugin-react-naming-convention: 1.52.3(eslint@9.31.0(jiti@1.21.7))(typescript@5.8.3) - eslint-plugin-react-web-api: 1.52.3(eslint@9.31.0(jiti@1.21.7))(typescript@5.8.3) - eslint-plugin-react-x: 1.52.3(eslint@9.31.0(jiti@1.21.7))(ts-api-utils@2.1.0(typescript@5.8.3))(typescript@5.8.3) + '@typescript-eslint/utils': 8.37.0(eslint@9.32.0(jiti@1.21.7))(typescript@5.8.3) + eslint: 9.32.0(jiti@1.21.7) + eslint-plugin-react-debug: 1.52.3(eslint@9.32.0(jiti@1.21.7))(typescript@5.8.3) + eslint-plugin-react-dom: 1.52.3(eslint@9.32.0(jiti@1.21.7))(typescript@5.8.3) + eslint-plugin-react-hooks-extra: 1.52.3(eslint@9.32.0(jiti@1.21.7))(typescript@5.8.3) + eslint-plugin-react-naming-convention: 1.52.3(eslint@9.32.0(jiti@1.21.7))(typescript@5.8.3) + eslint-plugin-react-web-api: 1.52.3(eslint@9.32.0(jiti@1.21.7))(typescript@5.8.3) + eslint-plugin-react-x: 1.52.3(eslint@9.32.0(jiti@1.21.7))(ts-api-utils@2.1.0(typescript@5.8.3))(typescript@5.8.3) optionalDependencies: typescript: 5.8.3 transitivePeerDependencies: - supports-color - ts-api-utils - '@eslint-react/kit@1.52.3(eslint@9.31.0(jiti@1.21.7))(typescript@5.8.3)': + '@eslint-react/kit@1.52.3(eslint@9.32.0(jiti@1.21.7))(typescript@5.8.3)': dependencies: '@eslint-react/eff': 1.52.3 - '@typescript-eslint/utils': 8.37.0(eslint@9.31.0(jiti@1.21.7))(typescript@5.8.3) + '@typescript-eslint/utils': 8.37.0(eslint@9.32.0(jiti@1.21.7))(typescript@5.8.3) ts-pattern: 5.7.1 zod: 4.0.5 transitivePeerDependencies: @@ -9402,11 +9400,11 @@ snapshots: - supports-color - typescript - '@eslint-react/shared@1.52.3(eslint@9.31.0(jiti@1.21.7))(typescript@5.8.3)': + '@eslint-react/shared@1.52.3(eslint@9.32.0(jiti@1.21.7))(typescript@5.8.3)': dependencies: '@eslint-react/eff': 1.52.3 - '@eslint-react/kit': 1.52.3(eslint@9.31.0(jiti@1.21.7))(typescript@5.8.3) - '@typescript-eslint/utils': 8.37.0(eslint@9.31.0(jiti@1.21.7))(typescript@5.8.3) + '@eslint-react/kit': 1.52.3(eslint@9.32.0(jiti@1.21.7))(typescript@5.8.3) + '@typescript-eslint/utils': 8.37.0(eslint@9.32.0(jiti@1.21.7))(typescript@5.8.3) ts-pattern: 5.7.1 zod: 4.0.5 transitivePeerDependencies: @@ -9414,13 +9412,13 @@ snapshots: - supports-color - typescript - '@eslint-react/var@1.52.3(eslint@9.31.0(jiti@1.21.7))(typescript@5.8.3)': + '@eslint-react/var@1.52.3(eslint@9.32.0(jiti@1.21.7))(typescript@5.8.3)': dependencies: - '@eslint-react/ast': 1.52.3(eslint@9.31.0(jiti@1.21.7))(typescript@5.8.3) + '@eslint-react/ast': 1.52.3(eslint@9.32.0(jiti@1.21.7))(typescript@5.8.3) '@eslint-react/eff': 1.52.3 '@typescript-eslint/scope-manager': 8.37.0 '@typescript-eslint/types': 8.37.0 - '@typescript-eslint/utils': 8.37.0(eslint@9.31.0(jiti@1.21.7))(typescript@5.8.3) + '@typescript-eslint/utils': 8.37.0(eslint@9.32.0(jiti@1.21.7))(typescript@5.8.3) string-ts: 2.2.1 ts-pattern: 5.7.1 transitivePeerDependencies: @@ -9428,9 +9426,9 @@ snapshots: - supports-color - typescript - '@eslint/compat@1.3.1(eslint@9.31.0(jiti@1.21.7))': + '@eslint/compat@1.3.1(eslint@9.32.0(jiti@1.21.7))': optionalDependencies: - eslint: 9.31.0(jiti@1.21.7) + eslint: 9.32.0(jiti@1.21.7) '@eslint/config-array@0.21.0': dependencies: @@ -9462,6 +9460,8 @@ snapshots: '@eslint/js@9.31.0': {} + '@eslint/js@9.32.0': {} + '@eslint/markdown@7.1.0': dependencies: '@eslint/core': 0.15.1 @@ -9477,11 +9477,6 @@ snapshots: '@eslint/object-schema@2.1.6': {} - '@eslint/plugin-kit@0.3.3': - dependencies: - '@eslint/core': 0.15.1 - levn: 0.4.1 - '@eslint/plugin-kit@0.3.4': dependencies: '@eslint/core': 0.15.1 @@ -10186,7 +10181,7 @@ snapshots: '@next/env@15.3.5': {} - '@next/eslint-plugin-next@15.4.4': + '@next/eslint-plugin-next@15.4.5': dependencies: fast-glob: 3.3.1 @@ -10860,10 +10855,6 @@ snapshots: dependencies: type-fest: 2.19.0 - '@storybook/csf@0.1.13': - dependencies: - type-fest: 2.19.0 - '@storybook/global@5.0.0': {} '@storybook/icons@1.4.0(react-dom@18.3.1(react@18.3.1))(react@18.3.1)': @@ -11038,11 +11029,11 @@ snapshots: dependencies: storybook: 8.5.0 - '@stylistic/eslint-plugin@5.2.2(eslint@9.31.0(jiti@1.21.7))': + '@stylistic/eslint-plugin@5.2.2(eslint@9.32.0(jiti@1.21.7))': dependencies: - '@eslint-community/eslint-utils': 4.7.0(eslint@9.31.0(jiti@1.21.7)) + '@eslint-community/eslint-utils': 4.7.0(eslint@9.32.0(jiti@1.21.7)) '@typescript-eslint/types': 8.38.0 - eslint: 9.31.0(jiti@1.21.7) + eslint: 9.32.0(jiti@1.21.7) eslint-visitor-keys: 4.2.1 espree: 10.4.0 estraverse: 5.3.0 @@ -11476,15 +11467,15 @@ snapshots: dependencies: '@types/yargs-parser': 21.0.3 - '@typescript-eslint/eslint-plugin@8.38.0(@typescript-eslint/parser@8.38.0(eslint@9.31.0(jiti@1.21.7))(typescript@5.8.3))(eslint@9.31.0(jiti@1.21.7))(typescript@5.8.3)': + '@typescript-eslint/eslint-plugin@8.38.0(@typescript-eslint/parser@8.38.0(eslint@9.32.0(jiti@1.21.7))(typescript@5.8.3))(eslint@9.32.0(jiti@1.21.7))(typescript@5.8.3)': dependencies: '@eslint-community/regexpp': 4.12.1 - '@typescript-eslint/parser': 8.38.0(eslint@9.31.0(jiti@1.21.7))(typescript@5.8.3) + '@typescript-eslint/parser': 8.38.0(eslint@9.32.0(jiti@1.21.7))(typescript@5.8.3) '@typescript-eslint/scope-manager': 8.38.0 - '@typescript-eslint/type-utils': 8.38.0(eslint@9.31.0(jiti@1.21.7))(typescript@5.8.3) - '@typescript-eslint/utils': 8.38.0(eslint@9.31.0(jiti@1.21.7))(typescript@5.8.3) + '@typescript-eslint/type-utils': 8.38.0(eslint@9.32.0(jiti@1.21.7))(typescript@5.8.3) + '@typescript-eslint/utils': 8.38.0(eslint@9.32.0(jiti@1.21.7))(typescript@5.8.3) '@typescript-eslint/visitor-keys': 8.38.0 - eslint: 9.31.0(jiti@1.21.7) + eslint: 9.32.0(jiti@1.21.7) graphemer: 1.4.0 ignore: 7.0.5 natural-compare: 1.4.0 @@ -11493,14 +11484,14 @@ snapshots: transitivePeerDependencies: - supports-color - '@typescript-eslint/parser@8.38.0(eslint@9.31.0(jiti@1.21.7))(typescript@5.8.3)': + '@typescript-eslint/parser@8.38.0(eslint@9.32.0(jiti@1.21.7))(typescript@5.8.3)': dependencies: '@typescript-eslint/scope-manager': 8.38.0 '@typescript-eslint/types': 8.38.0 '@typescript-eslint/typescript-estree': 8.38.0(typescript@5.8.3) '@typescript-eslint/visitor-keys': 8.38.0 debug: 4.4.1 - eslint: 9.31.0(jiti@1.21.7) + eslint: 9.32.0(jiti@1.21.7) typescript: 5.8.3 transitivePeerDependencies: - supports-color @@ -11541,25 +11532,25 @@ snapshots: dependencies: typescript: 5.8.3 - '@typescript-eslint/type-utils@8.37.0(eslint@9.31.0(jiti@1.21.7))(typescript@5.8.3)': + '@typescript-eslint/type-utils@8.37.0(eslint@9.32.0(jiti@1.21.7))(typescript@5.8.3)': dependencies: '@typescript-eslint/types': 8.37.0 '@typescript-eslint/typescript-estree': 8.37.0(typescript@5.8.3) - '@typescript-eslint/utils': 8.37.0(eslint@9.31.0(jiti@1.21.7))(typescript@5.8.3) + '@typescript-eslint/utils': 8.37.0(eslint@9.32.0(jiti@1.21.7))(typescript@5.8.3) debug: 4.4.1 - eslint: 9.31.0(jiti@1.21.7) + eslint: 9.32.0(jiti@1.21.7) ts-api-utils: 2.1.0(typescript@5.8.3) typescript: 5.8.3 transitivePeerDependencies: - supports-color - '@typescript-eslint/type-utils@8.38.0(eslint@9.31.0(jiti@1.21.7))(typescript@5.8.3)': + '@typescript-eslint/type-utils@8.38.0(eslint@9.32.0(jiti@1.21.7))(typescript@5.8.3)': dependencies: '@typescript-eslint/types': 8.38.0 '@typescript-eslint/typescript-estree': 8.38.0(typescript@5.8.3) - '@typescript-eslint/utils': 8.38.0(eslint@9.31.0(jiti@1.21.7))(typescript@5.8.3) + '@typescript-eslint/utils': 8.38.0(eslint@9.32.0(jiti@1.21.7))(typescript@5.8.3) debug: 4.4.1 - eslint: 9.31.0(jiti@1.21.7) + eslint: 9.32.0(jiti@1.21.7) ts-api-utils: 2.1.0(typescript@5.8.3) typescript: 5.8.3 transitivePeerDependencies: @@ -11601,24 +11592,24 @@ snapshots: transitivePeerDependencies: - supports-color - '@typescript-eslint/utils@8.37.0(eslint@9.31.0(jiti@1.21.7))(typescript@5.8.3)': + '@typescript-eslint/utils@8.37.0(eslint@9.32.0(jiti@1.21.7))(typescript@5.8.3)': dependencies: - '@eslint-community/eslint-utils': 4.7.0(eslint@9.31.0(jiti@1.21.7)) + '@eslint-community/eslint-utils': 4.7.0(eslint@9.32.0(jiti@1.21.7)) '@typescript-eslint/scope-manager': 8.37.0 '@typescript-eslint/types': 8.37.0 '@typescript-eslint/typescript-estree': 8.37.0(typescript@5.8.3) - eslint: 9.31.0(jiti@1.21.7) + eslint: 9.32.0(jiti@1.21.7) typescript: 5.8.3 transitivePeerDependencies: - supports-color - '@typescript-eslint/utils@8.38.0(eslint@9.31.0(jiti@1.21.7))(typescript@5.8.3)': + '@typescript-eslint/utils@8.38.0(eslint@9.32.0(jiti@1.21.7))(typescript@5.8.3)': dependencies: - '@eslint-community/eslint-utils': 4.7.0(eslint@9.31.0(jiti@1.21.7)) + '@eslint-community/eslint-utils': 4.7.0(eslint@9.32.0(jiti@1.21.7)) '@typescript-eslint/scope-manager': 8.38.0 '@typescript-eslint/types': 8.38.0 '@typescript-eslint/typescript-estree': 8.38.0(typescript@5.8.3) - eslint: 9.31.0(jiti@1.21.7) + eslint: 9.32.0(jiti@1.21.7) typescript: 5.8.3 transitivePeerDependencies: - supports-color @@ -11694,10 +11685,10 @@ snapshots: '@unrs/resolver-binding-win32-x64-msvc@1.11.1': optional: true - '@vitest/eslint-plugin@1.3.4(eslint@9.31.0(jiti@1.21.7))(typescript@5.8.3)': + '@vitest/eslint-plugin@1.3.4(eslint@9.32.0(jiti@1.21.7))(typescript@5.8.3)': dependencies: - '@typescript-eslint/utils': 8.38.0(eslint@9.31.0(jiti@1.21.7))(typescript@5.8.3) - eslint: 9.31.0(jiti@1.21.7) + '@typescript-eslint/utils': 8.38.0(eslint@9.32.0(jiti@1.21.7))(typescript@5.8.3) + eslint: 9.32.0(jiti@1.21.7) optionalDependencies: typescript: 5.8.3 transitivePeerDependencies: @@ -13076,34 +13067,34 @@ snapshots: escape-string-regexp@5.0.0: {} - eslint-compat-utils@0.5.1(eslint@9.31.0(jiti@1.21.7)): + eslint-compat-utils@0.5.1(eslint@9.32.0(jiti@1.21.7)): dependencies: - eslint: 9.31.0(jiti@1.21.7) + eslint: 9.32.0(jiti@1.21.7) semver: 7.7.2 - eslint-compat-utils@0.6.5(eslint@9.31.0(jiti@1.21.7)): + eslint-compat-utils@0.6.5(eslint@9.32.0(jiti@1.21.7)): dependencies: - eslint: 9.31.0(jiti@1.21.7) + eslint: 9.32.0(jiti@1.21.7) semver: 7.7.2 - eslint-config-flat-gitignore@2.1.0(eslint@9.31.0(jiti@1.21.7)): + eslint-config-flat-gitignore@2.1.0(eslint@9.32.0(jiti@1.21.7)): dependencies: - '@eslint/compat': 1.3.1(eslint@9.31.0(jiti@1.21.7)) - eslint: 9.31.0(jiti@1.21.7) + '@eslint/compat': 1.3.1(eslint@9.32.0(jiti@1.21.7)) + eslint: 9.32.0(jiti@1.21.7) - eslint-config-next@15.4.4(eslint@9.31.0(jiti@1.21.7))(typescript@5.8.3): + eslint-config-next@15.4.5(eslint@9.32.0(jiti@1.21.7))(typescript@5.8.3): dependencies: - '@next/eslint-plugin-next': 15.4.4 + '@next/eslint-plugin-next': 15.4.5 '@rushstack/eslint-patch': 1.12.0 - '@typescript-eslint/eslint-plugin': 8.38.0(@typescript-eslint/parser@8.38.0(eslint@9.31.0(jiti@1.21.7))(typescript@5.8.3))(eslint@9.31.0(jiti@1.21.7))(typescript@5.8.3) - '@typescript-eslint/parser': 8.38.0(eslint@9.31.0(jiti@1.21.7))(typescript@5.8.3) - eslint: 9.31.0(jiti@1.21.7) + '@typescript-eslint/eslint-plugin': 8.38.0(@typescript-eslint/parser@8.38.0(eslint@9.32.0(jiti@1.21.7))(typescript@5.8.3))(eslint@9.32.0(jiti@1.21.7))(typescript@5.8.3) + '@typescript-eslint/parser': 8.38.0(eslint@9.32.0(jiti@1.21.7))(typescript@5.8.3) + eslint: 9.32.0(jiti@1.21.7) eslint-import-resolver-node: 0.3.9 - eslint-import-resolver-typescript: 3.10.1(eslint-plugin-import@2.32.0)(eslint@9.31.0(jiti@1.21.7)) - eslint-plugin-import: 2.32.0(@typescript-eslint/parser@8.38.0(eslint@9.31.0(jiti@1.21.7))(typescript@5.8.3))(eslint-import-resolver-typescript@3.10.1)(eslint@9.31.0(jiti@1.21.7)) - eslint-plugin-jsx-a11y: 6.10.2(eslint@9.31.0(jiti@1.21.7)) - eslint-plugin-react: 7.37.5(eslint@9.31.0(jiti@1.21.7)) - eslint-plugin-react-hooks: 5.2.0(eslint@9.31.0(jiti@1.21.7)) + eslint-import-resolver-typescript: 3.10.1(eslint-plugin-import@2.32.0)(eslint@9.32.0(jiti@1.21.7)) + eslint-plugin-import: 2.32.0(@typescript-eslint/parser@8.38.0(eslint@9.32.0(jiti@1.21.7))(typescript@5.8.3))(eslint-import-resolver-typescript@3.10.1)(eslint@9.32.0(jiti@1.21.7)) + eslint-plugin-jsx-a11y: 6.10.2(eslint@9.32.0(jiti@1.21.7)) + eslint-plugin-react: 7.37.5(eslint@9.32.0(jiti@1.21.7)) + eslint-plugin-react-hooks: 5.2.0(eslint@9.32.0(jiti@1.21.7)) optionalDependencies: typescript: 5.8.3 transitivePeerDependencies: @@ -13123,67 +13114,67 @@ snapshots: transitivePeerDependencies: - supports-color - eslint-import-resolver-typescript@3.10.1(eslint-plugin-import@2.32.0)(eslint@9.31.0(jiti@1.21.7)): + eslint-import-resolver-typescript@3.10.1(eslint-plugin-import@2.32.0)(eslint@9.32.0(jiti@1.21.7)): dependencies: '@nolyfill/is-core-module': 1.0.39 debug: 4.4.1 - eslint: 9.31.0(jiti@1.21.7) + eslint: 9.32.0(jiti@1.21.7) get-tsconfig: 4.10.1 is-bun-module: 2.0.0 stable-hash: 0.0.5 tinyglobby: 0.2.14 unrs-resolver: 1.11.1 optionalDependencies: - eslint-plugin-import: 2.32.0(@typescript-eslint/parser@8.38.0(eslint@9.31.0(jiti@1.21.7))(typescript@5.8.3))(eslint-import-resolver-typescript@3.10.1)(eslint@9.31.0(jiti@1.21.7)) + eslint-plugin-import: 2.32.0(@typescript-eslint/parser@8.38.0(eslint@9.32.0(jiti@1.21.7))(typescript@5.8.3))(eslint-import-resolver-typescript@3.10.1)(eslint@9.32.0(jiti@1.21.7)) transitivePeerDependencies: - supports-color - eslint-json-compat-utils@0.2.1(eslint@9.31.0(jiti@1.21.7))(jsonc-eslint-parser@2.4.0): + eslint-json-compat-utils@0.2.1(eslint@9.32.0(jiti@1.21.7))(jsonc-eslint-parser@2.4.0): dependencies: - eslint: 9.31.0(jiti@1.21.7) + eslint: 9.32.0(jiti@1.21.7) esquery: 1.6.0 jsonc-eslint-parser: 2.4.0 - eslint-merge-processors@2.0.0(eslint@9.31.0(jiti@1.21.7)): + eslint-merge-processors@2.0.0(eslint@9.32.0(jiti@1.21.7)): dependencies: - eslint: 9.31.0(jiti@1.21.7) + eslint: 9.32.0(jiti@1.21.7) - eslint-module-utils@2.12.1(@typescript-eslint/parser@8.38.0(eslint@9.31.0(jiti@1.21.7))(typescript@5.8.3))(eslint-import-resolver-node@0.3.9)(eslint-import-resolver-typescript@3.10.1)(eslint@9.31.0(jiti@1.21.7)): + eslint-module-utils@2.12.1(@typescript-eslint/parser@8.38.0(eslint@9.32.0(jiti@1.21.7))(typescript@5.8.3))(eslint-import-resolver-node@0.3.9)(eslint-import-resolver-typescript@3.10.1)(eslint@9.32.0(jiti@1.21.7)): dependencies: debug: 3.2.7 optionalDependencies: - '@typescript-eslint/parser': 8.38.0(eslint@9.31.0(jiti@1.21.7))(typescript@5.8.3) - eslint: 9.31.0(jiti@1.21.7) + '@typescript-eslint/parser': 8.38.0(eslint@9.32.0(jiti@1.21.7))(typescript@5.8.3) + eslint: 9.32.0(jiti@1.21.7) eslint-import-resolver-node: 0.3.9 - eslint-import-resolver-typescript: 3.10.1(eslint-plugin-import@2.32.0)(eslint@9.31.0(jiti@1.21.7)) + eslint-import-resolver-typescript: 3.10.1(eslint-plugin-import@2.32.0)(eslint@9.32.0(jiti@1.21.7)) transitivePeerDependencies: - supports-color - eslint-plugin-antfu@3.1.1(eslint@9.31.0(jiti@1.21.7)): + eslint-plugin-antfu@3.1.1(eslint@9.32.0(jiti@1.21.7)): dependencies: - eslint: 9.31.0(jiti@1.21.7) + eslint: 9.32.0(jiti@1.21.7) - eslint-plugin-command@3.3.1(eslint@9.31.0(jiti@1.21.7)): + eslint-plugin-command@3.3.1(eslint@9.32.0(jiti@1.21.7)): dependencies: '@es-joy/jsdoccomment': 0.50.2 - eslint: 9.31.0(jiti@1.21.7) + eslint: 9.32.0(jiti@1.21.7) - eslint-plugin-es-x@7.8.0(eslint@9.31.0(jiti@1.21.7)): + eslint-plugin-es-x@7.8.0(eslint@9.32.0(jiti@1.21.7)): dependencies: - '@eslint-community/eslint-utils': 4.7.0(eslint@9.31.0(jiti@1.21.7)) + '@eslint-community/eslint-utils': 4.7.0(eslint@9.32.0(jiti@1.21.7)) '@eslint-community/regexpp': 4.12.1 - eslint: 9.31.0(jiti@1.21.7) - eslint-compat-utils: 0.5.1(eslint@9.31.0(jiti@1.21.7)) + eslint: 9.32.0(jiti@1.21.7) + eslint-compat-utils: 0.5.1(eslint@9.32.0(jiti@1.21.7)) - eslint-plugin-import-lite@0.3.0(eslint@9.31.0(jiti@1.21.7))(typescript@5.8.3): + eslint-plugin-import-lite@0.3.0(eslint@9.32.0(jiti@1.21.7))(typescript@5.8.3): dependencies: - '@eslint-community/eslint-utils': 4.7.0(eslint@9.31.0(jiti@1.21.7)) + '@eslint-community/eslint-utils': 4.7.0(eslint@9.32.0(jiti@1.21.7)) '@typescript-eslint/types': 8.38.0 - eslint: 9.31.0(jiti@1.21.7) + eslint: 9.32.0(jiti@1.21.7) optionalDependencies: typescript: 5.8.3 - eslint-plugin-import@2.32.0(@typescript-eslint/parser@8.38.0(eslint@9.31.0(jiti@1.21.7))(typescript@5.8.3))(eslint-import-resolver-typescript@3.10.1)(eslint@9.31.0(jiti@1.21.7)): + eslint-plugin-import@2.32.0(@typescript-eslint/parser@8.38.0(eslint@9.32.0(jiti@1.21.7))(typescript@5.8.3))(eslint-import-resolver-typescript@3.10.1)(eslint@9.32.0(jiti@1.21.7)): dependencies: '@rtsao/scc': 1.1.0 array-includes: '@nolyfill/array-includes@1.0.44' @@ -13192,9 +13183,9 @@ snapshots: array.prototype.flatmap: '@nolyfill/array.prototype.flatmap@1.0.44' debug: 3.2.7 doctrine: 2.1.0 - eslint: 9.31.0(jiti@1.21.7) + eslint: 9.32.0(jiti@1.21.7) eslint-import-resolver-node: 0.3.9 - eslint-module-utils: 2.12.1(@typescript-eslint/parser@8.38.0(eslint@9.31.0(jiti@1.21.7))(typescript@5.8.3))(eslint-import-resolver-node@0.3.9)(eslint-import-resolver-typescript@3.10.1)(eslint@9.31.0(jiti@1.21.7)) + eslint-module-utils: 2.12.1(@typescript-eslint/parser@8.38.0(eslint@9.32.0(jiti@1.21.7))(typescript@5.8.3))(eslint-import-resolver-node@0.3.9)(eslint-import-resolver-typescript@3.10.1)(eslint@9.32.0(jiti@1.21.7)) hasown: '@nolyfill/hasown@1.0.44' is-core-module: '@nolyfill/is-core-module@1.0.39' is-glob: 4.0.3 @@ -13206,20 +13197,20 @@ snapshots: string.prototype.trimend: '@nolyfill/string.prototype.trimend@1.0.44' tsconfig-paths: 3.15.0 optionalDependencies: - '@typescript-eslint/parser': 8.38.0(eslint@9.31.0(jiti@1.21.7))(typescript@5.8.3) + '@typescript-eslint/parser': 8.38.0(eslint@9.32.0(jiti@1.21.7))(typescript@5.8.3) transitivePeerDependencies: - eslint-import-resolver-typescript - eslint-import-resolver-webpack - supports-color - eslint-plugin-jsdoc@51.4.1(eslint@9.31.0(jiti@1.21.7)): + eslint-plugin-jsdoc@51.4.1(eslint@9.32.0(jiti@1.21.7)): dependencies: '@es-joy/jsdoccomment': 0.52.0 are-docs-informative: 0.0.2 comment-parser: 1.4.1 debug: 4.4.1 escape-string-regexp: 4.0.0 - eslint: 9.31.0(jiti@1.21.7) + eslint: 9.32.0(jiti@1.21.7) espree: 10.4.0 esquery: 1.6.0 parse-imports-exports: 0.2.4 @@ -13228,12 +13219,12 @@ snapshots: transitivePeerDependencies: - supports-color - eslint-plugin-jsonc@2.20.1(eslint@9.31.0(jiti@1.21.7)): + eslint-plugin-jsonc@2.20.1(eslint@9.32.0(jiti@1.21.7)): dependencies: - '@eslint-community/eslint-utils': 4.7.0(eslint@9.31.0(jiti@1.21.7)) - eslint: 9.31.0(jiti@1.21.7) - eslint-compat-utils: 0.6.5(eslint@9.31.0(jiti@1.21.7)) - eslint-json-compat-utils: 0.2.1(eslint@9.31.0(jiti@1.21.7))(jsonc-eslint-parser@2.4.0) + '@eslint-community/eslint-utils': 4.7.0(eslint@9.32.0(jiti@1.21.7)) + eslint: 9.32.0(jiti@1.21.7) + eslint-compat-utils: 0.6.5(eslint@9.32.0(jiti@1.21.7)) + eslint-json-compat-utils: 0.2.1(eslint@9.32.0(jiti@1.21.7))(jsonc-eslint-parser@2.4.0) espree: 10.4.0 graphemer: 1.4.0 jsonc-eslint-parser: 2.4.0 @@ -13242,7 +13233,7 @@ snapshots: transitivePeerDependencies: - '@eslint/json' - eslint-plugin-jsx-a11y@6.10.2(eslint@9.31.0(jiti@1.21.7)): + eslint-plugin-jsx-a11y@6.10.2(eslint@9.32.0(jiti@1.21.7)): dependencies: aria-query: 5.3.2 array-includes: '@nolyfill/array-includes@1.0.44' @@ -13252,7 +13243,7 @@ snapshots: axobject-query: 4.1.0 damerau-levenshtein: 1.0.8 emoji-regex: 9.2.2 - eslint: 9.31.0(jiti@1.21.7) + eslint: 9.32.0(jiti@1.21.7) hasown: '@nolyfill/hasown@1.0.44' jsx-ast-utils: 3.3.5 language-tags: 1.0.9 @@ -13261,12 +13252,12 @@ snapshots: safe-regex-test: '@nolyfill/safe-regex-test@1.0.44' string.prototype.includes: '@nolyfill/string.prototype.includes@1.0.44' - eslint-plugin-n@17.21.0(eslint@9.31.0(jiti@1.21.7))(typescript@5.8.3): + eslint-plugin-n@17.21.0(eslint@9.32.0(jiti@1.21.7))(typescript@5.8.3): dependencies: - '@eslint-community/eslint-utils': 4.7.0(eslint@9.31.0(jiti@1.21.7)) + '@eslint-community/eslint-utils': 4.7.0(eslint@9.32.0(jiti@1.21.7)) enhanced-resolve: 5.18.2 - eslint: 9.31.0(jiti@1.21.7) - eslint-plugin-es-x: 7.8.0(eslint@9.31.0(jiti@1.21.7)) + eslint: 9.32.0(jiti@1.21.7) + eslint-plugin-es-x: 7.8.0(eslint@9.32.0(jiti@1.21.7)) get-tsconfig: 4.10.1 globals: 15.15.0 ignore: 5.3.2 @@ -13282,19 +13273,19 @@ snapshots: dependencies: jsonc-parser: 3.3.1 - eslint-plugin-perfectionist@4.15.0(eslint@9.31.0(jiti@1.21.7))(typescript@5.8.3): + eslint-plugin-perfectionist@4.15.0(eslint@9.32.0(jiti@1.21.7))(typescript@5.8.3): dependencies: '@typescript-eslint/types': 8.38.0 - '@typescript-eslint/utils': 8.38.0(eslint@9.31.0(jiti@1.21.7))(typescript@5.8.3) - eslint: 9.31.0(jiti@1.21.7) + '@typescript-eslint/utils': 8.38.0(eslint@9.32.0(jiti@1.21.7))(typescript@5.8.3) + eslint: 9.32.0(jiti@1.21.7) natural-orderby: 5.0.0 transitivePeerDependencies: - supports-color - typescript - eslint-plugin-pnpm@1.1.0(eslint@9.31.0(jiti@1.21.7)): + eslint-plugin-pnpm@1.1.0(eslint@9.32.0(jiti@1.21.7)): dependencies: - eslint: 9.31.0(jiti@1.21.7) + eslint: 9.32.0(jiti@1.21.7) find-up-simple: 1.0.1 jsonc-eslint-parser: 2.4.0 pathe: 2.0.3 @@ -13302,19 +13293,19 @@ snapshots: tinyglobby: 0.2.14 yaml-eslint-parser: 1.3.0 - eslint-plugin-react-debug@1.52.3(eslint@9.31.0(jiti@1.21.7))(typescript@5.8.3): + eslint-plugin-react-debug@1.52.3(eslint@9.32.0(jiti@1.21.7))(typescript@5.8.3): dependencies: - '@eslint-react/ast': 1.52.3(eslint@9.31.0(jiti@1.21.7))(typescript@5.8.3) - '@eslint-react/core': 1.52.3(eslint@9.31.0(jiti@1.21.7))(typescript@5.8.3) + '@eslint-react/ast': 1.52.3(eslint@9.32.0(jiti@1.21.7))(typescript@5.8.3) + '@eslint-react/core': 1.52.3(eslint@9.32.0(jiti@1.21.7))(typescript@5.8.3) '@eslint-react/eff': 1.52.3 - '@eslint-react/kit': 1.52.3(eslint@9.31.0(jiti@1.21.7))(typescript@5.8.3) - '@eslint-react/shared': 1.52.3(eslint@9.31.0(jiti@1.21.7))(typescript@5.8.3) - '@eslint-react/var': 1.52.3(eslint@9.31.0(jiti@1.21.7))(typescript@5.8.3) + '@eslint-react/kit': 1.52.3(eslint@9.32.0(jiti@1.21.7))(typescript@5.8.3) + '@eslint-react/shared': 1.52.3(eslint@9.32.0(jiti@1.21.7))(typescript@5.8.3) + '@eslint-react/var': 1.52.3(eslint@9.32.0(jiti@1.21.7))(typescript@5.8.3) '@typescript-eslint/scope-manager': 8.37.0 - '@typescript-eslint/type-utils': 8.37.0(eslint@9.31.0(jiti@1.21.7))(typescript@5.8.3) + '@typescript-eslint/type-utils': 8.37.0(eslint@9.32.0(jiti@1.21.7))(typescript@5.8.3) '@typescript-eslint/types': 8.37.0 - '@typescript-eslint/utils': 8.37.0(eslint@9.31.0(jiti@1.21.7))(typescript@5.8.3) - eslint: 9.31.0(jiti@1.21.7) + '@typescript-eslint/utils': 8.37.0(eslint@9.32.0(jiti@1.21.7))(typescript@5.8.3) + eslint: 9.32.0(jiti@1.21.7) string-ts: 2.2.1 ts-pattern: 5.7.1 optionalDependencies: @@ -13322,19 +13313,19 @@ snapshots: transitivePeerDependencies: - supports-color - eslint-plugin-react-dom@1.52.3(eslint@9.31.0(jiti@1.21.7))(typescript@5.8.3): + eslint-plugin-react-dom@1.52.3(eslint@9.32.0(jiti@1.21.7))(typescript@5.8.3): dependencies: - '@eslint-react/ast': 1.52.3(eslint@9.31.0(jiti@1.21.7))(typescript@5.8.3) - '@eslint-react/core': 1.52.3(eslint@9.31.0(jiti@1.21.7))(typescript@5.8.3) + '@eslint-react/ast': 1.52.3(eslint@9.32.0(jiti@1.21.7))(typescript@5.8.3) + '@eslint-react/core': 1.52.3(eslint@9.32.0(jiti@1.21.7))(typescript@5.8.3) '@eslint-react/eff': 1.52.3 - '@eslint-react/kit': 1.52.3(eslint@9.31.0(jiti@1.21.7))(typescript@5.8.3) - '@eslint-react/shared': 1.52.3(eslint@9.31.0(jiti@1.21.7))(typescript@5.8.3) - '@eslint-react/var': 1.52.3(eslint@9.31.0(jiti@1.21.7))(typescript@5.8.3) + '@eslint-react/kit': 1.52.3(eslint@9.32.0(jiti@1.21.7))(typescript@5.8.3) + '@eslint-react/shared': 1.52.3(eslint@9.32.0(jiti@1.21.7))(typescript@5.8.3) + '@eslint-react/var': 1.52.3(eslint@9.32.0(jiti@1.21.7))(typescript@5.8.3) '@typescript-eslint/scope-manager': 8.37.0 '@typescript-eslint/types': 8.37.0 - '@typescript-eslint/utils': 8.37.0(eslint@9.31.0(jiti@1.21.7))(typescript@5.8.3) + '@typescript-eslint/utils': 8.37.0(eslint@9.32.0(jiti@1.21.7))(typescript@5.8.3) compare-versions: 6.1.1 - eslint: 9.31.0(jiti@1.21.7) + eslint: 9.32.0(jiti@1.21.7) string-ts: 2.2.1 ts-pattern: 5.7.1 optionalDependencies: @@ -13342,19 +13333,19 @@ snapshots: transitivePeerDependencies: - supports-color - eslint-plugin-react-hooks-extra@1.52.3(eslint@9.31.0(jiti@1.21.7))(typescript@5.8.3): + eslint-plugin-react-hooks-extra@1.52.3(eslint@9.32.0(jiti@1.21.7))(typescript@5.8.3): dependencies: - '@eslint-react/ast': 1.52.3(eslint@9.31.0(jiti@1.21.7))(typescript@5.8.3) - '@eslint-react/core': 1.52.3(eslint@9.31.0(jiti@1.21.7))(typescript@5.8.3) + '@eslint-react/ast': 1.52.3(eslint@9.32.0(jiti@1.21.7))(typescript@5.8.3) + '@eslint-react/core': 1.52.3(eslint@9.32.0(jiti@1.21.7))(typescript@5.8.3) '@eslint-react/eff': 1.52.3 - '@eslint-react/kit': 1.52.3(eslint@9.31.0(jiti@1.21.7))(typescript@5.8.3) - '@eslint-react/shared': 1.52.3(eslint@9.31.0(jiti@1.21.7))(typescript@5.8.3) - '@eslint-react/var': 1.52.3(eslint@9.31.0(jiti@1.21.7))(typescript@5.8.3) + '@eslint-react/kit': 1.52.3(eslint@9.32.0(jiti@1.21.7))(typescript@5.8.3) + '@eslint-react/shared': 1.52.3(eslint@9.32.0(jiti@1.21.7))(typescript@5.8.3) + '@eslint-react/var': 1.52.3(eslint@9.32.0(jiti@1.21.7))(typescript@5.8.3) '@typescript-eslint/scope-manager': 8.37.0 - '@typescript-eslint/type-utils': 8.37.0(eslint@9.31.0(jiti@1.21.7))(typescript@5.8.3) + '@typescript-eslint/type-utils': 8.37.0(eslint@9.32.0(jiti@1.21.7))(typescript@5.8.3) '@typescript-eslint/types': 8.37.0 - '@typescript-eslint/utils': 8.37.0(eslint@9.31.0(jiti@1.21.7))(typescript@5.8.3) - eslint: 9.31.0(jiti@1.21.7) + '@typescript-eslint/utils': 8.37.0(eslint@9.32.0(jiti@1.21.7))(typescript@5.8.3) + eslint: 9.32.0(jiti@1.21.7) string-ts: 2.2.1 ts-pattern: 5.7.1 optionalDependencies: @@ -13362,23 +13353,23 @@ snapshots: transitivePeerDependencies: - supports-color - eslint-plugin-react-hooks@5.2.0(eslint@9.31.0(jiti@1.21.7)): + eslint-plugin-react-hooks@5.2.0(eslint@9.32.0(jiti@1.21.7)): dependencies: - eslint: 9.31.0(jiti@1.21.7) + eslint: 9.32.0(jiti@1.21.7) - eslint-plugin-react-naming-convention@1.52.3(eslint@9.31.0(jiti@1.21.7))(typescript@5.8.3): + eslint-plugin-react-naming-convention@1.52.3(eslint@9.32.0(jiti@1.21.7))(typescript@5.8.3): dependencies: - '@eslint-react/ast': 1.52.3(eslint@9.31.0(jiti@1.21.7))(typescript@5.8.3) - '@eslint-react/core': 1.52.3(eslint@9.31.0(jiti@1.21.7))(typescript@5.8.3) + '@eslint-react/ast': 1.52.3(eslint@9.32.0(jiti@1.21.7))(typescript@5.8.3) + '@eslint-react/core': 1.52.3(eslint@9.32.0(jiti@1.21.7))(typescript@5.8.3) '@eslint-react/eff': 1.52.3 - '@eslint-react/kit': 1.52.3(eslint@9.31.0(jiti@1.21.7))(typescript@5.8.3) - '@eslint-react/shared': 1.52.3(eslint@9.31.0(jiti@1.21.7))(typescript@5.8.3) - '@eslint-react/var': 1.52.3(eslint@9.31.0(jiti@1.21.7))(typescript@5.8.3) + '@eslint-react/kit': 1.52.3(eslint@9.32.0(jiti@1.21.7))(typescript@5.8.3) + '@eslint-react/shared': 1.52.3(eslint@9.32.0(jiti@1.21.7))(typescript@5.8.3) + '@eslint-react/var': 1.52.3(eslint@9.32.0(jiti@1.21.7))(typescript@5.8.3) '@typescript-eslint/scope-manager': 8.37.0 - '@typescript-eslint/type-utils': 8.37.0(eslint@9.31.0(jiti@1.21.7))(typescript@5.8.3) + '@typescript-eslint/type-utils': 8.37.0(eslint@9.32.0(jiti@1.21.7))(typescript@5.8.3) '@typescript-eslint/types': 8.37.0 - '@typescript-eslint/utils': 8.37.0(eslint@9.31.0(jiti@1.21.7))(typescript@5.8.3) - eslint: 9.31.0(jiti@1.21.7) + '@typescript-eslint/utils': 8.37.0(eslint@9.32.0(jiti@1.21.7))(typescript@5.8.3) + eslint: 9.32.0(jiti@1.21.7) string-ts: 2.2.1 ts-pattern: 5.7.1 optionalDependencies: @@ -13386,22 +13377,22 @@ snapshots: transitivePeerDependencies: - supports-color - eslint-plugin-react-refresh@0.4.20(eslint@9.31.0(jiti@1.21.7)): + eslint-plugin-react-refresh@0.4.20(eslint@9.32.0(jiti@1.21.7)): dependencies: - eslint: 9.31.0(jiti@1.21.7) + eslint: 9.32.0(jiti@1.21.7) - eslint-plugin-react-web-api@1.52.3(eslint@9.31.0(jiti@1.21.7))(typescript@5.8.3): + eslint-plugin-react-web-api@1.52.3(eslint@9.32.0(jiti@1.21.7))(typescript@5.8.3): dependencies: - '@eslint-react/ast': 1.52.3(eslint@9.31.0(jiti@1.21.7))(typescript@5.8.3) - '@eslint-react/core': 1.52.3(eslint@9.31.0(jiti@1.21.7))(typescript@5.8.3) + '@eslint-react/ast': 1.52.3(eslint@9.32.0(jiti@1.21.7))(typescript@5.8.3) + '@eslint-react/core': 1.52.3(eslint@9.32.0(jiti@1.21.7))(typescript@5.8.3) '@eslint-react/eff': 1.52.3 - '@eslint-react/kit': 1.52.3(eslint@9.31.0(jiti@1.21.7))(typescript@5.8.3) - '@eslint-react/shared': 1.52.3(eslint@9.31.0(jiti@1.21.7))(typescript@5.8.3) - '@eslint-react/var': 1.52.3(eslint@9.31.0(jiti@1.21.7))(typescript@5.8.3) + '@eslint-react/kit': 1.52.3(eslint@9.32.0(jiti@1.21.7))(typescript@5.8.3) + '@eslint-react/shared': 1.52.3(eslint@9.32.0(jiti@1.21.7))(typescript@5.8.3) + '@eslint-react/var': 1.52.3(eslint@9.32.0(jiti@1.21.7))(typescript@5.8.3) '@typescript-eslint/scope-manager': 8.37.0 '@typescript-eslint/types': 8.37.0 - '@typescript-eslint/utils': 8.37.0(eslint@9.31.0(jiti@1.21.7))(typescript@5.8.3) - eslint: 9.31.0(jiti@1.21.7) + '@typescript-eslint/utils': 8.37.0(eslint@9.32.0(jiti@1.21.7))(typescript@5.8.3) + eslint: 9.32.0(jiti@1.21.7) string-ts: 2.2.1 ts-pattern: 5.7.1 optionalDependencies: @@ -13409,21 +13400,21 @@ snapshots: transitivePeerDependencies: - supports-color - eslint-plugin-react-x@1.52.3(eslint@9.31.0(jiti@1.21.7))(ts-api-utils@2.1.0(typescript@5.8.3))(typescript@5.8.3): + eslint-plugin-react-x@1.52.3(eslint@9.32.0(jiti@1.21.7))(ts-api-utils@2.1.0(typescript@5.8.3))(typescript@5.8.3): dependencies: - '@eslint-react/ast': 1.52.3(eslint@9.31.0(jiti@1.21.7))(typescript@5.8.3) - '@eslint-react/core': 1.52.3(eslint@9.31.0(jiti@1.21.7))(typescript@5.8.3) + '@eslint-react/ast': 1.52.3(eslint@9.32.0(jiti@1.21.7))(typescript@5.8.3) + '@eslint-react/core': 1.52.3(eslint@9.32.0(jiti@1.21.7))(typescript@5.8.3) '@eslint-react/eff': 1.52.3 - '@eslint-react/kit': 1.52.3(eslint@9.31.0(jiti@1.21.7))(typescript@5.8.3) - '@eslint-react/shared': 1.52.3(eslint@9.31.0(jiti@1.21.7))(typescript@5.8.3) - '@eslint-react/var': 1.52.3(eslint@9.31.0(jiti@1.21.7))(typescript@5.8.3) + '@eslint-react/kit': 1.52.3(eslint@9.32.0(jiti@1.21.7))(typescript@5.8.3) + '@eslint-react/shared': 1.52.3(eslint@9.32.0(jiti@1.21.7))(typescript@5.8.3) + '@eslint-react/var': 1.52.3(eslint@9.32.0(jiti@1.21.7))(typescript@5.8.3) '@typescript-eslint/scope-manager': 8.37.0 - '@typescript-eslint/type-utils': 8.37.0(eslint@9.31.0(jiti@1.21.7))(typescript@5.8.3) + '@typescript-eslint/type-utils': 8.37.0(eslint@9.32.0(jiti@1.21.7))(typescript@5.8.3) '@typescript-eslint/types': 8.37.0 - '@typescript-eslint/utils': 8.37.0(eslint@9.31.0(jiti@1.21.7))(typescript@5.8.3) + '@typescript-eslint/utils': 8.37.0(eslint@9.32.0(jiti@1.21.7))(typescript@5.8.3) compare-versions: 6.1.1 - eslint: 9.31.0(jiti@1.21.7) - is-immutable-type: 5.0.1(eslint@9.31.0(jiti@1.21.7))(typescript@5.8.3) + eslint: 9.32.0(jiti@1.21.7) + is-immutable-type: 5.0.1(eslint@9.32.0(jiti@1.21.7))(typescript@5.8.3) string-ts: 2.2.1 ts-pattern: 5.7.1 optionalDependencies: @@ -13432,7 +13423,7 @@ snapshots: transitivePeerDependencies: - supports-color - eslint-plugin-react@7.37.5(eslint@9.31.0(jiti@1.21.7)): + eslint-plugin-react@7.37.5(eslint@9.32.0(jiti@1.21.7)): dependencies: array-includes: '@nolyfill/array-includes@1.0.44' array.prototype.findlast: '@nolyfill/array.prototype.findlast@1.0.44' @@ -13440,7 +13431,7 @@ snapshots: array.prototype.tosorted: '@nolyfill/array.prototype.tosorted@1.0.44' doctrine: 2.1.0 es-iterator-helpers: '@nolyfill/es-iterator-helpers@1.0.21' - eslint: 9.31.0(jiti@1.21.7) + eslint: 9.32.0(jiti@1.21.7) estraverse: 5.3.0 hasown: '@nolyfill/hasown@1.0.44' jsx-ast-utils: 3.3.5 @@ -13454,23 +13445,23 @@ snapshots: string.prototype.matchall: '@nolyfill/string.prototype.matchall@1.0.44' string.prototype.repeat: '@nolyfill/string.prototype.repeat@1.0.44' - eslint-plugin-regexp@2.9.0(eslint@9.31.0(jiti@1.21.7)): + eslint-plugin-regexp@2.9.0(eslint@9.32.0(jiti@1.21.7)): dependencies: - '@eslint-community/eslint-utils': 4.7.0(eslint@9.31.0(jiti@1.21.7)) + '@eslint-community/eslint-utils': 4.7.0(eslint@9.32.0(jiti@1.21.7)) '@eslint-community/regexpp': 4.12.1 comment-parser: 1.4.1 - eslint: 9.31.0(jiti@1.21.7) + eslint: 9.32.0(jiti@1.21.7) jsdoc-type-pratt-parser: 4.1.0 refa: 0.12.1 regexp-ast-analysis: 0.7.1 scslre: 0.3.0 - eslint-plugin-sonarjs@3.0.4(eslint@9.31.0(jiti@1.21.7)): + eslint-plugin-sonarjs@3.0.4(eslint@9.32.0(jiti@1.21.7)): dependencies: '@eslint-community/regexpp': 4.12.1 builtin-modules: 3.3.0 bytes: 3.1.2 - eslint: 9.31.0(jiti@1.21.7) + eslint: 9.32.0(jiti@1.21.7) functional-red-black-tree: 1.0.1 jsx-ast-utils: 3.3.5 lodash.merge: 4.6.2 @@ -13479,11 +13470,11 @@ snapshots: semver: 7.7.2 typescript: 5.8.3 - eslint-plugin-storybook@0.11.6(eslint@9.31.0(jiti@1.21.7))(typescript@5.8.3): + eslint-plugin-storybook@0.11.6(eslint@9.32.0(jiti@1.21.7))(typescript@5.8.3): dependencies: - '@storybook/csf': 0.1.13 - '@typescript-eslint/utils': 8.37.0(eslint@9.31.0(jiti@1.21.7))(typescript@5.8.3) - eslint: 9.31.0(jiti@1.21.7) + '@storybook/csf': 0.1.12 + '@typescript-eslint/utils': 8.38.0(eslint@9.32.0(jiti@1.21.7))(typescript@5.8.3) + eslint: 9.32.0(jiti@1.21.7) ts-dedent: 2.2.0 transitivePeerDependencies: - supports-color @@ -13495,26 +13486,26 @@ snapshots: postcss: 8.5.6 tailwindcss: 3.4.17(ts-node@10.9.2(@types/node@18.15.0)(typescript@5.8.3)) - eslint-plugin-toml@0.12.0(eslint@9.31.0(jiti@1.21.7)): + eslint-plugin-toml@0.12.0(eslint@9.32.0(jiti@1.21.7)): dependencies: debug: 4.4.1 - eslint: 9.31.0(jiti@1.21.7) - eslint-compat-utils: 0.6.5(eslint@9.31.0(jiti@1.21.7)) + eslint: 9.32.0(jiti@1.21.7) + eslint-compat-utils: 0.6.5(eslint@9.32.0(jiti@1.21.7)) lodash: 4.17.21 toml-eslint-parser: 0.10.0 transitivePeerDependencies: - supports-color - eslint-plugin-unicorn@60.0.0(eslint@9.31.0(jiti@1.21.7)): + eslint-plugin-unicorn@60.0.0(eslint@9.32.0(jiti@1.21.7)): dependencies: '@babel/helper-validator-identifier': 7.27.1 - '@eslint-community/eslint-utils': 4.7.0(eslint@9.31.0(jiti@1.21.7)) + '@eslint-community/eslint-utils': 4.7.0(eslint@9.32.0(jiti@1.21.7)) '@eslint/plugin-kit': 0.3.4 change-case: 5.4.4 ci-info: 4.3.0 clean-regexp: 1.0.0 core-js-compat: 3.44.0 - eslint: 9.31.0(jiti@1.21.7) + eslint: 9.32.0(jiti@1.21.7) esquery: 1.6.0 find-up-simple: 1.0.1 globals: 16.3.0 @@ -13527,40 +13518,40 @@ snapshots: semver: 7.7.2 strip-indent: 4.0.0 - eslint-plugin-unused-imports@4.1.4(@typescript-eslint/eslint-plugin@8.38.0(@typescript-eslint/parser@8.38.0(eslint@9.31.0(jiti@1.21.7))(typescript@5.8.3))(eslint@9.31.0(jiti@1.21.7))(typescript@5.8.3))(eslint@9.31.0(jiti@1.21.7)): + eslint-plugin-unused-imports@4.1.4(@typescript-eslint/eslint-plugin@8.38.0(@typescript-eslint/parser@8.38.0(eslint@9.32.0(jiti@1.21.7))(typescript@5.8.3))(eslint@9.32.0(jiti@1.21.7))(typescript@5.8.3))(eslint@9.32.0(jiti@1.21.7)): dependencies: - eslint: 9.31.0(jiti@1.21.7) + eslint: 9.32.0(jiti@1.21.7) optionalDependencies: - '@typescript-eslint/eslint-plugin': 8.38.0(@typescript-eslint/parser@8.38.0(eslint@9.31.0(jiti@1.21.7))(typescript@5.8.3))(eslint@9.31.0(jiti@1.21.7))(typescript@5.8.3) + '@typescript-eslint/eslint-plugin': 8.38.0(@typescript-eslint/parser@8.38.0(eslint@9.32.0(jiti@1.21.7))(typescript@5.8.3))(eslint@9.32.0(jiti@1.21.7))(typescript@5.8.3) - eslint-plugin-vue@10.3.0(@typescript-eslint/parser@8.38.0(eslint@9.31.0(jiti@1.21.7))(typescript@5.8.3))(eslint@9.31.0(jiti@1.21.7))(vue-eslint-parser@10.2.0(eslint@9.31.0(jiti@1.21.7))): + eslint-plugin-vue@10.3.0(@typescript-eslint/parser@8.38.0(eslint@9.32.0(jiti@1.21.7))(typescript@5.8.3))(eslint@9.32.0(jiti@1.21.7))(vue-eslint-parser@10.2.0(eslint@9.32.0(jiti@1.21.7))): dependencies: - '@eslint-community/eslint-utils': 4.7.0(eslint@9.31.0(jiti@1.21.7)) - eslint: 9.31.0(jiti@1.21.7) + '@eslint-community/eslint-utils': 4.7.0(eslint@9.32.0(jiti@1.21.7)) + eslint: 9.32.0(jiti@1.21.7) natural-compare: 1.4.0 nth-check: 2.1.1 postcss-selector-parser: 6.1.2 semver: 7.7.2 - vue-eslint-parser: 10.2.0(eslint@9.31.0(jiti@1.21.7)) + vue-eslint-parser: 10.2.0(eslint@9.32.0(jiti@1.21.7)) xml-name-validator: 4.0.0 optionalDependencies: - '@typescript-eslint/parser': 8.38.0(eslint@9.31.0(jiti@1.21.7))(typescript@5.8.3) + '@typescript-eslint/parser': 8.38.0(eslint@9.32.0(jiti@1.21.7))(typescript@5.8.3) - eslint-plugin-yml@1.18.0(eslint@9.31.0(jiti@1.21.7)): + eslint-plugin-yml@1.18.0(eslint@9.32.0(jiti@1.21.7)): dependencies: debug: 4.4.1 escape-string-regexp: 4.0.0 - eslint: 9.31.0(jiti@1.21.7) - eslint-compat-utils: 0.6.5(eslint@9.31.0(jiti@1.21.7)) + eslint: 9.32.0(jiti@1.21.7) + eslint-compat-utils: 0.6.5(eslint@9.32.0(jiti@1.21.7)) natural-compare: 1.4.0 yaml-eslint-parser: 1.3.0 transitivePeerDependencies: - supports-color - eslint-processor-vue-blocks@2.0.0(@vue/compiler-sfc@3.5.17)(eslint@9.31.0(jiti@1.21.7)): + eslint-processor-vue-blocks@2.0.0(@vue/compiler-sfc@3.5.17)(eslint@9.32.0(jiti@1.21.7)): dependencies: '@vue/compiler-sfc': 3.5.17 - eslint: 9.31.0(jiti@1.21.7) + eslint: 9.32.0(jiti@1.21.7) eslint-scope@5.1.1: dependencies: @@ -13576,16 +13567,16 @@ snapshots: eslint-visitor-keys@4.2.1: {} - eslint@9.31.0(jiti@1.21.7): + eslint@9.32.0(jiti@1.21.7): dependencies: - '@eslint-community/eslint-utils': 4.7.0(eslint@9.31.0(jiti@1.21.7)) + '@eslint-community/eslint-utils': 4.7.0(eslint@9.32.0(jiti@1.21.7)) '@eslint-community/regexpp': 4.12.1 '@eslint/config-array': 0.21.0 '@eslint/config-helpers': 0.3.0 '@eslint/core': 0.15.1 '@eslint/eslintrc': 3.3.1 - '@eslint/js': 9.31.0 - '@eslint/plugin-kit': 0.3.3 + '@eslint/js': 9.32.0 + '@eslint/plugin-kit': 0.3.4 '@humanfs/node': 0.16.6 '@humanwhocodes/module-importer': 1.0.1 '@humanwhocodes/retry': 0.4.3 @@ -14328,10 +14319,10 @@ snapshots: is-hexadecimal@2.0.1: {} - is-immutable-type@5.0.1(eslint@9.31.0(jiti@1.21.7))(typescript@5.8.3): + is-immutable-type@5.0.1(eslint@9.32.0(jiti@1.21.7))(typescript@5.8.3): dependencies: - '@typescript-eslint/type-utils': 8.37.0(eslint@9.31.0(jiti@1.21.7))(typescript@5.8.3) - eslint: 9.31.0(jiti@1.21.7) + '@typescript-eslint/type-utils': 8.37.0(eslint@9.32.0(jiti@1.21.7))(typescript@5.8.3) + eslint: 9.32.0(jiti@1.21.7) ts-api-utils: 2.1.0(typescript@5.8.3) ts-declaration-location: 1.0.7(typescript@5.8.3) typescript: 5.8.3 @@ -17208,13 +17199,13 @@ snapshots: type-fest@2.19.0: {} - typescript-eslint@8.38.0(eslint@9.31.0(jiti@1.21.7))(typescript@5.8.3): + typescript-eslint@8.38.0(eslint@9.32.0(jiti@1.21.7))(typescript@5.8.3): dependencies: - '@typescript-eslint/eslint-plugin': 8.38.0(@typescript-eslint/parser@8.38.0(eslint@9.31.0(jiti@1.21.7))(typescript@5.8.3))(eslint@9.31.0(jiti@1.21.7))(typescript@5.8.3) - '@typescript-eslint/parser': 8.38.0(eslint@9.31.0(jiti@1.21.7))(typescript@5.8.3) + '@typescript-eslint/eslint-plugin': 8.38.0(@typescript-eslint/parser@8.38.0(eslint@9.32.0(jiti@1.21.7))(typescript@5.8.3))(eslint@9.32.0(jiti@1.21.7))(typescript@5.8.3) + '@typescript-eslint/parser': 8.38.0(eslint@9.32.0(jiti@1.21.7))(typescript@5.8.3) '@typescript-eslint/typescript-estree': 8.38.0(typescript@5.8.3) - '@typescript-eslint/utils': 8.38.0(eslint@9.31.0(jiti@1.21.7))(typescript@5.8.3) - eslint: 9.31.0(jiti@1.21.7) + '@typescript-eslint/utils': 8.38.0(eslint@9.32.0(jiti@1.21.7))(typescript@5.8.3) + eslint: 9.32.0(jiti@1.21.7) typescript: 5.8.3 transitivePeerDependencies: - supports-color @@ -17427,10 +17418,10 @@ snapshots: vscode-uri@3.0.8: {} - vue-eslint-parser@10.2.0(eslint@9.31.0(jiti@1.21.7)): + vue-eslint-parser@10.2.0(eslint@9.32.0(jiti@1.21.7)): dependencies: debug: 4.4.1 - eslint: 9.31.0(jiti@1.21.7) + eslint: 9.32.0(jiti@1.21.7) eslint-scope: 8.4.0 eslint-visitor-keys: 4.2.1 espree: 10.4.0 From ffddabde43d541d380b46922596c6dbf0e575140 Mon Sep 17 00:00:00 2001 From: Aurelius Huang Date: Wed, 30 Jul 2025 21:35:20 +0800 Subject: [PATCH 079/415] feat(notion): Notion Database extracts Rows content `in row order` and appends `Row Page URL` (#22646) Co-authored-by: Aurelius Huang --- api/core/rag/extractor/notion_extractor.py | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/api/core/rag/extractor/notion_extractor.py b/api/core/rag/extractor/notion_extractor.py index 875626eb34..17f4d1af2d 100644 --- a/api/core/rag/extractor/notion_extractor.py +++ b/api/core/rag/extractor/notion_extractor.py @@ -1,5 +1,6 @@ import json import logging +import operator from typing import Any, Optional, cast import requests @@ -130,13 +131,15 @@ class NotionExtractor(BaseExtractor): data[property_name] = value row_dict = {k: v for k, v in data.items() if v} row_content = "" - for key, value in row_dict.items(): + for key, value in sorted(row_dict.items(), key=operator.itemgetter(0)): if isinstance(value, dict): value_dict = {k: v for k, v in value.items() if v} value_content = "".join(f"{k}:{v} " for k, v in value_dict.items()) row_content = row_content + f"{key}:{value_content}\n" else: row_content = row_content + f"{key}:{value}\n" + if "url" in result: + row_content = row_content + f"Row Page URL:{result.get('url', '')}\n" database_content.append(row_content) has_more = response_data.get("has_more", False) From 142ab7478415fd128931931da52647cde54b36bd Mon Sep 17 00:00:00 2001 From: Ali Saleh Date: Thu, 31 Jul 2025 03:58:26 +0500 Subject: [PATCH 080/415] feat: Enable Tracing Support For Phoenix Cloud Instance (#23196) --- .../arize_phoenix_trace/arize_phoenix_trace.py | 11 +++++++++-- api/core/ops/entities/config_entity.py | 2 +- .../unit_tests/core/ops/test_config_entity.py | 17 ++++++++++++----- 3 files changed, 22 insertions(+), 8 deletions(-) diff --git a/api/core/ops/arize_phoenix_trace/arize_phoenix_trace.py b/api/core/ops/arize_phoenix_trace/arize_phoenix_trace.py index a20f2485c8..e7c90c1229 100644 --- a/api/core/ops/arize_phoenix_trace/arize_phoenix_trace.py +++ b/api/core/ops/arize_phoenix_trace/arize_phoenix_trace.py @@ -4,6 +4,7 @@ import logging import os from datetime import datetime, timedelta from typing import Any, Optional, Union, cast +from urllib.parse import urlparse from openinference.semconv.trace import OpenInferenceSpanKindValues, SpanAttributes from opentelemetry import trace @@ -40,8 +41,14 @@ def setup_tracer(arize_phoenix_config: ArizeConfig | PhoenixConfig) -> tuple[tra try: # Choose the appropriate exporter based on config type exporter: Union[GrpcOTLPSpanExporter, HttpOTLPSpanExporter] + + # Inspect the provided endpoint to determine its structure + parsed = urlparse(arize_phoenix_config.endpoint) + base_endpoint = f"{parsed.scheme}://{parsed.netloc}" + path = parsed.path.rstrip("/") + if isinstance(arize_phoenix_config, ArizeConfig): - arize_endpoint = f"{arize_phoenix_config.endpoint}/v1" + arize_endpoint = f"{base_endpoint}/v1" arize_headers = { "api_key": arize_phoenix_config.api_key or "", "space_id": arize_phoenix_config.space_id or "", @@ -53,7 +60,7 @@ def setup_tracer(arize_phoenix_config: ArizeConfig | PhoenixConfig) -> tuple[tra timeout=30, ) else: - phoenix_endpoint = f"{arize_phoenix_config.endpoint}/v1/traces" + phoenix_endpoint = f"{base_endpoint}{path}/v1/traces" phoenix_headers = { "api_key": arize_phoenix_config.api_key or "", "authorization": f"Bearer {arize_phoenix_config.api_key or ''}", diff --git a/api/core/ops/entities/config_entity.py b/api/core/ops/entities/config_entity.py index 626782cee5..851a77fbc1 100644 --- a/api/core/ops/entities/config_entity.py +++ b/api/core/ops/entities/config_entity.py @@ -87,7 +87,7 @@ class PhoenixConfig(BaseTracingConfig): @field_validator("endpoint") @classmethod def endpoint_validator(cls, v, info: ValidationInfo): - return cls.validate_endpoint_url(v, "https://app.phoenix.arize.com") + return validate_url_with_path(v, "https://app.phoenix.arize.com") class LangfuseConfig(BaseTracingConfig): diff --git a/api/tests/unit_tests/core/ops/test_config_entity.py b/api/tests/unit_tests/core/ops/test_config_entity.py index 209f8b7c57..1dc380ad0b 100644 --- a/api/tests/unit_tests/core/ops/test_config_entity.py +++ b/api/tests/unit_tests/core/ops/test_config_entity.py @@ -102,9 +102,14 @@ class TestPhoenixConfig: assert config.project == "default" def test_endpoint_validation_with_path(self): - """Test endpoint validation normalizes URL by removing path""" - config = PhoenixConfig(endpoint="https://custom.phoenix.com/api/v1") - assert config.endpoint == "https://custom.phoenix.com" + """Test endpoint validation with path""" + config = PhoenixConfig(endpoint="https://app.phoenix.arize.com/s/dify-integration") + assert config.endpoint == "https://app.phoenix.arize.com/s/dify-integration" + + def test_endpoint_validation_without_path(self): + """Test endpoint validation without path""" + config = PhoenixConfig(endpoint="https://app.phoenix.arize.com") + assert config.endpoint == "https://app.phoenix.arize.com" class TestLangfuseConfig: @@ -368,13 +373,15 @@ class TestConfigIntegration: """Test that URL normalization works consistently across configs""" # Test that paths are removed from endpoints arize_config = ArizeConfig(endpoint="https://arize.com/api/v1/test") - phoenix_config = PhoenixConfig(endpoint="https://phoenix.com/api/v2/") + phoenix_with_path_config = PhoenixConfig(endpoint="https://app.phoenix.arize.com/s/dify-integration") + phoenix_without_path_config = PhoenixConfig(endpoint="https://app.phoenix.arize.com") aliyun_config = AliyunConfig( license_key="test_license", endpoint="https://tracing-analysis-dc-hz.aliyuncs.com/api/v1/traces" ) assert arize_config.endpoint == "https://arize.com" - assert phoenix_config.endpoint == "https://phoenix.com" + assert phoenix_with_path_config.endpoint == "https://app.phoenix.arize.com/s/dify-integration" + assert phoenix_without_path_config.endpoint == "https://app.phoenix.arize.com" assert aliyun_config.endpoint == "https://tracing-analysis-dc-hz.aliyuncs.com" def test_project_default_values(self): From 646900b00c36ec968a3d713e8364e34d7f0b139c Mon Sep 17 00:00:00 2001 From: znn Date: Thu, 31 Jul 2025 07:33:03 +0530 Subject: [PATCH 081/415] fixing embedded chat styling (#23198) --- web/public/embed.js | 2 ++ web/public/embed.min.js | 66 ++++++++++++++++++++++++++++------------- 2 files changed, 47 insertions(+), 21 deletions(-) diff --git a/web/public/embed.js b/web/public/embed.js index e41405dbf8..54aa6a95b1 100644 --- a/web/public/embed.js +++ b/web/public/embed.js @@ -38,6 +38,7 @@ height: 43.75rem; max-height: calc(100vh - 6rem); border: none; + border-radius: 1rem; z-index: 2147483640; overflow: hidden; user-select: none; @@ -62,6 +63,7 @@ height: 88%; max-height: calc(100vh - 6rem); border: none; + border-radius: 1rem; z-index: 2147483640; overflow: hidden; user-select: none; diff --git a/web/public/embed.min.js b/web/public/embed.min.js index b1d6f56920..42132e0359 100644 --- a/web/public/embed.min.js +++ b/web/public/embed.min.js @@ -1,42 +1,66 @@ -(()=>{let t="difyChatbotConfig",h="dify-chatbot-bubble-button",m="dify-chatbot-bubble-window",y=window[t],a=!1,l=` +(function(){const configKey="difyChatbotConfig";const buttonId="dify-chatbot-bubble-button";const iframeId="dify-chatbot-bubble-window";const config=window[configKey];let isExpanded=false;const svgIcons=` + + + + `;const originalIframeStyleText=` position: absolute; display: flex; flex-direction: column; justify-content: space-between; top: unset; - right: var(--${h}-right, 1rem); /* Align with dify-chatbot-bubble-button. */ - bottom: var(--${h}-bottom, 1rem); /* Align with dify-chatbot-bubble-button. */ + right: var(--${buttonId}-right, 1rem); /* Align with dify-chatbot-bubble-button. */ + bottom: var(--${buttonId}-bottom, 1rem); /* Align with dify-chatbot-bubble-button. */ left: unset; width: 24rem; max-width: calc(100vw - 2rem); height: 43.75rem; max-height: calc(100vh - 6rem); border: none; + border-radius: 1rem; z-index: 2147483640; overflow: hidden; user-select: none; transition-property: width, height; transition-timing-function: cubic-bezier(0.4, 0, 0.2, 1); transition-duration: 150ms; - `;async function e(){let u=!1;if(y&&y.token){var e=new URLSearchParams({...await(async()=>{var e=y?.inputs||{};let n={};return await Promise.all(Object.entries(e).map(async([e,t])=>{n[e]=await i(t)})),n})(),...await(async()=>{var e=y?.systemVariables||{};let n={};return await Promise.all(Object.entries(e).map(async([e,t])=>{n["sys."+e]=await i(t)})),n})(),...await(async()=>{var e=y?.userVariables||{};let n={};return await Promise.all(Object.entries(e).map(async([e,t])=>{n["user."+e]=await i(t)})),n})()}),n=y.baseUrl||`https://${y.isDev?"dev.":""}udify.app`;let o=new URL(n).origin,t=`${n}/chatbot/${y.token}?`+e;n=s();async function i(e){e=(new TextEncoder).encode(e),e=new Response(new Blob([e]).stream().pipeThrough(new CompressionStream("gzip"))).arrayBuffer(),e=new Uint8Array(await e);return btoa(String.fromCharCode(...e))}function s(){var e=document.createElement("iframe");return e.allow="fullscreen;microphone",e.title="dify chatbot bubble window",e.id=m,e.src=t,e.style.cssText=l,e}function r(){var e,t,n;window.innerWidth<=640||(e=document.getElementById(m),t=document.getElementById(h),e&&t&&(t=t.getBoundingClientRect(),n=window.innerHeight/2,t.top+t.height/2{"className"===e?n.classList.add(...t.split(" ")):"style"===e?"object"==typeof t?Object.assign(n.style,t):n.style.cssText=t:"function"==typeof t?n.addEventListener(e.replace(/^on/,"").toLowerCase(),t):n[e]=t}),n.id=h;var e=document.createElement("style"),e=(document.head.appendChild(e),e.sheet.insertRule(` - #${n.id} { + `;const expandedIframeStyleText=` + position: absolute; + display: flex; + flex-direction: column; + justify-content: space-between; + top: unset; + right: var(--${buttonId}-right, 1rem); /* Align with dify-chatbot-bubble-button. */ + bottom: var(--${buttonId}-bottom, 1rem); /* Align with dify-chatbot-bubble-button. */ + left: unset; + min-width: 24rem; + width: 48%; + max-width: 40rem; /* Match mobile breakpoint*/ + min-height: 43.75rem; + height: 88%; + max-height: calc(100vh - 6rem); + border: none; + border-radius: 1rem; + z-index: 2147483640; + overflow: hidden; + user-select: none; + transition-property: width, height; + transition-timing-function: cubic-bezier(0.4, 0, 0.2, 1); + transition-duration: 150ms; + `;async function embedChatbot(){let isDragging=false;if(!config||!config.token){console.error(`${configKey} is empty or token is not provided`);return}async function compressAndEncodeBase64(input){const uint8Array=(new TextEncoder).encode(input);const compressedStream=new Response(new Blob([uint8Array]).stream().pipeThrough(new CompressionStream("gzip"))).arrayBuffer();const compressedUint8Array=new Uint8Array(await compressedStream);return btoa(String.fromCharCode(...compressedUint8Array))}async function getCompressedInputsFromConfig(){const inputs=config?.inputs||{};const compressedInputs={};await Promise.all(Object.entries(inputs).map(async([key,value])=>{compressedInputs[key]=await compressAndEncodeBase64(value)}));return compressedInputs}async function getCompressedSystemVariablesFromConfig(){const systemVariables=config?.systemVariables||{};const compressedSystemVariables={};await Promise.all(Object.entries(systemVariables).map(async([key,value])=>{compressedSystemVariables[`sys.${key}`]=await compressAndEncodeBase64(value)}));return compressedSystemVariables}async function getCompressedUserVariablesFromConfig(){const userVariables=config?.userVariables||{};const compressedUserVariables={};await Promise.all(Object.entries(userVariables).map(async([key,value])=>{compressedUserVariables[`user.${key}`]=await compressAndEncodeBase64(value)}));return compressedUserVariables}const params=new URLSearchParams({...await getCompressedInputsFromConfig(),...await getCompressedSystemVariablesFromConfig(),...await getCompressedUserVariablesFromConfig()});const baseUrl=config.baseUrl||`https://${config.isDev?"dev.":""}udify.app`;const targetOrigin=new URL(baseUrl).origin;const iframeUrl=`${baseUrl}/chatbot/${config.token}?${params}`;const preloadedIframe=createIframe();preloadedIframe.style.display="none";document.body.appendChild(preloadedIframe);if(iframeUrl.length>2048){console.error("The URL is too long, please reduce the number of inputs to prevent the bot from failing to load")}function createIframe(){const iframe=document.createElement("iframe");iframe.allow="fullscreen;microphone";iframe.title="dify chatbot bubble window";iframe.id=iframeId;iframe.src=iframeUrl;iframe.style.cssText=originalIframeStyleText;return iframe}function resetIframePosition(){if(window.innerWidth<=640)return;const targetIframe=document.getElementById(iframeId);const targetButton=document.getElementById(buttonId);if(targetIframe&&targetButton){const buttonRect=targetButton.getBoundingClientRect();const viewportCenterY=window.innerHeight/2;const buttonCenterY=buttonRect.top+buttonRect.height/2;if(buttonCenterY{if(event.origin!==targetOrigin)return;const targetIframe=document.getElementById(iframeId);if(!targetIframe||event.source!==targetIframe.contentWindow)return;if(event.data.type==="dify-chatbot-iframe-ready"){targetIframe.contentWindow?.postMessage({type:"dify-chatbot-config",payload:{isToggledByButton:true,isDraggable:!!config.draggable}},targetOrigin)}if(event.data.type==="dify-chatbot-expand-change"){toggleExpand()}});function createButton(){const containerDiv=document.createElement("div");Object.entries(config.containerProps||{}).forEach(([key,value])=>{if(key==="className"){containerDiv.classList.add(...value.split(" "))}else if(key==="style"){if(typeof value==="object"){Object.assign(containerDiv.style,value)}else{containerDiv.style.cssText=value}}else if(typeof value==="function"){containerDiv.addEventListener(key.replace(/^on/,"").toLowerCase(),value)}else{containerDiv[key]=value}});containerDiv.id=buttonId;const styleSheet=document.createElement("style");document.head.appendChild(styleSheet);styleSheet.sheet.insertRule(` + #${containerDiv.id} { position: fixed; - bottom: var(--${n.id}-bottom, 1rem); - right: var(--${n.id}-right, 1rem); - left: var(--${n.id}-left, unset); - top: var(--${n.id}-top, unset); - width: var(--${n.id}-width, 48px); - height: var(--${n.id}-height, 48px); - border-radius: var(--${n.id}-border-radius, 25px); - background-color: var(--${n.id}-bg-color, #155EEF); - box-shadow: var(--${n.id}-box-shadow, rgba(0, 0, 0, 0.2) 0px 4px 8px 0px); + bottom: var(--${containerDiv.id}-bottom, 1rem); + right: var(--${containerDiv.id}-right, 1rem); + left: var(--${containerDiv.id}-left, unset); + top: var(--${containerDiv.id}-top, unset); + width: var(--${containerDiv.id}-width, 48px); + height: var(--${containerDiv.id}-height, 48px); + border-radius: var(--${containerDiv.id}-border-radius, 25px); + background-color: var(--${containerDiv.id}-bg-color, #155EEF); + box-shadow: var(--${containerDiv.id}-box-shadow, rgba(0, 0, 0, 0.2) 0px 4px 8px 0px); cursor: pointer; z-index: 2147483647; } - `),document.createElement("div"));function t(){var e;u||((e=document.getElementById(m))?(e.style.display="none"===e.style.display?"block":"none","none"===e.style.display?p("open"):p("close"),"none"===e.style.display?document.removeEventListener("keydown",b):document.addEventListener("keydown",b),r()):(n.appendChild(s()),r(),this.title="Exit (ESC)",p("close"),document.addEventListener("keydown",b)))}if(e.style.cssText="position: relative; display: flex; align-items: center; justify-content: center; width: 100%; height: 100%; z-index: 2147483647;",e.innerHTML=` - - - - `,n.appendChild(e),document.body.appendChild(n),n.addEventListener("click",t),n.addEventListener("touchend",e=>{e.preventDefault(),t()},{passive:!1}),y.draggable){var a=n;var l=y.dragAxis||"both";let s,r,t,d;function o(e){u=!1,d=("touchstart"===e.type?(s=e.touches[0].clientX-a.offsetLeft,r=e.touches[0].clientY-a.offsetTop,t=e.touches[0].clientX,e.touches[0]):(s=e.clientX-a.offsetLeft,r=e.clientY-a.offsetTop,t=e.clientX,e)).clientY,document.addEventListener("mousemove",i),document.addEventListener("touchmove",i,{passive:!1}),document.addEventListener("mouseup",c),document.addEventListener("touchend",c),e.preventDefault()}function i(n){var o="touchmove"===n.type?n.touches[0]:n,i=o.clientX-t,o=o.clientY-d;if(u=8{u=!1},0),a.style.transition="",a.style.cursor="pointer",document.removeEventListener("mousemove",i),document.removeEventListener("touchmove",i),document.removeEventListener("mouseup",c),document.removeEventListener("touchend",c)}a.addEventListener("mousedown",o),a.addEventListener("touchstart",o)}}n.style.display="none",document.body.appendChild(n),2048{var t,n;e.origin===o&&(t=document.getElementById(m))&&e.source===t.contentWindow&&("dify-chatbot-iframe-ready"===e.data.type&&t.contentWindow?.postMessage({type:"dify-chatbot-config",payload:{isToggledByButton:!0,isDraggable:!!y.draggable}},o),"dify-chatbot-expand-change"===e.data.type)&&(a=!a,n=document.getElementById(m))&&(a?n.style.cssText="\n position: absolute;\n display: flex;\n flex-direction: column;\n justify-content: space-between;\n top: unset;\n right: var(--dify-chatbot-bubble-button-right, 1rem); /* Align with dify-chatbot-bubble-button. */\n bottom: var(--dify-chatbot-bubble-button-bottom, 1rem); /* Align with dify-chatbot-bubble-button. */\n left: unset;\n min-width: 24rem;\n width: 48%;\n max-width: 40rem; /* Match mobile breakpoint*/\n min-height: 43.75rem;\n height: 88%;\n max-height: calc(100vh - 6rem);\n border: none;\n z-index: 2147483640;\n overflow: hidden;\n user-select: none;\n transition-property: width, height;\n transition-timing-function: cubic-bezier(0.4, 0, 0.2, 1);\n transition-duration: 150ms;\n ":n.style.cssText=l,r())}),document.getElementById(h)||d()}else console.error(t+" is empty or token is not provided")}function p(e="open"){"open"===e?(document.getElementById("openIcon").style.display="block",document.getElementById("closeIcon").style.display="none"):(document.getElementById("openIcon").style.display="none",document.getElementById("closeIcon").style.display="block")}function b(e){"Escape"===e.key&&(e=document.getElementById(m))&&"none"!==e.style.display&&(e.style.display="none",p("open"))}h,h,document.addEventListener("keydown",b),y?.dynamicScript?e():document.body.onload=e})(); \ No newline at end of file + `);const displayDiv=document.createElement("div");displayDiv.style.cssText="position: relative; display: flex; align-items: center; justify-content: center; width: 100%; height: 100%; z-index: 2147483647;";displayDiv.innerHTML=svgIcons;containerDiv.appendChild(displayDiv);document.body.appendChild(containerDiv);containerDiv.addEventListener("click",handleClick);containerDiv.addEventListener("touchend",event=>{event.preventDefault();handleClick()},{passive:false});function handleClick(){if(isDragging)return;const targetIframe=document.getElementById(iframeId);if(!targetIframe){containerDiv.appendChild(createIframe());resetIframePosition();this.title="Exit (ESC)";setSvgIcon("close");document.addEventListener("keydown",handleEscKey);return}targetIframe.style.display=targetIframe.style.display==="none"?"block":"none";targetIframe.style.display==="none"?setSvgIcon("open"):setSvgIcon("close");if(targetIframe.style.display==="none"){document.removeEventListener("keydown",handleEscKey)}else{document.addEventListener("keydown",handleEscKey)}resetIframePosition()}if(config.draggable){enableDragging(containerDiv,config.dragAxis||"both")}}function enableDragging(element,axis){let startX,startY,startClientX,startClientY;element.addEventListener("mousedown",startDragging);element.addEventListener("touchstart",startDragging);function startDragging(e){isDragging=false;if(e.type==="touchstart"){startX=e.touches[0].clientX-element.offsetLeft;startY=e.touches[0].clientY-element.offsetTop;startClientX=e.touches[0].clientX;startClientY=e.touches[0].clientY}else{startX=e.clientX-element.offsetLeft;startY=e.clientY-element.offsetTop;startClientX=e.clientX;startClientY=e.clientY}document.addEventListener("mousemove",drag);document.addEventListener("touchmove",drag,{passive:false});document.addEventListener("mouseup",stopDragging);document.addEventListener("touchend",stopDragging);e.preventDefault()}function drag(e){const touch=e.type==="touchmove"?e.touches[0]:e;const deltaX=touch.clientX-startClientX;const deltaY=touch.clientY-startClientY;if(Math.abs(deltaX)>8||Math.abs(deltaY)>8){isDragging=true}if(!isDragging)return;element.style.transition="none";element.style.cursor="grabbing";const targetIframe=document.getElementById(iframeId);if(targetIframe){targetIframe.style.display="none";setSvgIcon("open")}let newLeft,newBottom;if(e.type==="touchmove"){newLeft=e.touches[0].clientX-startX;newBottom=window.innerHeight-e.touches[0].clientY-startY}else{newLeft=e.clientX-startX;newBottom=window.innerHeight-e.clientY-startY}const elementRect=element.getBoundingClientRect();const maxX=window.innerWidth-elementRect.width;const maxY=window.innerHeight-elementRect.height;if(axis==="x"||axis==="both"){element.style.setProperty(`--${buttonId}-left`,`${Math.max(0,Math.min(newLeft,maxX))}px`)}if(axis==="y"||axis==="both"){element.style.setProperty(`--${buttonId}-bottom`,`${Math.max(0,Math.min(newBottom,maxY))}px`)}}function stopDragging(){setTimeout(()=>{isDragging=false},0);element.style.transition="";element.style.cursor="pointer";document.removeEventListener("mousemove",drag);document.removeEventListener("touchmove",drag);document.removeEventListener("mouseup",stopDragging);document.removeEventListener("touchend",stopDragging)}}if(!document.getElementById(buttonId)){createButton()}}function setSvgIcon(type="open"){if(type==="open"){document.getElementById("openIcon").style.display="block";document.getElementById("closeIcon").style.display="none"}else{document.getElementById("openIcon").style.display="none";document.getElementById("closeIcon").style.display="block"}}function handleEscKey(event){if(event.key==="Escape"){const targetIframe=document.getElementById(iframeId);if(targetIframe&&targetIframe.style.display!=="none"){targetIframe.style.display="none";setSvgIcon("open")}}}document.addEventListener("keydown",handleEscKey);if(config?.dynamicScript){embedChatbot()}else{document.body.onload=embedChatbot}})(); \ No newline at end of file From 1b2046da3f6d63653ac778ce2239ca31f257ba06 Mon Sep 17 00:00:00 2001 From: lyzno1 <92089059+lyzno1@users.noreply.github.com> Date: Thu, 31 Jul 2025 10:03:33 +0800 Subject: [PATCH 082/415] fix: prevent client-side crashes from null/undefined plugin data in workflow (#23154) (#23182) --- web/__tests__/check-i18n.test.ts | 11 +- .../plugin-tool-workflow-error.test.tsx | 207 ++++++++++++++++++ .../components/switch-plugin-version.tsx | 8 +- .../nodes/agent/use-single-run-form-params.ts | 8 +- .../nodes/http/use-single-run-form-params.ts | 8 +- .../nodes/llm/use-single-run-form-params.ts | 8 +- .../use-single-run-form-params.ts | 8 +- .../use-single-run-form-params.ts | 8 +- .../nodes/tool/use-single-run-form-params.ts | 8 +- 9 files changed, 260 insertions(+), 14 deletions(-) create mode 100644 web/__tests__/plugin-tool-workflow-error.test.tsx diff --git a/web/__tests__/check-i18n.test.ts b/web/__tests__/check-i18n.test.ts index 173aa96118..3bde095f4b 100644 --- a/web/__tests__/check-i18n.test.ts +++ b/web/__tests__/check-i18n.test.ts @@ -49,9 +49,9 @@ describe('check-i18n script functionality', () => { } vm.runInNewContext(transpile(content), context) - const translationObj = moduleExports.default || moduleExports + const translationObj = (context.module.exports as any).default || context.module.exports - if(!translationObj || typeof translationObj !== 'object') + if (!translationObj || typeof translationObj !== 'object') throw new Error(`Error parsing file: ${filePath}`) const nestedKeys: string[] = [] @@ -62,7 +62,7 @@ describe('check-i18n script functionality', () => { // This is an object (but not array), recurse into it but don't add it as a key iterateKeys(obj[key], nestedKey) } - else { + else { // This is a leaf node (string, number, boolean, array, etc.), add it as a key nestedKeys.push(nestedKey) } @@ -73,7 +73,7 @@ describe('check-i18n script functionality', () => { const fileKeys = nestedKeys.map(key => `${camelCaseFileName}.${key}`) allKeys.push(...fileKeys) } - catch (error) { + catch (error) { reject(error) } }) @@ -272,9 +272,6 @@ export default translation const filteredEnKeys = allEnKeys.filter(key => key.startsWith(targetFile.replace(/[-_](.)/g, (_, c) => c.toUpperCase())), ) - const filteredZhKeys = allZhKeys.filter(key => - key.startsWith(targetFile.replace(/[-_](.)/g, (_, c) => c.toUpperCase())), - ) expect(allEnKeys).toHaveLength(4) // 2 keys from each file expect(filteredEnKeys).toHaveLength(2) // only components keys diff --git a/web/__tests__/plugin-tool-workflow-error.test.tsx b/web/__tests__/plugin-tool-workflow-error.test.tsx new file mode 100644 index 0000000000..370052bc80 --- /dev/null +++ b/web/__tests__/plugin-tool-workflow-error.test.tsx @@ -0,0 +1,207 @@ +/** + * Test cases to reproduce the plugin tool workflow error + * Issue: #23154 - Application error when loading plugin tools in workflow + * Root cause: split() operation called on null/undefined values + */ + +describe('Plugin Tool Workflow Error Reproduction', () => { + /** + * Mock function to simulate the problematic code in switch-plugin-version.tsx:29 + * const [pluginId] = uniqueIdentifier.split(':') + */ + const mockSwitchPluginVersionLogic = (uniqueIdentifier: string | null | undefined) => { + // This directly reproduces the problematic line from switch-plugin-version.tsx:29 + const [pluginId] = uniqueIdentifier!.split(':') + return pluginId + } + + /** + * Test case 1: Simulate null uniqueIdentifier + * This should reproduce the error mentioned in the issue + */ + it('should reproduce error when uniqueIdentifier is null', () => { + expect(() => { + mockSwitchPluginVersionLogic(null) + }).toThrow('Cannot read properties of null (reading \'split\')') + }) + + /** + * Test case 2: Simulate undefined uniqueIdentifier + */ + it('should reproduce error when uniqueIdentifier is undefined', () => { + expect(() => { + mockSwitchPluginVersionLogic(undefined) + }).toThrow('Cannot read properties of undefined (reading \'split\')') + }) + + /** + * Test case 3: Simulate empty string uniqueIdentifier + */ + it('should handle empty string uniqueIdentifier', () => { + expect(() => { + const result = mockSwitchPluginVersionLogic('') + expect(result).toBe('') // Empty string split by ':' returns [''] + }).not.toThrow() + }) + + /** + * Test case 4: Simulate malformed uniqueIdentifier without colon separator + */ + it('should handle malformed uniqueIdentifier without colon separator', () => { + expect(() => { + const result = mockSwitchPluginVersionLogic('malformed-identifier-without-colon') + expect(result).toBe('malformed-identifier-without-colon') // No colon means full string returned + }).not.toThrow() + }) + + /** + * Test case 5: Simulate valid uniqueIdentifier + */ + it('should work correctly with valid uniqueIdentifier', () => { + expect(() => { + const result = mockSwitchPluginVersionLogic('valid-plugin-id:1.0.0') + expect(result).toBe('valid-plugin-id') + }).not.toThrow() + }) +}) + +/** + * Test for the variable processing split error in use-single-run-form-params + */ +describe('Variable Processing Split Error', () => { + /** + * Mock function to simulate the problematic code in use-single-run-form-params.ts:91 + * const getDependentVars = () => { + * return varInputs.map(item => item.variable.slice(1, -1).split('.')) + * } + */ + const mockGetDependentVars = (varInputs: Array<{ variable: string | null | undefined }>) => { + return varInputs.map((item) => { + // Guard against null/undefined variable to prevent app crash + if (!item.variable || typeof item.variable !== 'string') + return [] + + return item.variable.slice(1, -1).split('.') + }).filter(arr => arr.length > 0) // Filter out empty arrays + } + + /** + * Test case 1: Variable processing with null variable + */ + it('should handle null variable safely', () => { + const varInputs = [{ variable: null }] + + expect(() => { + mockGetDependentVars(varInputs) + }).not.toThrow() + + const result = mockGetDependentVars(varInputs) + expect(result).toEqual([]) // null variables are filtered out + }) + + /** + * Test case 2: Variable processing with undefined variable + */ + it('should handle undefined variable safely', () => { + const varInputs = [{ variable: undefined }] + + expect(() => { + mockGetDependentVars(varInputs) + }).not.toThrow() + + const result = mockGetDependentVars(varInputs) + expect(result).toEqual([]) // undefined variables are filtered out + }) + + /** + * Test case 3: Variable processing with empty string + */ + it('should handle empty string variable', () => { + const varInputs = [{ variable: '' }] + + expect(() => { + mockGetDependentVars(varInputs) + }).not.toThrow() + + const result = mockGetDependentVars(varInputs) + expect(result).toEqual([]) // Empty string is filtered out, so result is empty array + }) + + /** + * Test case 4: Variable processing with valid variable format + */ + it('should work correctly with valid variable format', () => { + const varInputs = [{ variable: '{{workflow.node.output}}' }] + + expect(() => { + mockGetDependentVars(varInputs) + }).not.toThrow() + + const result = mockGetDependentVars(varInputs) + expect(result[0]).toEqual(['{workflow', 'node', 'output}']) + }) +}) + +/** + * Integration test to simulate the complete workflow scenario + */ +describe('Plugin Tool Workflow Integration', () => { + /** + * Simulate the scenario where plugin metadata is incomplete or corrupted + * This can happen when: + * 1. Plugin is being loaded from marketplace but metadata request fails + * 2. Plugin configuration is corrupted in database + * 3. Network issues during plugin loading + */ + it('should reproduce the client-side exception scenario', () => { + // Mock incomplete plugin data that could cause the error + const incompletePluginData = { + // Missing or null uniqueIdentifier + uniqueIdentifier: null, + meta: null, + minimum_dify_version: undefined, + } + + // This simulates the error path that leads to the white screen + expect(() => { + // Simulate the code path in switch-plugin-version.tsx:29 + // The actual problematic code doesn't use optional chaining + const _pluginId = (incompletePluginData.uniqueIdentifier as any).split(':')[0] + }).toThrow('Cannot read properties of null (reading \'split\')') + }) + + /** + * Test the scenario mentioned in the issue where plugin tools are loaded in workflow + */ + it('should simulate plugin tool loading in workflow context', () => { + // Mock the workflow context where plugin tools are being loaded + const workflowPluginTools = [ + { + provider_name: 'test-plugin', + uniqueIdentifier: null, // This is the problematic case + tool_name: 'test-tool', + }, + { + provider_name: 'valid-plugin', + uniqueIdentifier: 'valid-plugin:1.0.0', + tool_name: 'valid-tool', + }, + ] + + // Process each plugin tool + workflowPluginTools.forEach((tool, _index) => { + if (tool.uniqueIdentifier === null) { + // This reproduces the exact error scenario + expect(() => { + const _pluginId = (tool.uniqueIdentifier as any).split(':')[0] + }).toThrow() + } + else { + // Valid tools should work fine + expect(() => { + const _pluginId = tool.uniqueIdentifier.split(':')[0] + }).not.toThrow() + } + }) + }) +}) diff --git a/web/app/components/workflow/nodes/_base/components/switch-plugin-version.tsx b/web/app/components/workflow/nodes/_base/components/switch-plugin-version.tsx index 94b3ce7bfc..7ecbbd5602 100644 --- a/web/app/components/workflow/nodes/_base/components/switch-plugin-version.tsx +++ b/web/app/components/workflow/nodes/_base/components/switch-plugin-version.tsx @@ -26,7 +26,8 @@ export type SwitchPluginVersionProps = { export const SwitchPluginVersion: FC = (props) => { const { uniqueIdentifier, tooltip, onChange, className } = props - const [pluginId] = uniqueIdentifier.split(':') + + const [pluginId] = uniqueIdentifier?.split(':') || [''] const [isShow, setIsShow] = useState(false) const [isShowUpdateModal, { setTrue: showUpdateModal, setFalse: hideUpdateModal }] = useBoolean(false) const [target, setTarget] = useState<{ @@ -60,6 +61,11 @@ export const SwitchPluginVersion: FC = (props) => { }) } const { t } = useTranslation() + + // Guard against null/undefined uniqueIdentifier to prevent app crash + if (!uniqueIdentifier || !pluginId) + return null + return
    e.stopPropagation()}> {isShowUpdateModal && pluginDetail && { - return varInputs.map(item => item.variable.slice(1, -1).split('.')) + return varInputs.map((item) => { + // Guard against null/undefined variable to prevent app crash + if (!item.variable || typeof item.variable !== 'string') + return [] + + return item.variable.slice(1, -1).split('.') + }).filter(arr => arr.length > 0) } return { diff --git a/web/app/components/workflow/nodes/http/use-single-run-form-params.ts b/web/app/components/workflow/nodes/http/use-single-run-form-params.ts index c5d65634c4..42f39c4d32 100644 --- a/web/app/components/workflow/nodes/http/use-single-run-form-params.ts +++ b/web/app/components/workflow/nodes/http/use-single-run-form-params.ts @@ -62,7 +62,13 @@ const useSingleRunFormParams = ({ }, [inputVarValues, setInputVarValues, varInputs]) const getDependentVars = () => { - return varInputs.map(item => item.variable.slice(1, -1).split('.')) + return varInputs.map((item) => { + // Guard against null/undefined variable to prevent app crash + if (!item.variable || typeof item.variable !== 'string') + return [] + + return item.variable.slice(1, -1).split('.') + }).filter(arr => arr.length > 0) } return { diff --git a/web/app/components/workflow/nodes/llm/use-single-run-form-params.ts b/web/app/components/workflow/nodes/llm/use-single-run-form-params.ts index 93a8638d05..2480bbee31 100644 --- a/web/app/components/workflow/nodes/llm/use-single-run-form-params.ts +++ b/web/app/components/workflow/nodes/llm/use-single-run-form-params.ts @@ -168,7 +168,13 @@ const useSingleRunFormParams = ({ })() const getDependentVars = () => { - const promptVars = varInputs.map(item => item.variable.slice(1, -1).split('.')) + const promptVars = varInputs.map((item) => { + // Guard against null/undefined variable to prevent app crash + if (!item.variable || typeof item.variable !== 'string') + return [] + + return item.variable.slice(1, -1).split('.') + }).filter(arr => arr.length > 0) const contextVar = payload.context.variable_selector const vars = [...promptVars, contextVar] if (isVisionModel && payload.vision?.enabled && payload.vision?.configs?.variable_selector) { diff --git a/web/app/components/workflow/nodes/parameter-extractor/use-single-run-form-params.ts b/web/app/components/workflow/nodes/parameter-extractor/use-single-run-form-params.ts index 178f9e3ed8..f920ff1555 100644 --- a/web/app/components/workflow/nodes/parameter-extractor/use-single-run-form-params.ts +++ b/web/app/components/workflow/nodes/parameter-extractor/use-single-run-form-params.ts @@ -120,7 +120,13 @@ const useSingleRunFormParams = ({ })() const getDependentVars = () => { - const promptVars = varInputs.map(item => item.variable.slice(1, -1).split('.')) + const promptVars = varInputs.map((item) => { + // Guard against null/undefined variable to prevent app crash + if (!item.variable || typeof item.variable !== 'string') + return [] + + return item.variable.slice(1, -1).split('.') + }).filter(arr => arr.length > 0) const vars = [payload.query, ...promptVars] if (isVisionModel && payload.vision?.enabled && payload.vision?.configs?.variable_selector) { const visionVar = payload.vision.configs.variable_selector diff --git a/web/app/components/workflow/nodes/question-classifier/use-single-run-form-params.ts b/web/app/components/workflow/nodes/question-classifier/use-single-run-form-params.ts index 66755abb6e..9bbb3e1d5d 100644 --- a/web/app/components/workflow/nodes/question-classifier/use-single-run-form-params.ts +++ b/web/app/components/workflow/nodes/question-classifier/use-single-run-form-params.ts @@ -118,7 +118,13 @@ const useSingleRunFormParams = ({ })() const getDependentVars = () => { - const promptVars = varInputs.map(item => item.variable.slice(1, -1).split('.')) + const promptVars = varInputs.map((item) => { + // Guard against null/undefined variable to prevent app crash + if (!item.variable || typeof item.variable !== 'string') + return [] + + return item.variable.slice(1, -1).split('.') + }).filter(arr => arr.length > 0) const vars = [payload.query_variable_selector, ...promptVars] if (isVisionModel && payload.vision?.enabled && payload.vision?.configs?.variable_selector) { const visionVar = payload.vision.configs.variable_selector diff --git a/web/app/components/workflow/nodes/tool/use-single-run-form-params.ts b/web/app/components/workflow/nodes/tool/use-single-run-form-params.ts index 6fc79beebe..13b1da6b01 100644 --- a/web/app/components/workflow/nodes/tool/use-single-run-form-params.ts +++ b/web/app/components/workflow/nodes/tool/use-single-run-form-params.ts @@ -88,7 +88,13 @@ const useSingleRunFormParams = ({ const toolIcon = useToolIcon(payload) const getDependentVars = () => { - return varInputs.map(item => item.variable.slice(1, -1).split('.')) + return varInputs.map((item) => { + // Guard against null/undefined variable to prevent app crash + if (!item.variable || typeof item.variable !== 'string') + return [] + + return item.variable.slice(1, -1).split('.') + }).filter(arr => arr.length > 0) } return { From 4251515b4e0b02a9e8228b53830465fc58cdc7ae Mon Sep 17 00:00:00 2001 From: kenwoodjw Date: Thu, 31 Jul 2025 10:30:54 +0800 Subject: [PATCH 083/415] fix remote file (#23127) Signed-off-by: kenwoodjw --- api/factories/file_factory.py | 13 ++++++++++--- 1 file changed, 10 insertions(+), 3 deletions(-) diff --git a/api/factories/file_factory.py b/api/factories/file_factory.py index 512a9cb608..b2bcee5dcd 100644 --- a/api/factories/file_factory.py +++ b/api/factories/file_factory.py @@ -1,4 +1,6 @@ import mimetypes +import os +import urllib.parse import uuid from collections.abc import Callable, Mapping, Sequence from typing import Any, cast @@ -240,16 +242,21 @@ def _build_from_remote_url( def _get_remote_file_info(url: str): file_size = -1 - filename = url.split("/")[-1].split("?")[0] or "unknown_file" - mime_type = mimetypes.guess_type(filename)[0] or "" + parsed_url = urllib.parse.urlparse(url) + url_path = parsed_url.path + filename = os.path.basename(url_path) + + # Initialize mime_type from filename as fallback + mime_type, _ = mimetypes.guess_type(filename) resp = ssrf_proxy.head(url, follow_redirects=True) resp = cast(httpx.Response, resp) if resp.status_code == httpx.codes.OK: if content_disposition := resp.headers.get("Content-Disposition"): filename = str(content_disposition.split("filename=")[-1].strip('"')) + # Re-guess mime_type from updated filename + mime_type, _ = mimetypes.guess_type(filename) file_size = int(resp.headers.get("Content-Length", file_size)) - mime_type = mime_type or str(resp.headers.get("Content-Type", "")) return mime_type, filename, file_size From afac1fe590e3cbff9b8fd6c743ba8f34ec88b21a Mon Sep 17 00:00:00 2001 From: Jason Young <44939412+farion1231@users.noreply.github.com> Date: Thu, 31 Jul 2025 10:32:16 +0800 Subject: [PATCH 084/415] Add comprehensive security tests for file upload controller (#23102) Co-authored-by: autofix-ci[bot] <114827586+autofix-ci[bot]@users.noreply.github.com> --- .../console/test_files_security.py | 278 ++++++++++++++++++ 1 file changed, 278 insertions(+) create mode 100644 api/tests/unit_tests/controllers/console/test_files_security.py diff --git a/api/tests/unit_tests/controllers/console/test_files_security.py b/api/tests/unit_tests/controllers/console/test_files_security.py new file mode 100644 index 0000000000..cb5562d345 --- /dev/null +++ b/api/tests/unit_tests/controllers/console/test_files_security.py @@ -0,0 +1,278 @@ +import io +from unittest.mock import patch + +import pytest +from werkzeug.exceptions import Forbidden + +from controllers.common.errors import FilenameNotExistsError +from controllers.console.error import ( + FileTooLargeError, + NoFileUploadedError, + TooManyFilesError, + UnsupportedFileTypeError, +) +from services.errors.file import FileTooLargeError as ServiceFileTooLargeError +from services.errors.file import UnsupportedFileTypeError as ServiceUnsupportedFileTypeError + + +class TestFileUploadSecurity: + """Test file upload security logic without complex framework setup""" + + # Test 1: Basic file validation + def test_should_validate_file_presence(self): + """Test that missing file is detected""" + from flask import Flask, request + + app = Flask(__name__) + + with app.test_request_context(method="POST", data={}): + # Simulate the check in FileApi.post() + if "file" not in request.files: + with pytest.raises(NoFileUploadedError): + raise NoFileUploadedError() + + def test_should_validate_multiple_files(self): + """Test that multiple files are rejected""" + from flask import Flask, request + + app = Flask(__name__) + + file_data = { + "file": (io.BytesIO(b"content1"), "file1.txt", "text/plain"), + "file2": (io.BytesIO(b"content2"), "file2.txt", "text/plain"), + } + + with app.test_request_context(method="POST", data=file_data, content_type="multipart/form-data"): + # Simulate the check in FileApi.post() + if len(request.files) > 1: + with pytest.raises(TooManyFilesError): + raise TooManyFilesError() + + def test_should_validate_empty_filename(self): + """Test that empty filename is rejected""" + from flask import Flask, request + + app = Flask(__name__) + + file_data = {"file": (io.BytesIO(b"content"), "", "text/plain")} + + with app.test_request_context(method="POST", data=file_data, content_type="multipart/form-data"): + file = request.files["file"] + if not file.filename: + with pytest.raises(FilenameNotExistsError): + raise FilenameNotExistsError + + # Test 2: Security - Filename sanitization + def test_should_detect_path_traversal_in_filename(self): + """Test protection against directory traversal attacks""" + dangerous_filenames = [ + "../../../etc/passwd", + "..\\..\\windows\\system32\\config\\sam", + "../../../../etc/shadow", + "./../../../sensitive.txt", + ] + + for filename in dangerous_filenames: + # Any filename containing .. should be considered dangerous + assert ".." in filename, f"Filename {filename} should be detected as path traversal" + + def test_should_detect_null_byte_injection(self): + """Test protection against null byte injection""" + dangerous_filenames = [ + "file.jpg\x00.php", + "document.pdf\x00.exe", + "image.png\x00.sh", + ] + + for filename in dangerous_filenames: + # Null bytes should be detected + assert "\x00" in filename, f"Filename {filename} should be detected as null byte injection" + + def test_should_sanitize_special_characters(self): + """Test that special characters in filenames are handled safely""" + # Characters that could be problematic in various contexts + dangerous_chars = ["/", "\\", ":", "*", "?", '"', "<", ">", "|", "\x00"] + + for char in dangerous_chars: + filename = f"file{char}name.txt" + # These characters should be detected or sanitized + assert any(c in filename for c in dangerous_chars) + + # Test 3: Permission validation + def test_should_validate_dataset_permissions(self): + """Test dataset upload permission logic""" + + class MockUser: + is_dataset_editor = False + + user = MockUser() + source = "datasets" + + # Simulate the permission check in FileApi.post() + if source == "datasets" and not user.is_dataset_editor: + with pytest.raises(Forbidden): + raise Forbidden() + + def test_should_allow_general_upload_without_permission(self): + """Test general upload doesn't require dataset permission""" + + class MockUser: + is_dataset_editor = False + + user = MockUser() + source = None # General upload + + # This should not raise an exception + if source == "datasets" and not user.is_dataset_editor: + raise Forbidden() + # Test passes if no exception is raised + + # Test 4: Service error handling + @patch("services.file_service.FileService.upload_file") + def test_should_handle_file_too_large_error(self, mock_upload): + """Test that service FileTooLargeError is properly converted""" + mock_upload.side_effect = ServiceFileTooLargeError("File too large") + + try: + mock_upload(filename="test.txt", content=b"data", mimetype="text/plain", user=None, source=None) + except ServiceFileTooLargeError as e: + # Simulate the error conversion in FileApi.post() + with pytest.raises(FileTooLargeError): + raise FileTooLargeError(e.description) + + @patch("services.file_service.FileService.upload_file") + def test_should_handle_unsupported_file_type_error(self, mock_upload): + """Test that service UnsupportedFileTypeError is properly converted""" + mock_upload.side_effect = ServiceUnsupportedFileTypeError() + + try: + mock_upload( + filename="test.exe", content=b"data", mimetype="application/octet-stream", user=None, source=None + ) + except ServiceUnsupportedFileTypeError: + # Simulate the error conversion in FileApi.post() + with pytest.raises(UnsupportedFileTypeError): + raise UnsupportedFileTypeError() + + # Test 5: File type security + def test_should_identify_dangerous_file_extensions(self): + """Test detection of potentially dangerous file extensions""" + dangerous_extensions = [ + ".php", + ".PHP", + ".pHp", # PHP files (case variations) + ".exe", + ".EXE", # Executables + ".sh", + ".SH", # Shell scripts + ".bat", + ".BAT", # Batch files + ".cmd", + ".CMD", # Command files + ".ps1", + ".PS1", # PowerShell + ".jar", + ".JAR", # Java archives + ".vbs", + ".VBS", # VBScript + ] + + safe_extensions = [".txt", ".pdf", ".jpg", ".png", ".doc", ".docx"] + + # Just verify our test data is correct + for ext in dangerous_extensions: + assert ext.lower() in [".php", ".exe", ".sh", ".bat", ".cmd", ".ps1", ".jar", ".vbs"] + + for ext in safe_extensions: + assert ext.lower() not in [".php", ".exe", ".sh", ".bat", ".cmd", ".ps1", ".jar", ".vbs"] + + def test_should_detect_double_extensions(self): + """Test detection of double extension attacks""" + suspicious_filenames = [ + "image.jpg.php", + "document.pdf.exe", + "photo.png.sh", + "file.txt.bat", + ] + + for filename in suspicious_filenames: + # Check that these have multiple extensions + parts = filename.split(".") + assert len(parts) > 2, f"Filename {filename} should have multiple extensions" + + # Test 6: Configuration validation + def test_upload_configuration_structure(self): + """Test that upload configuration has correct structure""" + # Simulate the configuration returned by FileApi.get() + config = { + "file_size_limit": 15, + "batch_count_limit": 5, + "image_file_size_limit": 10, + "video_file_size_limit": 500, + "audio_file_size_limit": 50, + "workflow_file_upload_limit": 10, + } + + # Verify all required fields are present + required_fields = [ + "file_size_limit", + "batch_count_limit", + "image_file_size_limit", + "video_file_size_limit", + "audio_file_size_limit", + "workflow_file_upload_limit", + ] + + for field in required_fields: + assert field in config, f"Missing required field: {field}" + assert isinstance(config[field], int), f"Field {field} should be an integer" + assert config[field] > 0, f"Field {field} should be positive" + + # Test 7: Source parameter handling + def test_source_parameter_normalization(self): + """Test that source parameter is properly normalized""" + test_cases = [ + ("datasets", "datasets"), + ("other", None), + ("", None), + (None, None), + ] + + for input_source, expected in test_cases: + # Simulate the source normalization in FileApi.post() + source = "datasets" if input_source == "datasets" else None + if source not in ("datasets", None): + source = None + assert source == expected + + # Test 8: Boundary conditions + def test_should_handle_edge_case_file_sizes(self): + """Test handling of boundary file sizes""" + test_cases = [ + (0, "Empty file"), # 0 bytes + (1, "Single byte"), # 1 byte + (15 * 1024 * 1024 - 1, "Just under limit"), # Just under 15MB + (15 * 1024 * 1024, "At limit"), # Exactly 15MB + (15 * 1024 * 1024 + 1, "Just over limit"), # Just over 15MB + ] + + for size, description in test_cases: + # Just verify our test data + assert isinstance(size, int), f"{description}: Size should be integer" + assert size >= 0, f"{description}: Size should be non-negative" + + def test_should_handle_special_mime_types(self): + """Test handling of various MIME types""" + mime_type_tests = [ + ("application/octet-stream", "Generic binary"), + ("text/plain", "Plain text"), + ("image/jpeg", "JPEG image"), + ("application/pdf", "PDF document"), + ("", "Empty MIME type"), + (None, "None MIME type"), + ] + + for mime_type, description in mime_type_tests: + # Verify test data structure + if mime_type is not None: + assert isinstance(mime_type, str), f"{description}: MIME type should be string or None" From 5febd668089a41cbb01085ca6e53c9b148d2aef2 Mon Sep 17 00:00:00 2001 From: GuanMu Date: Thu, 31 Jul 2025 11:47:34 +0800 Subject: [PATCH 085/415] Fix: Fix style issues (#23209) --- web/app/components/app/annotation/index.tsx | 2 +- web/app/components/app/log/list.tsx | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/web/app/components/app/annotation/index.tsx b/web/app/components/app/annotation/index.tsx index 0b0691eb7d..bb2a95b0b5 100644 --- a/web/app/components/app/annotation/index.tsx +++ b/web/app/components/app/annotation/index.tsx @@ -146,7 +146,7 @@ const Annotation: FC = (props) => { return (

    {t('appLog.description')}

    -
    +
    {isChatApp && ( diff --git a/web/app/components/app/log/list.tsx b/web/app/components/app/log/list.tsx index b04148d484..b83e9e6a2a 100644 --- a/web/app/components/app/log/list.tsx +++ b/web/app/components/app/log/list.tsx @@ -688,7 +688,7 @@ const ConversationList: FC = ({ logs, appDetail, onRefresh }) return return ( -
    +
    From f5e177db89307a1f311803ebe3cd845d408f0cbb Mon Sep 17 00:00:00 2001 From: NFish Date: Thu, 31 Jul 2025 14:18:54 +0800 Subject: [PATCH 086/415] fix: call checkOrSetAccessToken when app access mode is PUBLIC (#23195) Co-authored-by: crazywoola <427733928@qq.com> --- .../app/overview/embedded/index.tsx | 6 +++++ web/context/web-app-context.tsx | 23 ++++++++----------- 2 files changed, 16 insertions(+), 13 deletions(-) diff --git a/web/app/components/app/overview/embedded/index.tsx b/web/app/components/app/overview/embedded/index.tsx index 9d97eae38d..cd25c4ca65 100644 --- a/web/app/components/app/overview/embedded/index.tsx +++ b/web/app/components/app/overview/embedded/index.tsx @@ -46,6 +46,12 @@ const OPTION_MAP = { ? `, baseUrl: '${url}${basePath}'` : ''}, + inputs: { + // You can define the inputs from the Start node here + // key is the variable name + // e.g. + // name: "NAME" + }, systemVariables: { // user_id: 'YOU CAN DEFINE USER ID HERE', // conversation_id: 'YOU CAN DEFINE CONVERSATION ID HERE, IT MUST BE A VALID UUID', diff --git a/web/context/web-app-context.tsx b/web/context/web-app-context.tsx index db1c5158dd..e78ef81bbc 100644 --- a/web/context/web-app-context.tsx +++ b/web/context/web-app-context.tsx @@ -61,22 +61,18 @@ const WebAppStoreProvider: FC = ({ children }) => { const pathname = usePathname() const searchParams = useSearchParams() const redirectUrlParam = searchParams.get('redirect_url') - const session = searchParams.get('session') - const sysUserId = searchParams.get('sys.user_id') - const [shareCode, setShareCode] = useState(null) - useEffect(() => { - const shareCodeFromRedirect = getShareCodeFromRedirectUrl(redirectUrlParam) - const shareCodeFromPathname = getShareCodeFromPathname(pathname) - const newShareCode = shareCodeFromRedirect || shareCodeFromPathname - setShareCode(newShareCode) - updateShareCode(newShareCode) - }, [pathname, redirectUrlParam, updateShareCode]) + + // Compute shareCode directly + const shareCode = getShareCodeFromRedirectUrl(redirectUrlParam) || getShareCodeFromPathname(pathname) + updateShareCode(shareCode) + const { isFetching, data: accessModeResult } = useGetWebAppAccessModeByCode(shareCode) - const [isFetchingAccessToken, setIsFetchingAccessToken] = useState(true) + const [isFetchingAccessToken, setIsFetchingAccessToken] = useState(false) + useEffect(() => { if (accessModeResult?.accessMode) { updateWebAppAccessMode(accessModeResult.accessMode) - if (accessModeResult?.accessMode === AccessMode.PUBLIC && session && sysUserId) { + if (accessModeResult.accessMode === AccessMode.PUBLIC) { setIsFetchingAccessToken(true) checkOrSetAccessToken(shareCode).finally(() => { setIsFetchingAccessToken(false) @@ -86,7 +82,8 @@ const WebAppStoreProvider: FC = ({ children }) => { setIsFetchingAccessToken(false) } } - }, [accessModeResult, updateWebAppAccessMode, setIsFetchingAccessToken, shareCode, session, sysUserId]) + }, [accessModeResult, updateWebAppAccessMode, shareCode]) + if (isFetching || isFetchingAccessToken) { return
    From a434f6240ffca455da88d2e2c4884464adae0e4b Mon Sep 17 00:00:00 2001 From: Yongtao Huang Date: Thu, 31 Jul 2025 15:33:39 +0800 Subject: [PATCH 087/415] Chroe: some misc cleanup (#23203) Co-authored-by: autofix-ci[bot] <114827586+autofix-ci[bot]@users.noreply.github.com> --- api/core/ops/ops_trace_manager.py | 2 +- .../time/tools/timezone_conversion.py | 2 +- .../workflow/nodes/document_extractor/node.py | 2 +- api/core/workflow/nodes/llm/node.py | 19 +------------------ 4 files changed, 4 insertions(+), 21 deletions(-) diff --git a/api/core/ops/ops_trace_manager.py b/api/core/ops/ops_trace_manager.py index b769934a9b..7eb5da7e3a 100644 --- a/api/core/ops/ops_trace_manager.py +++ b/api/core/ops/ops_trace_manager.py @@ -322,7 +322,7 @@ class OpsTraceManager: :return: """ # auth check - if enabled == True: + if enabled: try: provider_config_map[tracing_provider] except KeyError: diff --git a/api/core/tools/builtin_tool/providers/time/tools/timezone_conversion.py b/api/core/tools/builtin_tool/providers/time/tools/timezone_conversion.py index f9b776b3b9..91316b859a 100644 --- a/api/core/tools/builtin_tool/providers/time/tools/timezone_conversion.py +++ b/api/core/tools/builtin_tool/providers/time/tools/timezone_conversion.py @@ -27,7 +27,7 @@ class TimezoneConversionTool(BuiltinTool): target_time = self.timezone_convert(current_time, current_timezone, target_timezone) # type: ignore if not target_time: yield self.create_text_message( - f"Invalid datatime and timezone: {current_time},{current_timezone},{target_timezone}" + f"Invalid datetime and timezone: {current_time},{current_timezone},{target_timezone}" ) return diff --git a/api/core/workflow/nodes/document_extractor/node.py b/api/core/workflow/nodes/document_extractor/node.py index f3061f7d96..23512c8ce4 100644 --- a/api/core/workflow/nodes/document_extractor/node.py +++ b/api/core/workflow/nodes/document_extractor/node.py @@ -597,7 +597,7 @@ def _extract_text_from_vtt(vtt_bytes: bytes) -> str: for i in range(1, len(raw_results)): spk, txt = raw_results[i] - if spk == None: + if spk is None: merged_results.append((None, current_text)) continue diff --git a/api/core/workflow/nodes/llm/node.py b/api/core/workflow/nodes/llm/node.py index 90a0397b67..dfc2a0000b 100644 --- a/api/core/workflow/nodes/llm/node.py +++ b/api/core/workflow/nodes/llm/node.py @@ -3,7 +3,7 @@ import io import json import logging from collections.abc import Generator, Mapping, Sequence -from typing import TYPE_CHECKING, Any, Optional, cast +from typing import TYPE_CHECKING, Any, Optional from core.app.entities.app_invoke_entities import ModelConfigWithCredentialsEntity from core.file import FileType, file_manager @@ -33,12 +33,10 @@ from core.model_runtime.entities.message_entities import ( UserPromptMessage, ) from core.model_runtime.entities.model_entities import ( - AIModelEntity, ModelFeature, ModelPropertyKey, ModelType, ) -from core.model_runtime.model_providers.__base.large_language_model import LargeLanguageModel from core.model_runtime.utils.encoders import jsonable_encoder from core.prompt.entities.advanced_prompt_entities import CompletionModelPromptTemplate, MemoryConfig from core.prompt.utils.prompt_message_util import PromptMessageUtil @@ -1006,21 +1004,6 @@ class LLMNode(BaseNode): ) return saved_file - def _fetch_model_schema(self, provider: str) -> AIModelEntity | None: - """ - Fetch model schema - """ - model_name = self._node_data.model.name - model_manager = ModelManager() - model_instance = model_manager.get_model_instance( - tenant_id=self.tenant_id, model_type=ModelType.LLM, provider=provider, model=model_name - ) - model_type_instance = model_instance.model_type_instance - model_type_instance = cast(LargeLanguageModel, model_type_instance) - model_credentials = model_instance.credentials - model_schema = model_type_instance.get_model_schema(model_name, model_credentials) - return model_schema - @staticmethod def fetch_structured_output_schema( *, From a82b55005b8f3d4532d17e1181fa894a9987288a Mon Sep 17 00:00:00 2001 From: lyzno1 <92089059+lyzno1@users.noreply.github.com> Date: Thu, 31 Jul 2025 16:04:49 +0800 Subject: [PATCH 088/415] fix: resolve sidebar animation glitches and layout shifts in app detail page (#23216) (#23221) --- web/app/components/app-sidebar/app-info.tsx | 21 +- web/app/components/app-sidebar/index.tsx | 5 +- .../components/app-sidebar/navLink.spec.tsx | 189 +++++++++++ web/app/components/app-sidebar/navLink.tsx | 17 +- .../sidebar-animation-issues.spec.tsx | 297 ++++++++++++++++++ .../text-squeeze-fix-verification.spec.tsx | 235 ++++++++++++++ 6 files changed, 746 insertions(+), 18 deletions(-) create mode 100644 web/app/components/app-sidebar/navLink.spec.tsx create mode 100644 web/app/components/app-sidebar/sidebar-animation-issues.spec.tsx create mode 100644 web/app/components/app-sidebar/text-squeeze-fix-verification.spec.tsx diff --git a/web/app/components/app-sidebar/app-info.tsx b/web/app/components/app-sidebar/app-info.tsx index 58c9f7e5ca..c04d79d2f2 100644 --- a/web/app/components/app-sidebar/app-info.tsx +++ b/web/app/components/app-sidebar/app-info.tsx @@ -271,16 +271,17 @@ const AppInfo = ({ expand, onlyShowDetail = false, openState = false, onDetailEx
    - { - expand && ( -
    -
    -
    {appDetail.name}
    -
    -
    {appDetail.mode === 'advanced-chat' ? t('app.types.advanced') : appDetail.mode === 'agent-chat' ? t('app.types.agent') : appDetail.mode === 'chat' ? t('app.types.chatbot') : appDetail.mode === 'completion' ? t('app.types.completion') : t('app.types.workflow')}
    -
    - ) - } +
    +
    +
    {appDetail.name}
    +
    +
    {appDetail.mode === 'advanced-chat' ? t('app.types.advanced') : appDetail.mode === 'agent-chat' ? t('app.types.agent') : appDetail.mode === 'chat' ? t('app.types.chatbot') : appDetail.mode === 'completion' ? t('app.types.completion') : t('app.types.workflow')}
    +
    )} diff --git a/web/app/components/app-sidebar/index.tsx b/web/app/components/app-sidebar/index.tsx index b6bfc0e9ac..cf32339b8a 100644 --- a/web/app/components/app-sidebar/index.tsx +++ b/web/app/components/app-sidebar/index.tsx @@ -124,10 +124,7 @@ const AppDetailNav = ({ title, desc, isExternal, icon, icon_background, navigati { !isMobile && (
    ({ + useSelectedLayoutSegment: () => 'overview', +})) + +// Mock Next.js Link component +jest.mock('next/link', () => { + return function MockLink({ children, href, className, title }: any) { + return ( + + {children} + + ) + } +}) + +// Mock RemixIcon components +const MockIcon = ({ className }: { className?: string }) => ( + +) + +describe('NavLink Text Animation Issues', () => { + const mockProps: NavLinkProps = { + name: 'Orchestrate', + href: '/app/123/workflow', + iconMap: { + selected: MockIcon, + normal: MockIcon, + }, + } + + beforeEach(() => { + // Mock getComputedStyle for transition testing + Object.defineProperty(window, 'getComputedStyle', { + value: jest.fn((element) => { + const isExpanded = element.getAttribute('data-mode') === 'expand' + return { + transition: 'all 0.3s ease', + opacity: isExpanded ? '1' : '0', + width: isExpanded ? 'auto' : '0px', + overflow: 'hidden', + paddingLeft: isExpanded ? '12px' : '10px', // px-3 vs px-2.5 + paddingRight: isExpanded ? '12px' : '10px', + } + }), + writable: true, + }) + }) + + describe('Text Squeeze Animation Issue', () => { + it('should show text squeeze effect when switching from collapse to expand', async () => { + const { rerender } = render() + + // In collapse mode, text should be in DOM but hidden via CSS + const textElement = screen.getByText('Orchestrate') + expect(textElement).toBeInTheDocument() + expect(textElement).toHaveClass('opacity-0') + expect(textElement).toHaveClass('w-0') + expect(textElement).toHaveClass('overflow-hidden') + + // Icon should still be present + expect(screen.getByTestId('nav-icon')).toBeInTheDocument() + + // Check padding in collapse mode + const linkElement = screen.getByTestId('nav-link') + expect(linkElement).toHaveClass('px-2.5') + + // Switch to expand mode - this is where the squeeze effect occurs + rerender() + + // Text should now appear + expect(screen.getByText('Orchestrate')).toBeInTheDocument() + + // Check padding change - this contributes to the squeeze effect + expect(linkElement).toHaveClass('px-3') + + // The bug: text appears abruptly without smooth transition + // This test documents the current behavior that causes the squeeze effect + const expandedTextElement = screen.getByText('Orchestrate') + expect(expandedTextElement).toBeInTheDocument() + + // In a properly animated version, we would expect: + // - Opacity transition from 0 to 1 + // - Width transition from 0 to auto + // - No layout shift from padding changes + }) + + it('should maintain icon position consistency during text appearance', () => { + const { rerender } = render() + + const iconElement = screen.getByTestId('nav-icon') + const initialIconClasses = iconElement.className + + // Icon should have mr-0 in collapse mode + expect(iconElement).toHaveClass('mr-0') + + rerender() + + const expandedIconClasses = iconElement.className + + // Icon should have mr-2 in expand mode - this shift contributes to the squeeze effect + expect(iconElement).toHaveClass('mr-2') + + console.log('Collapsed icon classes:', initialIconClasses) + console.log('Expanded icon classes:', expandedIconClasses) + + // This margin change causes the icon to shift when text appears + }) + + it('should document the abrupt text rendering issue', () => { + const { rerender } = render() + + // Text is present in DOM but hidden via CSS classes + const collapsedText = screen.getByText('Orchestrate') + expect(collapsedText).toBeInTheDocument() + expect(collapsedText).toHaveClass('opacity-0') + expect(collapsedText).toHaveClass('pointer-events-none') + + rerender() + + // Text suddenly appears in DOM - no transition + expect(screen.getByText('Orchestrate')).toBeInTheDocument() + + // The issue: {mode === 'expand' && name} causes abrupt show/hide + // instead of smooth opacity/width transition + }) + }) + + describe('Layout Shift Issues', () => { + it('should detect padding differences causing layout shifts', () => { + const { rerender } = render() + + const linkElement = screen.getByTestId('nav-link') + + // Collapsed state padding + expect(linkElement).toHaveClass('px-2.5') + + rerender() + + // Expanded state padding - different value causes layout shift + expect(linkElement).toHaveClass('px-3') + + // This 2px difference (10px vs 12px) contributes to the squeeze effect + }) + + it('should detect icon margin changes causing shifts', () => { + const { rerender } = render() + + const iconElement = screen.getByTestId('nav-icon') + + // Collapsed: no right margin + expect(iconElement).toHaveClass('mr-0') + + rerender() + + // Expanded: 8px right margin (mr-2) + expect(iconElement).toHaveClass('mr-2') + + // This sudden margin appearance causes the squeeze effect + }) + }) + + describe('Active State Handling', () => { + it('should handle active state correctly in both modes', () => { + // Test non-active state + const { rerender } = render() + + let linkElement = screen.getByTestId('nav-link') + expect(linkElement).not.toHaveClass('bg-state-accent-active') + + // Test with active state (when href matches current segment) + const activeProps = { + ...mockProps, + href: '/app/123/overview', // matches mocked segment + } + + rerender() + + linkElement = screen.getByTestId('nav-link') + expect(linkElement).toHaveClass('bg-state-accent-active') + }) + }) +}) diff --git a/web/app/components/app-sidebar/navLink.tsx b/web/app/components/app-sidebar/navLink.tsx index 295b553b04..4607f7b693 100644 --- a/web/app/components/app-sidebar/navLink.tsx +++ b/web/app/components/app-sidebar/navLink.tsx @@ -44,20 +44,29 @@ export default function NavLink({ key={name} href={href} className={classNames( - isActive ? 'bg-state-accent-active text-text-accent font-semibold' : 'text-components-menu-item-text hover:bg-state-base-hover hover:text-components-menu-item-text-hover', - 'group flex items-center h-9 rounded-md py-2 text-sm font-normal', + isActive ? 'bg-state-accent-active font-semibold text-text-accent' : 'text-components-menu-item-text hover:bg-state-base-hover hover:text-components-menu-item-text-hover', + 'group flex h-9 items-center rounded-md py-2 text-sm font-normal', mode === 'expand' ? 'px-3' : 'px-2.5', )} title={mode === 'collapse' ? name : ''} >
    + Update the value of a specific conversation variable. This endpoint allows you to modify the value of a variable that was captured during the conversation while preserving its name, type, and description. + + ### Path Parameters + + + + The ID of the conversation containing the variable to update. + + + The ID of the variable to update. + + + + ### Request Body + + + + The new value for the variable. Must match the variable's expected type (string, number, object, etc.). + + + The user identifier, defined by the developer, must ensure uniqueness within the application. + + + + ### Response + + Returns the updated variable object with: + - `id` (string) Variable ID + - `name` (string) Variable name + - `value_type` (string) Variable type (string, number, object, etc.) + - `value` (any) Updated variable value + - `description` (string) Variable description + - `created_at` (int) Creation timestamp + - `updated_at` (int) Last update timestamp + + ### Errors + - 400, `Type mismatch: variable expects {expected_type}, but got {actual_type} type`, Value type doesn't match variable's expected type + - 404, `conversation_not_exists`, Conversation not found + - 404, `conversation_variable_not_exists`, Variable not found + + + + + + + ```bash {{ title: 'cURL' }} + curl -X PUT '${props.appDetail.api_base_url}/conversations/{conversation_id}/variables/{variable_id}' \ + --header 'Content-Type: application/json' \ + --header 'Authorization: Bearer {api_key}' \ + --data-raw '{ + "value": "Updated Value", + "user": "abc-123" + }' + ``` + + + + + ```bash {{ title: 'String Value' }} + curl -X PUT '${props.appDetail.api_base_url}/conversations/{conversation_id}/variables/{variable_id}' \ + --header 'Content-Type: application/json' \ + --header 'Authorization: Bearer {api_key}' \ + --data-raw '{ + "value": "New string value", + "user": "abc-123" + }' + ``` + + ```bash {{ title: 'Number Value' }} + curl -X PUT '${props.appDetail.api_base_url}/conversations/{conversation_id}/variables/{variable_id}' \ + --header 'Content-Type: application/json' \ + --header 'Authorization: Bearer {api_key}' \ + --data-raw '{ + "value": 42, + "user": "abc-123" + }' + ``` + + ```bash {{ title: 'Object Value' }} + curl -X PUT '${props.appDetail.api_base_url}/conversations/{conversation_id}/variables/{variable_id}' \ + --header 'Content-Type: application/json' \ + --header 'Authorization: Bearer {api_key}' \ + --data-raw '{ + "value": {"product": "Widget", "quantity": 10, "price": 29.99}, + "user": "abc-123" + }' + ``` + + + + ```json {{ title: 'Response' }} + { + "id": "variable-uuid-1", + "name": "customer_name", + "value_type": "string", + "value": "Updated Value", + "description": "Customer name extracted from the conversation", + "created_at": 1650000000000, + "updated_at": 1650000001000 + } + ``` + + + + +--- + + + + 特定の会話変数の値を更新します。このエンドポイントは、名前、型、説明を保持しながら、会話中にキャプチャされた変数の値を変更することを可能にします。 + + ### パスパラメータ + + + + 更新する変数を含む会話のID。 + + + 更新する変数のID。 + + + + ### リクエストボディ + + + + 変数の新しい値。変数の期待される型(文字列、数値、オブジェクトなど)と一致する必要があります。 + + + ユーザー識別子。開発者によって定義されたルールに従い、アプリケーション内で一意である必要があります。 + + + + ### レスポンス + + 以下を含む更新された変数オブジェクトを返します: + - `id` (string) 変数ID + - `name` (string) 変数名 + - `value_type` (string) 変数型(文字列、数値、オブジェクトなど) + - `value` (any) 更新された変数値 + - `description` (string) 変数の説明 + - `created_at` (int) 作成タイムスタンプ + - `updated_at` (int) 最終更新タイムスタンプ + + ### エラー + - 400, `Type mismatch: variable expects {expected_type}, but got {actual_type} type`, 値の型が変数の期待される型と一致しません + - 404, `conversation_not_exists`, 会話が見つかりません + - 404, `conversation_variable_not_exists`, 変数が見つかりません + + + + + + + ```bash {{ title: 'cURL' }} + curl -X PUT '${props.appDetail.api_base_url}/conversations/{conversation_id}/variables/{variable_id}' \ + --header 'Content-Type: application/json' \ + --header 'Authorization: Bearer {api_key}' \ + --data-raw '{ + "value": "Updated Value", + "user": "abc-123" + }' + ``` + + + + + ```bash {{ title: '文字列値' }} + curl -X PUT '${props.appDetail.api_base_url}/conversations/{conversation_id}/variables/{variable_id}' \ + --header 'Content-Type: application/json' \ + --header 'Authorization: Bearer {api_key}' \ + --data-raw '{ + "value": "新しい文字列値", + "user": "abc-123" + }' + ``` + + ```bash {{ title: '数値' }} + curl -X PUT '${props.appDetail.api_base_url}/conversations/{conversation_id}/variables/{variable_id}' \ + --header 'Content-Type: application/json' \ + --header 'Authorization: Bearer {api_key}' \ + --data-raw '{ + "value": 42, + "user": "abc-123" + }' + ``` + + ```bash {{ title: 'オブジェクト値' }} + curl -X PUT '${props.appDetail.api_base_url}/conversations/{conversation_id}/variables/{variable_id}' \ + --header 'Content-Type: application/json' \ + --header 'Authorization: Bearer {api_key}' \ + --data-raw '{ + "value": {"product": "Widget", "quantity": 10, "price": 29.99}, + "user": "abc-123" + }' + ``` + + + + ```json {{ title: 'Response' }} + { + "id": "variable-uuid-1", + "name": "customer_name", + "value_type": "string", + "value": "Updated Value", + "description": "会話から抽出された顧客名", + "created_at": 1650000000000, + "updated_at": 1650000001000 + } + ``` + + + + +--- + + + + 更新特定对话变量的值。此端点允许您修改在对话过程中捕获的变量值,同时保留其名称、类型和描述。 + + ### 路径参数 + + + + 包含要更新变量的对话ID。 + + + 要更新的变量ID。 + + + + ### 请求体 + + + + 变量的新值。必须匹配变量的预期类型(字符串、数字、对象等)。 + + + 用户标识符,由开发人员定义的规则,在应用程序内必须唯一。 + + + + ### 响应 + + 返回包含以下内容的更新变量对象: + - `id` (string) 变量ID + - `name` (string) 变量名称 + - `value_type` (string) 变量类型(字符串、数字、对象等) + - `value` (any) 更新后的变量值 + - `description` (string) 变量描述 + - `created_at` (int) 创建时间戳 + - `updated_at` (int) 最后更新时间戳 + + ### 错误 + - 400, `Type mismatch: variable expects {expected_type}, but got {actual_type} type`, 值类型与变量的预期类型不匹配 + - 404, `conversation_not_exists`, 对话不存在 + - 404, `conversation_variable_not_exists`, 变量不存在 + + + + + + + ```bash {{ title: 'cURL' }} + curl -X PUT '${props.appDetail.api_base_url}/conversations/{conversation_id}/variables/{variable_id}' \ + --header 'Content-Type: application/json' \ + --header 'Authorization: Bearer {api_key}' \ + --data-raw '{ + "value": "Updated Value", + "user": "abc-123" + }' + ``` + + + + + ```bash {{ title: '字符串值' }} + curl -X PUT '${props.appDetail.api_base_url}/conversations/{conversation_id}/variables/{variable_id}' \ + --header 'Content-Type: application/json' \ + --header 'Authorization: Bearer {api_key}' \ + --data-raw '{ + "value": "新的字符串值", + "user": "abc-123" + }' + ``` + + ```bash {{ title: '数字值' }} + curl -X PUT '${props.appDetail.api_base_url}/conversations/{conversation_id}/variables/{variable_id}' \ + --header 'Content-Type: application/json' \ + --header 'Authorization: Bearer {api_key}' \ + --data-raw '{ + "value": 42, + "user": "abc-123" + }' + ``` + + ```bash {{ title: '对象值' }} + curl -X PUT '${props.appDetail.api_base_url}/conversations/{conversation_id}/variables/{variable_id}' \ + --header 'Content-Type: application/json' \ + --header 'Authorization: Bearer {api_key}' \ + --data-raw '{ + "value": {"product": "Widget", "quantity": 10, "price": 29.99}, + "user": "abc-123" + }' + ``` + + + + ```json {{ title: 'Response' }} + { + "id": "variable-uuid-1", + "name": "customer_name", + "value_type": "string", + "value": "Updated Value", + "description": "客户名称(从对话中提取)", + "created_at": 1650000000000, + "updated_at": 1650000001000 + } + ``` + + + + +--- + + + + Update the value of a specific conversation variable. This endpoint allows you to modify the value of a variable that was captured during the conversation while preserving its name, type, and description. + + ### Path Parameters + + + + The ID of the conversation containing the variable to update. + + + The ID of the variable to update. + + + + ### Request Body + + + + The new value for the variable. Must match the variable's expected type (string, number, object, etc.). + + + The user identifier, defined by the developer, must ensure uniqueness within the application. + + + + ### Response + + Returns the updated variable object with: + - `id` (string) Variable ID + - `name` (string) Variable name + - `value_type` (string) Variable type (string, number, object, etc.) + - `value` (any) Updated variable value + - `description` (string) Variable description + - `created_at` (int) Creation timestamp + - `updated_at` (int) Last update timestamp + + ### Errors + - 400, `Type mismatch: variable expects {expected_type}, but got {actual_type} type`, Value type doesn't match variable's expected type + - 404, `conversation_not_exists`, Conversation not found + - 404, `conversation_variable_not_exists`, Variable not found + + + + + + + ```bash {{ title: 'cURL' }} + curl -X PUT '${props.appDetail.api_base_url}/conversations/{conversation_id}/variables/{variable_id}' \ + --header 'Content-Type: application/json' \ + --header 'Authorization: Bearer {api_key}' \ + --data-raw '{ + "value": "Updated Value", + "user": "abc-123" + }' + ``` + + + + + ```bash {{ title: 'String Value' }} + curl -X PUT '${props.appDetail.api_base_url}/conversations/{conversation_id}/variables/{variable_id}' \ + --header 'Content-Type: application/json' \ + --header 'Authorization: Bearer {api_key}' \ + --data-raw '{ + "value": "New string value", + "user": "abc-123" + }' + ``` + + ```bash {{ title: 'Number Value' }} + curl -X PUT '${props.appDetail.api_base_url}/conversations/{conversation_id}/variables/{variable_id}' \ + --header 'Content-Type: application/json' \ + --header 'Authorization: Bearer {api_key}' \ + --data-raw '{ + "value": 42, + "user": "abc-123" + }' + ``` + + ```bash {{ title: 'Object Value' }} + curl -X PUT '${props.appDetail.api_base_url}/conversations/{conversation_id}/variables/{variable_id}' \ + --header 'Content-Type: application/json' \ + --header 'Authorization: Bearer {api_key}' \ + --data-raw '{ + "value": {"product": "Widget", "quantity": 10, "price": 29.99}, + "user": "abc-123" + }' + ``` + + + + ```json {{ title: 'Response' }} + { + "id": "variable-uuid-1", + "name": "customer_name", + "value_type": "string", + "value": "Updated Value", + "description": "Customer name extracted from the conversation", + "created_at": 1650000000000, + "updated_at": 1650000001000 + } + ``` + + + + +--- + + + + 特定の会話変数の値を更新します。このエンドポイントは、名前、型、説明を保持しながら、会話中にキャプチャされた変数の値を変更することを可能にします。 + + ### パスパラメータ + + + + 更新する変数を含む会話のID。 + + + 更新する変数のID。 + + + + ### リクエストボディ + + + + 変数の新しい値。変数の期待される型(文字列、数値、オブジェクトなど)と一致する必要があります。 + + + ユーザー識別子。開発者によって定義されたルールに従い、アプリケーション内で一意である必要があります。 + + + + ### レスポンス + + 以下を含む更新された変数オブジェクトを返します: + - `id` (string) 変数ID + - `name` (string) 変数名 + - `value_type` (string) 変数型(文字列、数値、オブジェクトなど) + - `value` (any) 更新された変数値 + - `description` (string) 変数の説明 + - `created_at` (int) 作成タイムスタンプ + - `updated_at` (int) 最終更新タイムスタンプ + + ### エラー + - 400, `Type mismatch: variable expects {expected_type}, but got {actual_type} type`, 値の型が変数の期待される型と一致しません + - 404, `conversation_not_exists`, 会話が見つかりません + - 404, `conversation_variable_not_exists`, 変数が見つかりません + + + + + + + ```bash {{ title: 'cURL' }} + curl -X PUT '${props.appDetail.api_base_url}/conversations/{conversation_id}/variables/{variable_id}' \ + --header 'Content-Type: application/json' \ + --header 'Authorization: Bearer {api_key}' \ + --data-raw '{ + "value": "Updated Value", + "user": "abc-123" + }' + ``` + + + + + ```bash {{ title: '文字列値' }} + curl -X PUT '${props.appDetail.api_base_url}/conversations/{conversation_id}/variables/{variable_id}' \ + --header 'Content-Type: application/json' \ + --header 'Authorization: Bearer {api_key}' \ + --data-raw '{ + "value": "新しい文字列値", + "user": "abc-123" + }' + ``` + + ```bash {{ title: '数値' }} + curl -X PUT '${props.appDetail.api_base_url}/conversations/{conversation_id}/variables/{variable_id}' \ + --header 'Content-Type: application/json' \ + --header 'Authorization: Bearer {api_key}' \ + --data-raw '{ + "value": 42, + "user": "abc-123" + }' + ``` + + ```bash {{ title: 'オブジェクト値' }} + curl -X PUT '${props.appDetail.api_base_url}/conversations/{conversation_id}/variables/{variable_id}' \ + --header 'Content-Type: application/json' \ + --header 'Authorization: Bearer {api_key}' \ + --data-raw '{ + "value": {"product": "Widget", "quantity": 10, "price": 29.99}, + "user": "abc-123" + }' + ``` + + + + ```json {{ title: 'Response' }} + { + "id": "variable-uuid-1", + "name": "customer_name", + "value_type": "string", + "value": "Updated Value", + "description": "会話から抽出された顧客名", + "created_at": 1650000000000, + "updated_at": 1650000001000 + } + ``` + + + + +--- + + + + 更新特定对话变量的值。此端点允许您修改在对话过程中捕获的变量值,同时保留其名称、类型和描述。 + + ### 路径参数 + + + + 包含要更新变量的对话ID。 + + + 要更新的变量ID。 + + + + ### 请求体 + + + + 变量的新值。必须匹配变量的预期类型(字符串、数字、对象等)。 + + + 用户标识符,由开发人员定义的规则,在应用程序内必须唯一。 + + + + ### 响应 + + 返回包含以下内容的更新变量对象: + - `id` (string) 变量ID + - `name` (string) 变量名称 + - `value_type` (string) 变量类型(字符串、数字、对象等) + - `value` (any) 更新后的变量值 + - `description` (string) 变量描述 + - `created_at` (int) 创建时间戳 + - `updated_at` (int) 最后更新时间戳 + + ### 错误 + - 400, `Type mismatch: variable expects {expected_type}, but got {actual_type} type`, 值类型与变量的预期类型不匹配 + - 404, `conversation_not_exists`, 对话不存在 + - 404, `conversation_variable_not_exists`, 变量不存在 + + + + + + + ```bash {{ title: 'cURL' }} + curl -X PUT '${props.appDetail.api_base_url}/conversations/{conversation_id}/variables/{variable_id}' \ + --header 'Content-Type: application/json' \ + --header 'Authorization: Bearer {api_key}' \ + --data-raw '{ + "value": "Updated Value", + "user": "abc-123" + }' + ``` + + + + + ```bash {{ title: '字符串值' }} + curl -X PUT '${props.appDetail.api_base_url}/conversations/{conversation_id}/variables/{variable_id}' \ + --header 'Content-Type: application/json' \ + --header 'Authorization: Bearer {api_key}' \ + --data-raw '{ + "value": "新的字符串值", + "user": "abc-123" + }' + ``` + + ```bash {{ title: '数字值' }} + curl -X PUT '${props.appDetail.api_base_url}/conversations/{conversation_id}/variables/{variable_id}' \ + --header 'Content-Type: application/json' \ + --header 'Authorization: Bearer {api_key}' \ + --data-raw '{ + "value": 42, + "user": "abc-123" + }' + ``` + + ```bash {{ title: '对象值' }} + curl -X PUT '${props.appDetail.api_base_url}/conversations/{conversation_id}/variables/{variable_id}' \ + --header 'Content-Type: application/json' \ + --header 'Authorization: Bearer {api_key}' \ + --data-raw '{ + "value": {"product": "Widget", "quantity": 10, "price": 29.99}, + "user": "abc-123" + }' + ``` + + + + ```json {{ title: 'Response' }} + { + "id": "variable-uuid-1", + "name": "customer_name", + "value_type": "string", + "value": "Updated Value", + "description": "客户名称(从对话中提取)", + "created_at": 1650000000000, + "updated_at": 1650000001000 + } + ``` + + + + +--- + Date: Thu, 31 Jul 2025 22:26:50 -0400 Subject: [PATCH 096/415] fix: resolve multipart/form-data boundary issue in HTTP Request compo nent #22880 (#23008) Co-authored-by: crazywoola <427733928@qq.com> --- .../workflow/nodes/http_request/executor.py | 39 +++++++++++++++---- 1 file changed, 32 insertions(+), 7 deletions(-) diff --git a/api/core/workflow/nodes/http_request/executor.py b/api/core/workflow/nodes/http_request/executor.py index fe103c7117..2106369bd6 100644 --- a/api/core/workflow/nodes/http_request/executor.py +++ b/api/core/workflow/nodes/http_request/executor.py @@ -277,6 +277,22 @@ class Executor: elif self.auth.config.type == "custom": headers[authorization.config.header] = authorization.config.api_key or "" + # Handle Content-Type for multipart/form-data requests + # Fix for issue #22880: Missing boundary when using multipart/form-data + body = self.node_data.body + if body and body.type == "form-data": + # For multipart/form-data with files, let httpx handle the boundary automatically + # by not setting Content-Type header when files are present + if not self.files or all(f[0] == "__multipart_placeholder__" for f in self.files): + # Only set Content-Type when there are no actual files + # This ensures httpx generates the correct boundary + if "content-type" not in (k.lower() for k in headers): + headers["Content-Type"] = "multipart/form-data" + elif body and body.type in BODY_TYPE_TO_CONTENT_TYPE: + # Set Content-Type for other body types + if "content-type" not in (k.lower() for k in headers): + headers["Content-Type"] = BODY_TYPE_TO_CONTENT_TYPE[body.type] + return headers def _validate_and_parse_response(self, response: httpx.Response) -> Response: @@ -384,15 +400,24 @@ class Executor: # '__multipart_placeholder__' is inserted to force multipart encoding but is not a real file. # This prevents logging meaningless placeholder entries. if self.files and not all(f[0] == "__multipart_placeholder__" for f in self.files): - for key, (filename, content, mime_type) in self.files: + for file_entry in self.files: + # file_entry should be (key, (filename, content, mime_type)), but handle edge cases + if len(file_entry) != 2 or not isinstance(file_entry[1], tuple) or len(file_entry[1]) < 2: + continue # skip malformed entries + key = file_entry[0] + content = file_entry[1][1] body_string += f"--{boundary}\r\n" body_string += f'Content-Disposition: form-data; name="{key}"\r\n\r\n' - # decode content - try: - body_string += content.decode("utf-8") - except UnicodeDecodeError: - # fix: decode binary content - pass + # decode content safely + if isinstance(content, bytes): + try: + body_string += content.decode("utf-8") + except UnicodeDecodeError: + body_string += content.decode("utf-8", errors="replace") + elif isinstance(content, str): + body_string += content + else: + body_string += f"[Unsupported content type: {type(content).__name__}]" body_string += "\r\n" body_string += f"--{boundary}--\r\n" elif self.node_data.body: From c33741a5e9599a0e7b944dc96baf57aeaf70ad12 Mon Sep 17 00:00:00 2001 From: Alan Bustamante Date: Fri, 1 Aug 2025 04:34:46 +0200 Subject: [PATCH 097/415] fix: improve boolean field handling in plugin configuration forms (#23160) Co-authored-by: crazywoola <427733928@qq.com> --- .../plugin-detail-panel/endpoint-modal.tsx | 17 ++++++++++++++++- .../components/tools/utils/to-form-schema.ts | 10 ++++++++++ 2 files changed, 26 insertions(+), 1 deletion(-) diff --git a/web/app/components/plugins/plugin-detail-panel/endpoint-modal.tsx b/web/app/components/plugins/plugin-detail-panel/endpoint-modal.tsx index 130773e0c2..a715237a43 100644 --- a/web/app/components/plugins/plugin-detail-panel/endpoint-modal.tsx +++ b/web/app/components/plugins/plugin-detail-panel/endpoint-modal.tsx @@ -47,7 +47,22 @@ const EndpointModal: FC = ({ return } } - onSaved(tempCredential) + + // Fix: Process boolean fields to ensure they are sent as proper boolean values + const processedCredential = { ...tempCredential } + formSchemas.forEach((field) => { + if (field.type === 'boolean' && processedCredential[field.name] !== undefined) { + const value = processedCredential[field.name] + if (typeof value === 'string') + processedCredential[field.name] = value === 'true' || value === '1' || value === 'True' + else if (typeof value === 'number') + processedCredential[field.name] = value === 1 + else if (typeof value === 'boolean') + processedCredential[field.name] = value + } + }) + + onSaved(processedCredential) } return ( diff --git a/web/app/components/tools/utils/to-form-schema.ts b/web/app/components/tools/utils/to-form-schema.ts index ee7f3379ad..ae43e6f157 100644 --- a/web/app/components/tools/utils/to-form-schema.ts +++ b/web/app/components/tools/utils/to-form-schema.ts @@ -63,6 +63,16 @@ export const addDefaultValue = (value: Record, formSchemas: { varia const itemValue = value[formSchema.variable] if ((formSchema.default !== undefined) && (value === undefined || itemValue === null || itemValue === '' || itemValue === undefined)) newValues[formSchema.variable] = formSchema.default + + // Fix: Convert boolean field values to proper boolean type + if (formSchema.type === 'boolean' && itemValue !== undefined && itemValue !== null && itemValue !== '') { + if (typeof itemValue === 'string') + newValues[formSchema.variable] = itemValue === 'true' || itemValue === '1' || itemValue === 'True' + else if (typeof itemValue === 'number') + newValues[formSchema.variable] = itemValue === 1 + else if (typeof itemValue === 'boolean') + newValues[formSchema.variable] = itemValue + } }) return newValues } From da5c003f9788790277ae9dd41414a56ad644aab2 Mon Sep 17 00:00:00 2001 From: wanttobeamaster <45583625+wanttobeamaster@users.noreply.github.com> Date: Fri, 1 Aug 2025 14:14:11 +0800 Subject: [PATCH 098/415] chore: tablestore full text search support score normalization (#23255) Co-authored-by: xiaozhiqing.xzq --- api/.env.example | 1 + .../middleware/vdb/tablestore_config.py | 5 +++ .../vdb/tablestore/tablestore_vector.py | 40 ++++++++++++++++--- .../vdb/tablestore/test_tablestore.py | 22 +++++++++- docker/.env.example | 1 + docker/docker-compose.yaml | 1 + 6 files changed, 63 insertions(+), 7 deletions(-) diff --git a/api/.env.example b/api/.env.example index 18f2dbf647..4beabfecea 100644 --- a/api/.env.example +++ b/api/.env.example @@ -232,6 +232,7 @@ TABLESTORE_ENDPOINT=https://instance-name.cn-hangzhou.ots.aliyuncs.com TABLESTORE_INSTANCE_NAME=instance-name TABLESTORE_ACCESS_KEY_ID=xxx TABLESTORE_ACCESS_KEY_SECRET=xxx +TABLESTORE_NORMALIZE_FULLTEXT_BM25_SCORE=false # Tidb Vector configuration TIDB_VECTOR_HOST=xxx.eu-central-1.xxx.aws.tidbcloud.com diff --git a/api/configs/middleware/vdb/tablestore_config.py b/api/configs/middleware/vdb/tablestore_config.py index c4dcc0d465..1aab01c6e1 100644 --- a/api/configs/middleware/vdb/tablestore_config.py +++ b/api/configs/middleware/vdb/tablestore_config.py @@ -28,3 +28,8 @@ class TableStoreConfig(BaseSettings): description="AccessKey secret for the instance name", default=None, ) + + TABLESTORE_NORMALIZE_FULLTEXT_BM25_SCORE: bool = Field( + description="Whether to normalize full-text search scores to [0, 1]", + default=False, + ) diff --git a/api/core/rag/datasource/vdb/tablestore/tablestore_vector.py b/api/core/rag/datasource/vdb/tablestore/tablestore_vector.py index 784e27fc7f..91d667ff2c 100644 --- a/api/core/rag/datasource/vdb/tablestore/tablestore_vector.py +++ b/api/core/rag/datasource/vdb/tablestore/tablestore_vector.py @@ -1,5 +1,6 @@ import json import logging +import math from typing import Any, Optional import tablestore # type: ignore @@ -22,6 +23,7 @@ class TableStoreConfig(BaseModel): access_key_secret: Optional[str] = None instance_name: Optional[str] = None endpoint: Optional[str] = None + normalize_full_text_bm25_score: Optional[bool] = False @model_validator(mode="before") @classmethod @@ -47,6 +49,7 @@ class TableStoreVector(BaseVector): config.access_key_secret, config.instance_name, ) + self._normalize_full_text_bm25_score = config.normalize_full_text_bm25_score self._table_name = f"{collection_name}" self._index_name = f"{collection_name}_idx" self._tags_field = f"{Field.METADATA_KEY.value}_tags" @@ -131,8 +134,8 @@ class TableStoreVector(BaseVector): filtered_list = None if document_ids_filter: filtered_list = ["document_id=" + item for item in document_ids_filter] - - return self._search_by_full_text(query, filtered_list, top_k) + score_threshold = float(kwargs.get("score_threshold") or 0.0) + return self._search_by_full_text(query, filtered_list, top_k, score_threshold) def delete(self) -> None: self._delete_table_if_exist() @@ -318,7 +321,19 @@ class TableStoreVector(BaseVector): documents = sorted(documents, key=lambda x: x.metadata["score"] if x.metadata else 0, reverse=True) return documents - def _search_by_full_text(self, query: str, document_ids_filter: list[str] | None, top_k: int) -> list[Document]: + @staticmethod + def _normalize_score_exp_decay(score: float, k: float = 0.15) -> float: + """ + Args: + score: BM25 search score. + k: decay factor, the larger the k, the steeper the low score end + """ + normalized_score = 1 - math.exp(-k * score) + return max(0.0, min(1.0, normalized_score)) + + def _search_by_full_text( + self, query: str, document_ids_filter: list[str] | None, top_k: int, score_threshold: float + ) -> list[Document]: bool_query = tablestore.BoolQuery(must_queries=[], filter_queries=[], should_queries=[], must_not_queries=[]) bool_query.must_queries.append(tablestore.MatchQuery(text=query, field_name=Field.CONTENT_KEY.value)) @@ -339,15 +354,27 @@ class TableStoreVector(BaseVector): documents = [] for search_hit in search_response.search_hits: + score = None + if self._normalize_full_text_bm25_score: + score = self._normalize_score_exp_decay(search_hit.score) + + # skip when score is below threshold and use normalize score + if score and score <= score_threshold: + continue + ots_column_map = {} for col in search_hit.row[1]: ots_column_map[col[0]] = col[1] - vector_str = ots_column_map.get(Field.VECTOR.value) metadata_str = ots_column_map.get(Field.METADATA_KEY.value) - vector = json.loads(vector_str) if vector_str else None metadata = json.loads(metadata_str) if metadata_str else {} + vector_str = ots_column_map.get(Field.VECTOR.value) + vector = json.loads(vector_str) if vector_str else None + + if score: + metadata["score"] = score + documents.append( Document( page_content=ots_column_map.get(Field.CONTENT_KEY.value) or "", @@ -355,6 +382,8 @@ class TableStoreVector(BaseVector): metadata=metadata, ) ) + if self._normalize_full_text_bm25_score: + documents = sorted(documents, key=lambda x: x.metadata["score"] if x.metadata else 0, reverse=True) return documents @@ -375,5 +404,6 @@ class TableStoreVectorFactory(AbstractVectorFactory): instance_name=dify_config.TABLESTORE_INSTANCE_NAME, access_key_id=dify_config.TABLESTORE_ACCESS_KEY_ID, access_key_secret=dify_config.TABLESTORE_ACCESS_KEY_SECRET, + normalize_full_text_bm25_score=dify_config.TABLESTORE_NORMALIZE_FULLTEXT_BM25_SCORE, ), ) diff --git a/api/tests/integration_tests/vdb/tablestore/test_tablestore.py b/api/tests/integration_tests/vdb/tablestore/test_tablestore.py index da549af1b6..aebf3fbda1 100644 --- a/api/tests/integration_tests/vdb/tablestore/test_tablestore.py +++ b/api/tests/integration_tests/vdb/tablestore/test_tablestore.py @@ -2,6 +2,7 @@ import os import uuid import tablestore +from _pytest.python_api import approx from core.rag.datasource.vdb.tablestore.tablestore_vector import ( TableStoreConfig, @@ -16,7 +17,7 @@ from tests.integration_tests.vdb.test_vector_store import ( class TableStoreVectorTest(AbstractVectorTest): - def __init__(self): + def __init__(self, normalize_full_text_score: bool = False): super().__init__() self.vector = TableStoreVector( collection_name=self.collection_name, @@ -25,6 +26,7 @@ class TableStoreVectorTest(AbstractVectorTest): instance_name=os.getenv("TABLESTORE_INSTANCE_NAME"), access_key_id=os.getenv("TABLESTORE_ACCESS_KEY_ID"), access_key_secret=os.getenv("TABLESTORE_ACCESS_KEY_SECRET"), + normalize_full_text_bm25_score=normalize_full_text_score, ), ) @@ -64,7 +66,21 @@ class TableStoreVectorTest(AbstractVectorTest): docs = self.vector.search_by_full_text(get_example_text(), document_ids_filter=[self.example_doc_id]) assert len(docs) == 1 assert docs[0].metadata["doc_id"] == self.example_doc_id - assert not hasattr(docs[0], "score") + if self.vector._config.normalize_full_text_bm25_score: + assert docs[0].metadata["score"] == approx(0.1214, abs=1e-3) + else: + assert docs[0].metadata.get("score") is None + + # return none if normalize_full_text_score=true and score_threshold > 0 + docs = self.vector.search_by_full_text( + get_example_text(), document_ids_filter=[self.example_doc_id], score_threshold=0.5 + ) + if self.vector._config.normalize_full_text_bm25_score: + assert len(docs) == 0 + else: + assert len(docs) == 1 + assert docs[0].metadata["doc_id"] == self.example_doc_id + assert docs[0].metadata.get("score") is None docs = self.vector.search_by_full_text(get_example_text(), document_ids_filter=[str(uuid.uuid4())]) assert len(docs) == 0 @@ -80,3 +96,5 @@ class TableStoreVectorTest(AbstractVectorTest): def test_tablestore_vector(setup_mock_redis): TableStoreVectorTest().run_all_tests() + TableStoreVectorTest(normalize_full_text_score=True).run_all_tests() + TableStoreVectorTest(normalize_full_text_score=False).run_all_tests() diff --git a/docker/.env.example b/docker/.env.example index 7ecdf899fe..13cac189aa 100644 --- a/docker/.env.example +++ b/docker/.env.example @@ -653,6 +653,7 @@ TABLESTORE_ENDPOINT=https://instance-name.cn-hangzhou.ots.aliyuncs.com TABLESTORE_INSTANCE_NAME=instance-name TABLESTORE_ACCESS_KEY_ID=xxx TABLESTORE_ACCESS_KEY_SECRET=xxx +TABLESTORE_NORMALIZE_FULLTEXT_BM25_SCORE=false # ------------------------------ # Knowledge Configuration diff --git a/docker/docker-compose.yaml b/docker/docker-compose.yaml index ae83aa758d..690dccb1a8 100644 --- a/docker/docker-compose.yaml +++ b/docker/docker-compose.yaml @@ -312,6 +312,7 @@ x-shared-env: &shared-api-worker-env TABLESTORE_INSTANCE_NAME: ${TABLESTORE_INSTANCE_NAME:-instance-name} TABLESTORE_ACCESS_KEY_ID: ${TABLESTORE_ACCESS_KEY_ID:-xxx} TABLESTORE_ACCESS_KEY_SECRET: ${TABLESTORE_ACCESS_KEY_SECRET:-xxx} + TABLESTORE_NORMALIZE_FULLTEXT_BM25_SCORE: ${TABLESTORE_NORMALIZE_FULLTEXT_BM25_SCORE:-false} UPLOAD_FILE_SIZE_LIMIT: ${UPLOAD_FILE_SIZE_LIMIT:-15} UPLOAD_FILE_BATCH_LIMIT: ${UPLOAD_FILE_BATCH_LIMIT:-5} ETL_TYPE: ${ETL_TYPE:-dify} From f7016fd92218e30d4fccd3957a9d2332c89913e5 Mon Sep 17 00:00:00 2001 From: HyaCinth <88471803+HyaCiovo@users.noreply.github.com> Date: Fri, 1 Aug 2025 14:18:38 +0800 Subject: [PATCH 099/415] chore: Optimize component styles and interactions (#23250) (#23251) --- .../plugins/marketplace/empty/index.tsx | 2 +- .../block-selector/tool/action-item.tsx | 1 + .../nodes/agent/components/tool-icon.tsx | 56 ++++++++++--------- 3 files changed, 31 insertions(+), 28 deletions(-) diff --git a/web/app/components/plugins/marketplace/empty/index.tsx b/web/app/components/plugins/marketplace/empty/index.tsx index 0306d5003d..a9cf125a15 100644 --- a/web/app/components/plugins/marketplace/empty/index.tsx +++ b/web/app/components/plugins/marketplace/empty/index.tsx @@ -28,7 +28,7 @@ const Empty = ({
    11 && 'mb-0', lightCard && 'bg-background-default-lighter opacity-75', diff --git a/web/app/components/workflow/block-selector/tool/action-item.tsx b/web/app/components/workflow/block-selector/tool/action-item.tsx index e5e33614b0..4a2ed4a87b 100644 --- a/web/app/components/workflow/block-selector/tool/action-item.tsx +++ b/web/app/components/workflow/block-selector/tool/action-item.tsx @@ -34,6 +34,7 @@ const ToolItem: FC = ({ diff --git a/web/app/components/workflow/nodes/agent/components/tool-icon.tsx b/web/app/components/workflow/nodes/agent/components/tool-icon.tsx index 4ff0cd780d..8e6993a78d 100644 --- a/web/app/components/workflow/nodes/agent/components/tool-icon.tsx +++ b/web/app/components/workflow/nodes/agent/components/tool-icon.tsx @@ -61,37 +61,39 @@ export const ToolIcon = memo(({ providerName }: ToolIconProps) => { >
    - {(() => { - if (iconFetchError || !icon) +
    + {(() => { + if (iconFetchError || !icon) + return + if (typeof icon === 'string') { + return tool icon setIconFetchError(true)} + /> + } + if (typeof icon === 'object') { + return + } return - if (typeof icon === 'string') { - return tool icon setIconFetchError(true)} - /> - } - if (typeof icon === 'object') { - return - } - return - })()} - {indicator && } + })()} +
    + {indicator && }
    }) From 05b002a8b713d762a4ae3b9549cf6eb8fa494b44 Mon Sep 17 00:00:00 2001 From: Leo Zhang Date: Fri, 1 Aug 2025 14:22:59 +0800 Subject: [PATCH 100/415] Add a practical AKS one-click deployment Helm (#23253) --- README.md | 4 ++++ README_AR.md | 4 ++++ README_BN.md | 4 ++++ README_CN.md | 3 +++ README_DE.md | 4 ++++ README_ES.md | 4 ++++ README_FR.md | 4 ++++ README_JA.md | 4 ++++ README_KL.md | 4 ++++ README_KR.md | 4 ++++ README_PT.md | 4 ++++ README_SI.md | 4 ++++ README_TR.md | 4 ++++ README_TW.md | 4 ++++ README_VI.md | 4 ++++ 15 files changed, 59 insertions(+) diff --git a/README.md b/README.md index 16a1268cb1..775f6f351f 100644 --- a/README.md +++ b/README.md @@ -235,6 +235,10 @@ Quickly deploy Dify to Alibaba cloud with [Alibaba Cloud Computing Nest](https:/ One-Click deploy Dify to Alibaba Cloud with [Alibaba Cloud Data Management](https://www.alibabacloud.com/help/en/dms/dify-in-invitational-preview/) +#### Deploy to AKS with Azure Devops Pipeline + +One-Click deploy Dify to AKS with [Azure Devops Pipeline Helm Chart by @LeoZhang](https://github.com/Ruiruiz30/Dify-helm-chart-AKS) + ## Contributing diff --git a/README_AR.md b/README_AR.md index d2cb0098a3..e7a4dbdb27 100644 --- a/README_AR.md +++ b/README_AR.md @@ -217,6 +217,10 @@ docker compose up -d انشر ​​Dify على علي بابا كلاود بنقرة واحدة باستخدام [Alibaba Cloud Data Management](https://www.alibabacloud.com/help/en/dms/dify-in-invitational-preview/) +#### استخدام Azure Devops Pipeline للنشر على AKS + +انشر Dify على AKS بنقرة واحدة باستخدام [Azure Devops Pipeline Helm Chart by @LeoZhang](https://github.com/Ruiruiz30/Dify-helm-chart-AKS) + ## المساهمة diff --git a/README_BN.md b/README_BN.md index f57413ec8b..e4da437eff 100644 --- a/README_BN.md +++ b/README_BN.md @@ -235,6 +235,10 @@ GitHub-এ ডিফাইকে স্টার দিয়ে রাখুন [Alibaba Cloud Data Management](https://www.alibabacloud.com/help/en/dms/dify-in-invitational-preview/) + #### AKS-এ ডিপ্লয় করার জন্য Azure Devops Pipeline ব্যবহার + +[Azure Devops Pipeline Helm Chart by @LeoZhang](https://github.com/Ruiruiz30/Dify-helm-chart-AKS) ব্যবহার করে Dify কে AKS-এ এক ক্লিকে ডিপ্লয় করুন + ## Contributing diff --git a/README_CN.md b/README_CN.md index e9c73eb48b..82149519d3 100644 --- a/README_CN.md +++ b/README_CN.md @@ -233,6 +233,9 @@ docker compose up -d 使用 [阿里云数据管理DMS](https://help.aliyun.com/zh/dms/dify-in-invitational-preview) 将 Dify 一键部署到 阿里云 +#### 使用 Azure Devops Pipeline 部署到AKS + +使用[Azure Devops Pipeline Helm Chart by @LeoZhang](https://github.com/Ruiruiz30/Dify-helm-chart-AKS) 将 Dify 一键部署到 AKS ## Star History diff --git a/README_DE.md b/README_DE.md index d31a56542d..2420ac0392 100644 --- a/README_DE.md +++ b/README_DE.md @@ -230,6 +230,10 @@ Bereitstellung von Dify auf AWS mit [CDK](https://aws.amazon.com/cdk/) Ein-Klick-Bereitstellung von Dify in der Alibaba Cloud mit [Alibaba Cloud Data Management](https://www.alibabacloud.com/help/en/dms/dify-in-invitational-preview/) +#### Verwendung von Azure Devops Pipeline für AKS-Bereitstellung + +Stellen Sie Dify mit einem Klick in AKS bereit, indem Sie [Azure Devops Pipeline Helm Chart by @LeoZhang](https://github.com/Ruiruiz30/Dify-helm-chart-AKS) verwenden + ## Contributing diff --git a/README_ES.md b/README_ES.md index 918bfe2286..4fa59dc18f 100644 --- a/README_ES.md +++ b/README_ES.md @@ -230,6 +230,10 @@ Despliegue Dify en AWS usando [CDK](https://aws.amazon.com/cdk/) Despliega Dify en Alibaba Cloud con un solo clic con [Alibaba Cloud Data Management](https://www.alibabacloud.com/help/en/dms/dify-in-invitational-preview/) +#### Uso de Azure Devops Pipeline para implementar en AKS + +Implementa Dify en AKS con un clic usando [Azure Devops Pipeline Helm Chart by @LeoZhang](https://github.com/Ruiruiz30/Dify-helm-chart-AKS) + ## Contribuir diff --git a/README_FR.md b/README_FR.md index 56ca878aae..dcbc869620 100644 --- a/README_FR.md +++ b/README_FR.md @@ -228,6 +228,10 @@ Déployez Dify sur AWS en utilisant [CDK](https://aws.amazon.com/cdk/) Déployez Dify en un clic sur Alibaba Cloud avec [Alibaba Cloud Data Management](https://www.alibabacloud.com/help/en/dms/dify-in-invitational-preview/) +#### Utilisation d'Azure Devops Pipeline pour déployer sur AKS + +Déployez Dify sur AKS en un clic en utilisant [Azure Devops Pipeline Helm Chart by @LeoZhang](https://github.com/Ruiruiz30/Dify-helm-chart-AKS) + ## Contribuer diff --git a/README_JA.md b/README_JA.md index 6d277a36ed..d840fd6419 100644 --- a/README_JA.md +++ b/README_JA.md @@ -227,6 +227,10 @@ docker compose up -d #### Alibaba Cloud Data Management [Alibaba Cloud Data Management](https://www.alibabacloud.com/help/en/dms/dify-in-invitational-preview/) を利用して、DifyをAlibaba Cloudへワンクリックでデプロイできます +#### AKSへのデプロイにAzure Devops Pipelineを使用 + +[Azure Devops Pipeline Helm Chart by @LeoZhang](https://github.com/Ruiruiz30/Dify-helm-chart-AKS)を使用してDifyをAKSにワンクリックでデプロイ + ## 貢献 diff --git a/README_KL.md b/README_KL.md index dac67eeb29..41c7969e1c 100644 --- a/README_KL.md +++ b/README_KL.md @@ -228,6 +228,10 @@ wa'logh nIqHom neH ghun deployment toy'wI' [CDK](https://aws.amazon.com/cdk/) lo [Alibaba Cloud Data Management](https://www.alibabacloud.com/help/en/dms/dify-in-invitational-preview/) +#### AKS 'e' Deploy je Azure Devops Pipeline lo'laH + +[Azure Devops Pipeline Helm Chart by @LeoZhang](https://github.com/Ruiruiz30/Dify-helm-chart-AKS) lo'laH Dify AKS 'e' wa'DIch click 'e' Deploy + ## Contributing diff --git a/README_KR.md b/README_KR.md index 072481da02..d4b31a8928 100644 --- a/README_KR.md +++ b/README_KR.md @@ -222,6 +222,10 @@ Dify를 Kubernetes에 배포하고 프리미엄 스케일링 설정을 구성했 [Alibaba Cloud Data Management](https://www.alibabacloud.com/help/en/dms/dify-in-invitational-preview/)를 통해 원클릭으로 Dify를 Alibaba Cloud에 배포할 수 있습니다 +#### AKS에 배포하기 위해 Azure Devops Pipeline 사용 + +[Azure Devops Pipeline Helm Chart by @LeoZhang](https://github.com/Ruiruiz30/Dify-helm-chart-AKS)을 사용하여 Dify를 AKS에 원클릭으로 배포 + ## 기여 diff --git a/README_PT.md b/README_PT.md index 1260f8e6fd..94452cb233 100644 --- a/README_PT.md +++ b/README_PT.md @@ -227,6 +227,10 @@ Implante o Dify na AWS usando [CDK](https://aws.amazon.com/cdk/) Implante o Dify na Alibaba Cloud com um clique usando o [Alibaba Cloud Data Management](https://www.alibabacloud.com/help/en/dms/dify-in-invitational-preview/) +#### Usando Azure Devops Pipeline para Implantar no AKS + +Implante o Dify no AKS com um clique usando [Azure Devops Pipeline Helm Chart by @LeoZhang](https://github.com/Ruiruiz30/Dify-helm-chart-AKS) + ## Contribuindo diff --git a/README_SI.md b/README_SI.md index 7ded001d86..d840e9155f 100644 --- a/README_SI.md +++ b/README_SI.md @@ -228,6 +228,10 @@ Uvedite Dify v AWS z uporabo [CDK](https://aws.amazon.com/cdk/) Z enim klikom namestite Dify na Alibaba Cloud z [Alibaba Cloud Data Management](https://www.alibabacloud.com/help/en/dms/dify-in-invitational-preview/) +#### Uporaba Azure Devops Pipeline za uvajanje v AKS + +Z enim klikom namestite Dify v AKS z uporabo [Azure Devops Pipeline Helm Chart by @LeoZhang](https://github.com/Ruiruiz30/Dify-helm-chart-AKS) + ## Prispevam diff --git a/README_TR.md b/README_TR.md index 37953f0de1..470a7570e0 100644 --- a/README_TR.md +++ b/README_TR.md @@ -221,6 +221,10 @@ Dify'ı bulut platformuna tek tıklamayla dağıtın [terraform](https://www.ter [Alibaba Cloud Data Management](https://www.alibabacloud.com/help/en/dms/dify-in-invitational-preview/) kullanarak Dify'ı tek tıkla Alibaba Cloud'a dağıtın +#### AKS'ye Dağıtım için Azure Devops Pipeline Kullanımı + +[Azure Devops Pipeline Helm Chart by @LeoZhang](https://github.com/Ruiruiz30/Dify-helm-chart-AKS) kullanarak Dify'ı tek tıkla AKS'ye dağıtın + ## Katkıda Bulunma diff --git a/README_TW.md b/README_TW.md index f70d6a25f6..18f1d2754a 100644 --- a/README_TW.md +++ b/README_TW.md @@ -233,6 +233,10 @@ Dify 的所有功能都提供相應的 API,因此您可以輕鬆地將 Dify 透過 [阿里雲數據管理DMS](https://www.alibabacloud.com/help/en/dms/dify-in-invitational-preview/),一鍵將 Dify 部署至阿里雲 +#### 使用 Azure Devops Pipeline 部署到AKS + +使用[Azure Devops Pipeline Helm Chart by @LeoZhang](https://github.com/Ruiruiz30/Dify-helm-chart-AKS) 將 Dify 一鍵部署到 AKS + ## 貢獻 diff --git a/README_VI.md b/README_VI.md index ddd9aa95f6..2ab6da80fc 100644 --- a/README_VI.md +++ b/README_VI.md @@ -224,6 +224,10 @@ Triển khai Dify trên AWS bằng [CDK](https://aws.amazon.com/cdk/) Triển khai Dify lên Alibaba Cloud chỉ với một cú nhấp chuột bằng [Alibaba Cloud Data Management](https://www.alibabacloud.com/help/en/dms/dify-in-invitational-preview/) +#### Sử dụng Azure Devops Pipeline để Triển khai lên AKS + +Triển khai Dify lên AKS chỉ với một cú nhấp chuột bằng [Azure Devops Pipeline Helm Chart bởi @LeoZhang](https://github.com/Ruiruiz30/Dify-helm-chart-AKS) + ## Đóng góp From 759ded3e3a61669f1d88c16b5a2b83c9c6e379f0 Mon Sep 17 00:00:00 2001 From: NeatGuyCoding <15627489+NeatGuyCoding@users.noreply.github.com> Date: Fri, 1 Aug 2025 14:51:16 +0800 Subject: [PATCH 101/415] minor fix: fix default for status of TidbAuthBinding in compatible with various versions (#22288) --- ...32b3f888abf_manual_dataset_field_update.py | 25 +++++++++++++++++++ api/models/dataset.py | 2 +- .../configuration/base/warning-mask/index.tsx | 2 +- .../detail/completed/child-segment-detail.tsx | 2 -- web/app/components/workflow/index.tsx | 2 -- .../components/agent-strategy-selector.tsx | 1 - .../workflow/nodes/agent/use-config.ts | 1 - .../workflow/nodes/llm/use-config.ts | 2 -- web/app/components/workflow/types.ts | 2 +- 9 files changed, 28 insertions(+), 11 deletions(-) create mode 100644 api/migrations/versions/2025_07_24_1450-532b3f888abf_manual_dataset_field_update.py diff --git a/api/migrations/versions/2025_07_24_1450-532b3f888abf_manual_dataset_field_update.py b/api/migrations/versions/2025_07_24_1450-532b3f888abf_manual_dataset_field_update.py new file mode 100644 index 0000000000..1664fb99c4 --- /dev/null +++ b/api/migrations/versions/2025_07_24_1450-532b3f888abf_manual_dataset_field_update.py @@ -0,0 +1,25 @@ +"""manual dataset field update + +Revision ID: 532b3f888abf +Revises: 8bcc02c9bd07 +Create Date: 2025-07-24 14:50:48.779833 + +""" +from alembic import op +import models as models +import sqlalchemy as sa + + +# revision identifiers, used by Alembic. +revision = '532b3f888abf' +down_revision = '8bcc02c9bd07' +branch_labels = None +depends_on = None + + +def upgrade(): + op.execute("ALTER TABLE tidb_auth_bindings ALTER COLUMN status SET DEFAULT 'CREATING'::character varying") + + +def downgrade(): + op.execute("ALTER TABLE tidb_auth_bindings ALTER COLUMN status SET DEFAULT 'CREATING'") diff --git a/api/models/dataset.py b/api/models/dataset.py index 4d41d0c8b3..e62101ae73 100644 --- a/api/models/dataset.py +++ b/api/models/dataset.py @@ -970,7 +970,7 @@ class TidbAuthBinding(Base): cluster_id: Mapped[str] = mapped_column(String(255), nullable=False) cluster_name: Mapped[str] = mapped_column(String(255), nullable=False) active: Mapped[bool] = mapped_column(db.Boolean, nullable=False, server_default=db.text("false")) - status = mapped_column(String(255), nullable=False, server_default=db.text("CREATING")) + status = mapped_column(String(255), nullable=False, server_default=db.text("'CREATING'::character varying")) account: Mapped[str] = mapped_column(String(255), nullable=False) password: Mapped[str] = mapped_column(String(255), nullable=False) created_at: Mapped[datetime] = mapped_column(DateTime, nullable=False, server_default=func.current_timestamp()) diff --git a/web/app/components/app/configuration/base/warning-mask/index.tsx b/web/app/components/app/configuration/base/warning-mask/index.tsx index 8bd7ea12aa..78d01703f8 100644 --- a/web/app/components/app/configuration/base/warning-mask/index.tsx +++ b/web/app/components/app/configuration/base/warning-mask/index.tsx @@ -22,7 +22,7 @@ const WarningMask: FC = ({ footer, }) => { return ( -
    {warningIcon}
    diff --git a/web/app/components/datasets/documents/detail/completed/child-segment-detail.tsx b/web/app/components/datasets/documents/detail/completed/child-segment-detail.tsx index 4fb1e90657..e686226e5f 100644 --- a/web/app/components/datasets/documents/detail/completed/child-segment-detail.tsx +++ b/web/app/components/datasets/documents/detail/completed/child-segment-detail.tsx @@ -60,7 +60,6 @@ const ChildSegmentDetail: FC = ({ const wordCountText = useMemo(() => { const count = content.length return `${formatNumber(count)} ${t('datasetDocuments.segment.characters', { count })}` - // eslint-disable-next-line react-hooks/exhaustive-deps }, [content.length]) const EditTimeText = useMemo(() => { @@ -69,7 +68,6 @@ const ChildSegmentDetail: FC = ({ dateFormat: `${t('datasetDocuments.segment.dateTimeFormat')}`, }) return `${t('datasetDocuments.segment.editedAt')} ${timeText}` - // eslint-disable-next-line react-hooks/exhaustive-deps }, [childChunkInfo?.updated_at]) return ( diff --git a/web/app/components/workflow/index.tsx b/web/app/components/workflow/index.tsx index 3356188618..a5894451c1 100644 --- a/web/app/components/workflow/index.tsx +++ b/web/app/components/workflow/index.tsx @@ -190,7 +190,6 @@ export const Workflow: FC = memo(({ return () => { handleSyncWorkflowDraft(true, true) } - // eslint-disable-next-line react-hooks/exhaustive-deps }, []) const { handleRefreshWorkflowDraft } = useWorkflowRefreshDraft() @@ -282,7 +281,6 @@ export const Workflow: FC = memo(({ const { fetchInspectVars } = useSetWorkflowVarsWithValue() useEffect(() => { fetchInspectVars() - // eslint-disable-next-line react-hooks/exhaustive-deps }, []) const store = useStoreApi() diff --git a/web/app/components/workflow/nodes/_base/components/agent-strategy-selector.tsx b/web/app/components/workflow/nodes/_base/components/agent-strategy-selector.tsx index c872952900..9e67debd58 100644 --- a/web/app/components/workflow/nodes/_base/components/agent-strategy-selector.tsx +++ b/web/app/components/workflow/nodes/_base/components/agent-strategy-selector.tsx @@ -143,7 +143,6 @@ export const AgentStrategySelector = memo((props: AgentStrategySelectorProps) => category: PluginType.agent, }) } - // eslint-disable-next-line react-hooks/exhaustive-deps }, [query]) const pluginRef = useRef(null) diff --git a/web/app/components/workflow/nodes/agent/use-config.ts b/web/app/components/workflow/nodes/agent/use-config.ts index dd9236f24f..01abf7f761 100644 --- a/web/app/components/workflow/nodes/agent/use-config.ts +++ b/web/app/components/workflow/nodes/agent/use-config.ts @@ -151,7 +151,6 @@ const useConfig = (id: string, payload: AgentNodeType) => { return const newData = formattingLegacyData() setInputs(newData) - // eslint-disable-next-line react-hooks/exhaustive-deps }, [currentStrategy]) // vars diff --git a/web/app/components/workflow/nodes/llm/use-config.ts b/web/app/components/workflow/nodes/llm/use-config.ts index b8516caed8..8c22068671 100644 --- a/web/app/components/workflow/nodes/llm/use-config.ts +++ b/web/app/components/workflow/nodes/llm/use-config.ts @@ -101,7 +101,6 @@ const useConfig = (id: string, payload: LLMNodeType) => { }) setInputs(newInputs) } - // eslint-disable-next-line react-hooks/exhaustive-deps }, [defaultConfig, isChatModel]) const [modelChanged, setModelChanged] = useState(false) @@ -161,7 +160,6 @@ const useConfig = (id: string, payload: LLMNodeType) => { return setModelChanged(false) handleVisionConfigAfterModelChanged() - // eslint-disable-next-line react-hooks/exhaustive-deps }, [isVisionModel, modelChanged]) // variables diff --git a/web/app/components/workflow/types.ts b/web/app/components/workflow/types.ts index d8153cf08f..11a424c5dd 100644 --- a/web/app/components/workflow/types.ts +++ b/web/app/components/workflow/types.ts @@ -447,6 +447,6 @@ export enum VersionHistoryContextMenuOptions { delete = 'delete', } -export interface ChildNodeTypeCount { +export type ChildNodeTypeCount = { [key: string]: number; } From fd086b06a6461210086680e5f0a85fa596267cb8 Mon Sep 17 00:00:00 2001 From: Yongtao Huang Date: Fri, 1 Aug 2025 15:21:31 +0800 Subject: [PATCH 102/415] CI: restrict autofix.ci to run only in official repo (#23267) --- .github/workflows/autofix.yml | 1 + 1 file changed, 1 insertion(+) diff --git a/.github/workflows/autofix.yml b/.github/workflows/autofix.yml index 5e290c5d02..152ff3b648 100644 --- a/.github/workflows/autofix.yml +++ b/.github/workflows/autofix.yml @@ -9,6 +9,7 @@ permissions: jobs: autofix: + if: github.repository == 'langgenius/dify' runs-on: ubuntu-latest steps: - uses: actions/checkout@v4 From f78b903a4946e0a142d0e8ffe78d78d590b4d9f7 Mon Sep 17 00:00:00 2001 From: zxhlyh Date: Fri, 1 Aug 2025 15:43:36 +0800 Subject: [PATCH 103/415] Chore/variable label (#23270) --- .../workflow-variable-block/component.tsx | 77 ++---------- .../readonly-input-with-select-var.tsx | 45 ++----- .../nodes/_base/components/variable-tag.tsx | 68 +++-------- .../variable/var-reference-picker.tsx | 18 ++- .../variable/var-reference-vars.tsx | 21 ++-- .../variable-label/base/variable-icon.tsx | 28 +++++ .../variable-label/base/variable-label.tsx | 83 +++++++++++++ .../variable-label/base/variable-name.tsx | 30 +++++ .../base/variable-node-label.tsx | 37 ++++++ .../variable/variable-label/hooks.ts | 89 ++++++++++++++ .../variable/variable-label/index.tsx | 5 + .../variable/variable-label/types.ts | 19 +++ .../variable-icon-with-color.tsx | 30 +++++ .../variable-label-in-editor.tsx | 40 +++++++ .../variable-label/variable-label-in-node.tsx | 17 +++ .../variable-label-in-select.tsx | 13 ++ .../variable-label/variable-label-in-text.tsx | 17 +++ .../workflow/nodes/assigner/node.tsx | 42 +++---- .../nodes/document-extractor/node.tsx | 19 ++- .../components/workflow/nodes/end/node.tsx | 47 ++------ .../components/workflow/nodes/http/node.tsx | 2 +- .../components/condition-files-list-value.tsx | 30 ++--- .../if-else/components/condition-value.tsx | 32 ++--- .../workflow/nodes/list-operator/node.tsx | 19 ++- .../components/condition-files-list-value.tsx | 30 ++--- .../nodes/loop/components/condition-value.tsx | 29 ++--- .../components/node-group-item.tsx | 46 ++++---- .../components/node-variable-item.tsx | 111 ------------------ .../workflow/variable-inspect/group.tsx | 14 +-- .../workflow/variable-inspect/right.tsx | 20 ++-- 30 files changed, 595 insertions(+), 483 deletions(-) create mode 100644 web/app/components/workflow/nodes/_base/components/variable/variable-label/base/variable-icon.tsx create mode 100644 web/app/components/workflow/nodes/_base/components/variable/variable-label/base/variable-label.tsx create mode 100644 web/app/components/workflow/nodes/_base/components/variable/variable-label/base/variable-name.tsx create mode 100644 web/app/components/workflow/nodes/_base/components/variable/variable-label/base/variable-node-label.tsx create mode 100644 web/app/components/workflow/nodes/_base/components/variable/variable-label/hooks.ts create mode 100644 web/app/components/workflow/nodes/_base/components/variable/variable-label/index.tsx create mode 100644 web/app/components/workflow/nodes/_base/components/variable/variable-label/types.ts create mode 100644 web/app/components/workflow/nodes/_base/components/variable/variable-label/variable-icon-with-color.tsx create mode 100644 web/app/components/workflow/nodes/_base/components/variable/variable-label/variable-label-in-editor.tsx create mode 100644 web/app/components/workflow/nodes/_base/components/variable/variable-label/variable-label-in-node.tsx create mode 100644 web/app/components/workflow/nodes/_base/components/variable/variable-label/variable-label-in-select.tsx create mode 100644 web/app/components/workflow/nodes/_base/components/variable/variable-label/variable-label-in-text.tsx delete mode 100644 web/app/components/workflow/nodes/variable-assigner/components/node-variable-item.tsx diff --git a/web/app/components/base/prompt-editor/plugins/workflow-variable-block/component.tsx b/web/app/components/base/prompt-editor/plugins/workflow-variable-block/component.tsx index da5ad84cb1..06b583543a 100644 --- a/web/app/components/base/prompt-editor/plugins/workflow-variable-block/component.tsx +++ b/web/app/components/base/prompt-editor/plugins/workflow-variable-block/component.tsx @@ -10,10 +10,6 @@ import { } from 'lexical' import { mergeRegister } from '@lexical/utils' import { useLexicalComposerContext } from '@lexical/react/LexicalComposerContext' -import { - RiErrorWarningFill, - RiMoreLine, -} from '@remixicon/react' import { useReactFlow, useStoreApi } from 'reactflow' import { useSelectOrDelete } from '../../hooks' import type { WorkflowNodesMap } from './node' @@ -22,17 +18,15 @@ import { DELETE_WORKFLOW_VARIABLE_BLOCK_COMMAND, UPDATE_WORKFLOW_NODES_MAP, } from './index' -import cn from '@/utils/classnames' -import { Variable02 } from '@/app/components/base/icons/src/vender/solid/development' -import { BubbleX, Env } from '@/app/components/base/icons/src/vender/line/others' -import { VarBlockIcon } from '@/app/components/workflow/block-icon' -import { Line3 } from '@/app/components/base/icons/src/public/common' import { isConversationVar, isENV, isSystemVar } from '@/app/components/workflow/nodes/_base/components/variable/utils' import Tooltip from '@/app/components/base/tooltip' import { isExceptionVariable } from '@/app/components/workflow/utils' import VarFullPathPanel from '@/app/components/workflow/nodes/_base/components/variable/var-full-path-panel' import { Type } from '@/app/components/workflow/nodes/llm/types' import type { ValueSelector, Var } from '@/app/components/workflow/types' +import { + VariableLabelInEditor, +} from '@/app/components/workflow/nodes/_base/components/variable/variable-label' type WorkflowVariableBlockComponentProps = { nodeKey: string @@ -126,69 +120,22 @@ const WorkflowVariableBlockComponent = ({ }, [node, reactflow, store]) const Item = ( -
    { e.stopPropagation() handleVariableJump() }} + isExceptionVariable={isException} + errorMsg={!variableValid ? t('workflow.errorMsg.invalidVariable') : undefined} + isSelected={isSelected} ref={ref} - > - {!isEnv && !isChatVar && ( -
    - { - node?.type && ( -
    - -
    - ) - } -
    {node?.title}
    - -
    - )} - {isShowAPart && ( -
    - - -
    - )} - -
    - {!isEnv && !isChatVar && } - {isEnv && } - {isChatVar && } -
    {varName}
    - { - !variableValid && ( - - ) - } -
    -
    + notShowFullPath={isShowAPart} + /> ) - if (!variableValid) { - return ( - - {Item} - - ) - } - if (!node) return Item diff --git a/web/app/components/workflow/nodes/_base/components/readonly-input-with-select-var.tsx b/web/app/components/workflow/nodes/_base/components/readonly-input-with-select-var.tsx index 4a4ca454d3..c1927011dc 100644 --- a/web/app/components/workflow/nodes/_base/components/readonly-input-with-select-var.tsx +++ b/web/app/components/workflow/nodes/_base/components/readonly-input-with-select-var.tsx @@ -4,12 +4,10 @@ import React from 'react' import cn from 'classnames' import { useWorkflow } from '../../../hooks' import { BlockEnum } from '../../../types' -import { VarBlockIcon } from '../../../block-icon' -import { getNodeInfoById, isConversationVar, isENV, isSystemVar } from './variable/utils' -import { Line3 } from '@/app/components/base/icons/src/public/common' -import { Variable02 } from '@/app/components/base/icons/src/vender/solid/development' -import { BubbleX, Env } from '@/app/components/base/icons/src/vender/line/others' -import { RiMoreLine } from '@remixicon/react' +import { getNodeInfoById, isSystemVar } from './variable/utils' +import { + VariableLabelInText, +} from '@/app/components/workflow/nodes/_base/components/variable/variable-label' type Props = { nodeId: string value: string @@ -42,40 +40,17 @@ const ReadonlyInputWithSelectVar: FC = ({ const value = vars[index].split('.') const isSystem = isSystemVar(value) - const isEnv = isENV(value) - const isChatVar = isConversationVar(value) const node = (isSystem ? startNode : getNodeInfoById(availableNodes, value[0]))?.data - const varName = `${isSystem ? 'sys.' : ''}${value[value.length - 1]}` const isShowAPart = value.length > 2 return ( {str} -
    - {!isEnv && !isChatVar && ( -
    -
    - -
    -
    {node?.title}
    - -
    - )} - {isShowAPart && ( -
    - - -
    - )} -
    - {!isEnv && !isChatVar && } - {isEnv && } - {isChatVar && } -
    {varName}
    -
    -
    +
    ) }) return html diff --git a/web/app/components/workflow/nodes/_base/components/variable-tag.tsx b/web/app/components/workflow/nodes/_base/components/variable-tag.tsx index d73a3d4924..163cbba49e 100644 --- a/web/app/components/workflow/nodes/_base/components/variable-tag.tsx +++ b/web/app/components/workflow/nodes/_base/components/variable-tag.tsx @@ -1,9 +1,6 @@ import { useCallback, useMemo } from 'react' import { useNodes, useReactFlow, useStoreApi } from 'reactflow' -import { capitalize } from 'lodash-es' import { useTranslation } from 'react-i18next' -import { RiErrorWarningFill } from '@remixicon/react' -import { VarBlockIcon } from '@/app/components/workflow/block-icon' import type { CommonNodeType, Node, @@ -11,13 +8,11 @@ import type { VarType, } from '@/app/components/workflow/types' import { BlockEnum } from '@/app/components/workflow/types' -import { Line3 } from '@/app/components/base/icons/src/public/common' -import { Variable02 } from '@/app/components/base/icons/src/vender/solid/development' -import { BubbleX, Env } from '@/app/components/base/icons/src/vender/line/others' import { getNodeInfoById, isConversationVar, isENV, isSystemVar } from '@/app/components/workflow/nodes/_base/components/variable/utils' -import Tooltip from '@/app/components/base/tooltip' -import cn from '@/utils/classnames' import { isExceptionVariable } from '@/app/components/workflow/utils' +import { + VariableLabelInSelect, +} from '@/app/components/workflow/nodes/_base/components/variable/variable-label' type VariableTagProps = { valueSelector: ValueSelector @@ -73,51 +68,20 @@ const VariableTag = ({ const { t } = useTranslation() return ( - -
    { - if (e.metaKey || e.ctrlKey) { - e.stopPropagation() - handleVariableJump() - } - }} - > - {(!isEnv && !isChatVar && <> - {node && ( - <> - -
    - {node?.data.title} -
    - - )} - - - )} - {isEnv && } - {isChatVar && } -
    - {variableName} -
    - { - !isShort && varType && ( -
    {capitalize(varType)}
    - ) + { + if (e.metaKey || e.ctrlKey) { + e.stopPropagation() + handleVariableJump() } - {!isValid && } -
    -
    + }} + errorMsg={!isValid ? t('workflow.errorMsg.invalidVariable') : undefined} + isExceptionVariable={isException} + /> ) } diff --git a/web/app/components/workflow/nodes/_base/components/variable/var-reference-picker.tsx b/web/app/components/workflow/nodes/_base/components/variable/var-reference-picker.tsx index e6f3ce1fa1..0e57db0d8f 100644 --- a/web/app/components/workflow/nodes/_base/components/variable/var-reference-picker.tsx +++ b/web/app/components/workflow/nodes/_base/components/variable/var-reference-picker.tsx @@ -23,7 +23,6 @@ import { type CredentialFormSchema, type FormOption, FormTypeEnum } from '@/app/ import { BlockEnum } from '@/app/components/workflow/types' import { VarBlockIcon } from '@/app/components/workflow/block-icon' import { Line3 } from '@/app/components/base/icons/src/public/common' -import { BubbleX, Env } from '@/app/components/base/icons/src/vender/line/others' import { Variable02 } from '@/app/components/base/icons/src/vender/solid/development' import { PortalToFollowElem, @@ -44,6 +43,7 @@ import VarFullPathPanel from './var-full-path-panel' import { noop } from 'lodash-es' import { useFetchDynamicOptions } from '@/service/use-plugins' import type { Tool } from '@/app/components/tools/types' +import { VariableIconWithColor } from '@/app/components/workflow/nodes/_base/components/variable/variable-label' const TRIGGER_DEFAULT_WIDTH = 227 @@ -138,7 +138,6 @@ const VarReferencePicker: FC = ({ useEffect(() => { if (triggerRef.current) setTriggerWidth(triggerRef.current.clientWidth) - // eslint-disable-next-line react-hooks/exhaustive-deps }, [triggerRef.current]) const [varKindType, setVarKindType] = useState(defaultVarKindType) @@ -149,7 +148,6 @@ const VarReferencePicker: FC = ({ const [open, setOpen] = useState(false) useEffect(() => { onOpen() - // eslint-disable-next-line react-hooks/exhaustive-deps }, [open]) const hasValue = !isConstant && value.length > 0 @@ -362,6 +360,13 @@ const VarReferencePicker: FC = ({ return schema }, [dynamicOptions]) + const variableCategory = useMemo(() => { + if (isEnv) return 'environment' + if (isChatVar) return 'conversation' + if (isLoopVar) return 'loop' + return 'system' + }, [isEnv, isChatVar, isLoopVar]) + return (
    = ({
    )}
    - {!hasValue && } {isLoading && } - {isEnv && } - {isChatVar && } +
    {varName}
    diff --git a/web/app/components/workflow/nodes/_base/components/variable/var-reference-vars.tsx b/web/app/components/workflow/nodes/_base/components/variable/var-reference-vars.tsx index 303840d8e7..2c600ba66c 100644 --- a/web/app/components/workflow/nodes/_base/components/variable/var-reference-vars.tsx +++ b/web/app/components/workflow/nodes/_base/components/variable/var-reference-vars.tsx @@ -5,7 +5,6 @@ import { useHover } from 'ahooks' import { useTranslation } from 'react-i18next' import cn from '@/utils/classnames' import { type NodeOutPutVar, type ValueSelector, type Var, VarType } from '@/app/components/workflow/types' -import { Variable02 } from '@/app/components/base/icons/src/vender/solid/development' import { ChevronRight } from '@/app/components/base/icons/src/vender/line/arrows' import { PortalToFollowElem, @@ -13,7 +12,6 @@ import { PortalToFollowElemTrigger, } from '@/app/components/base/portal-to-follow-elem' import Input from '@/app/components/base/input' -import { BubbleX, Env } from '@/app/components/base/icons/src/vender/line/others' import { checkKeys } from '@/utils/var' import type { StructuredOutput } from '../../../llm/types' import { Type } from '../../../llm/types' @@ -21,8 +19,8 @@ import PickerStructurePanel from '@/app/components/workflow/nodes/_base/componen import { varTypeToStructType } from './utils' import type { Field } from '@/app/components/workflow/nodes/llm/types' import { FILE_STRUCT } from '@/app/components/workflow/constants' -import { Loop } from '@/app/components/base/icons/src/vender/workflow' import { noop } from 'lodash-es' +import { VariableIconWithColor } from '@/app/components/workflow/nodes/_base/components/variable/variable-label' type ObjectChildrenProps = { nodeId: string @@ -118,7 +116,6 @@ const Item: FC = ({ const open = (isObj || isStructureOutput) && isHovering useEffect(() => { onHovering && onHovering(isHovering) - // eslint-disable-next-line react-hooks/exhaustive-deps }, [isHovering]) const handleChosen = (e: React.MouseEvent) => { e.stopPropagation() @@ -132,6 +129,12 @@ const Item: FC = ({ onChange([nodeId, ...objPath, itemData.variable], itemData) } } + const variableCategory = useMemo(() => { + if (isEnv) return 'environment' + if (isChatVar) return 'conversation' + if (isLoopVar) return 'loop' + return 'system' + }, [isEnv, isChatVar, isSys, isLoopVar]) return ( = ({ onMouseDown={e => e.preventDefault()} >
    - {!isEnv && !isChatVar && !isLoopVar && } - {isEnv && } - {isChatVar && } - {isLoopVar && } + {!isEnv && !isChatVar && (
    {itemData.variable}
    )} @@ -219,11 +222,9 @@ const ObjectChildren: FC = ({ const isHovering = isItemHovering || isChildrenHovering useEffect(() => { onHovering && onHovering(isHovering) - // eslint-disable-next-line react-hooks/exhaustive-deps }, [isHovering]) useEffect(() => { onHovering && onHovering(isItemHovering) - // eslint-disable-next-line react-hooks/exhaustive-deps }, [isItemHovering]) // absolute top-[-2px] return ( diff --git a/web/app/components/workflow/nodes/_base/components/variable/variable-label/base/variable-icon.tsx b/web/app/components/workflow/nodes/_base/components/variable/variable-label/base/variable-icon.tsx new file mode 100644 index 0000000000..93f47f794a --- /dev/null +++ b/web/app/components/workflow/nodes/_base/components/variable/variable-label/base/variable-icon.tsx @@ -0,0 +1,28 @@ +import { memo } from 'react' +import cn from '@/utils/classnames' +import { useVarIcon } from '../hooks' +import type { VarInInspectType } from '@/types/workflow' + +export type VariableIconProps = { + className?: string + variables?: string[] + variableCategory?: VarInInspectType | string +} +const VariableIcon = ({ + className, + variables = [], + variableCategory, +}: VariableIconProps) => { + const VarIcon = useVarIcon(variables, variableCategory) + + return VarIcon && ( + + ) +} + +export default memo(VariableIcon) diff --git a/web/app/components/workflow/nodes/_base/components/variable/variable-label/base/variable-label.tsx b/web/app/components/workflow/nodes/_base/components/variable/variable-label/base/variable-label.tsx new file mode 100644 index 0000000000..99f080f545 --- /dev/null +++ b/web/app/components/workflow/nodes/_base/components/variable/variable-label/base/variable-label.tsx @@ -0,0 +1,83 @@ +import { memo } from 'react' +import { capitalize } from 'lodash-es' +import { + RiErrorWarningFill, + RiMoreLine, +} from '@remixicon/react' +import type { VariablePayload } from '../types' +import { useVarColor } from '../hooks' +import VariableNodeLabel from './variable-node-label' +import VariableIcon from './variable-icon' +import VariableName from './variable-name' +import cn from '@/utils/classnames' +import Tooltip from '@/app/components/base/tooltip' + +const VariableLabel = ({ + nodeType, + nodeTitle, + variables, + variableType, + className, + errorMsg, + onClick, + isExceptionVariable, + ref, + notShowFullPath, + rightSlot, +}: VariablePayload) => { + const varColorClassName = useVarColor(variables, isExceptionVariable) + return ( +
    + + { + notShowFullPath && ( + <> + +
    /
    + + ) + } + + + { + variableType && ( +
    + {capitalize(variableType)} +
    + ) + } + { + !!errorMsg && ( + + + + ) + } + { + rightSlot + } +
    + ) +} + +export default memo(VariableLabel) diff --git a/web/app/components/workflow/nodes/_base/components/variable/variable-label/base/variable-name.tsx b/web/app/components/workflow/nodes/_base/components/variable/variable-label/base/variable-name.tsx new file mode 100644 index 0000000000..f656b780a5 --- /dev/null +++ b/web/app/components/workflow/nodes/_base/components/variable/variable-label/base/variable-name.tsx @@ -0,0 +1,30 @@ +import { memo } from 'react' +import { useVarName } from '../hooks' +import cn from '@/utils/classnames' + +type VariableNameProps = { + variables: string[] + className?: string + notShowFullPath?: boolean +} +const VariableName = ({ + variables, + className, + notShowFullPath, +}: VariableNameProps) => { + const varName = useVarName(variables, notShowFullPath) + + return ( +
    + {varName} +
    + ) +} + +export default memo(VariableName) diff --git a/web/app/components/workflow/nodes/_base/components/variable/variable-label/base/variable-node-label.tsx b/web/app/components/workflow/nodes/_base/components/variable/variable-label/base/variable-node-label.tsx new file mode 100644 index 0000000000..e4b0e52866 --- /dev/null +++ b/web/app/components/workflow/nodes/_base/components/variable/variable-label/base/variable-node-label.tsx @@ -0,0 +1,37 @@ +import { memo } from 'react' +import { VarBlockIcon } from '@/app/components/workflow/block-icon' +import type { BlockEnum } from '@/app/components/workflow/types' + +type VariableNodeLabelProps = { + nodeType?: BlockEnum + nodeTitle?: string +} +const VariableNodeLabel = ({ + nodeType, + nodeTitle, +}: VariableNodeLabelProps) => { + if (!nodeType) + return null + + return ( + <> + + { + nodeTitle && ( +
    + {nodeTitle} +
    + ) + } +
    /
    + + ) +} + +export default memo(VariableNodeLabel) diff --git a/web/app/components/workflow/nodes/_base/components/variable/variable-label/hooks.ts b/web/app/components/workflow/nodes/_base/components/variable/variable-label/hooks.ts new file mode 100644 index 0000000000..14ca87903b --- /dev/null +++ b/web/app/components/workflow/nodes/_base/components/variable/variable-label/hooks.ts @@ -0,0 +1,89 @@ +import { useMemo } from 'react' +import { Variable02 } from '@/app/components/base/icons/src/vender/solid/development' +import { BubbleX, Env } from '@/app/components/base/icons/src/vender/line/others' +import { Loop } from '@/app/components/base/icons/src/vender/workflow' +import { + isConversationVar, + isENV, + isSystemVar, +} from '../utils' +import { VarInInspectType } from '@/types/workflow' + +export const useVarIcon = (variables: string[], variableCategory?: VarInInspectType | string) => { + if (variableCategory === 'loop') + return Loop + + if (isENV(variables) || variableCategory === VarInInspectType.environment || variableCategory === 'environment') + return Env + + if (isConversationVar(variables) || variableCategory === VarInInspectType.conversation || variableCategory === 'conversation') + return BubbleX + + return Variable02 +} + +export const useVarColor = (variables: string[], isExceptionVariable?: boolean, variableCategory?: VarInInspectType | string) => { + return useMemo(() => { + if (isExceptionVariable) + return 'text-text-warning' + + if (variableCategory === 'loop') + return 'text-util-colors-cyan-cyan-500' + + if (isENV(variables) || variableCategory === VarInInspectType.environment || variableCategory === 'environment') + return 'text-util-colors-violet-violet-600' + + if (isConversationVar(variables) || variableCategory === VarInInspectType.conversation || variableCategory === 'conversation') + return 'text-util-colors-teal-teal-700' + + return 'text-text-accent' + }, [variables, isExceptionVariable]) +} + +export const useVarName = (variables: string[], notShowFullPath?: boolean) => { + const variableFullPathName = variables.slice(1).join('.') + const variablesLength = variables.length + const varName = useMemo(() => { + const isSystem = isSystemVar(variables) + const varName = notShowFullPath ? variables[variablesLength - 1] : variableFullPathName + return `${isSystem ? 'sys.' : ''}${varName}` + }, [variables, notShowFullPath]) + + return varName +} + +export const useVarBgColorInEditor = (variables: string[], hasError?: boolean) => { + if (hasError) { + return { + hoverBorderColor: 'hover:border-state-destructive-active', + hoverBgColor: 'hover:bg-state-destructive-hover', + selectedBorderColor: '!border-state-destructive-solid', + selectedBgColor: '!bg-state-destructive-hover', + } + } + + if (isENV(variables)) { + return { + hoverBorderColor: 'hover:border-util-colors-violet-violet-100', + hoverBgColor: 'hover:bg-util-colors-violet-violet-50', + selectedBorderColor: 'border-util-colors-violet-violet-600', + selectedBgColor: 'bg-util-colors-violet-violet-50', + } + } + + if (isConversationVar(variables)) { + return { + hoverBorderColor: 'hover:border-util-colors-teal-teal-100', + hoverBgColor: 'hover:bg-util-colors-teal-teal-50', + selectedBorderColor: 'border-util-colors-teal-teal-600', + selectedBgColor: 'bg-util-colors-teal-teal-50', + } + } + + return { + hoverBorderColor: 'hover:border-state-accent-alt', + hoverBgColor: 'hover:bg-state-accent-hover', + selectedBorderColor: 'border-state-accent-solid', + selectedBgColor: 'bg-state-accent-hover', + } +} diff --git a/web/app/components/workflow/nodes/_base/components/variable/variable-label/index.tsx b/web/app/components/workflow/nodes/_base/components/variable/variable-label/index.tsx new file mode 100644 index 0000000000..012522e0aa --- /dev/null +++ b/web/app/components/workflow/nodes/_base/components/variable/variable-label/index.tsx @@ -0,0 +1,5 @@ +export { default as VariableLabelInSelect } from './variable-label-in-select' +export { default as VariableLabelInEditor } from './variable-label-in-editor' +export { default as VariableLabelInNode } from './variable-label-in-node' +export { default as VariableLabelInText } from './variable-label-in-text' +export { default as VariableIconWithColor } from './variable-icon-with-color' diff --git a/web/app/components/workflow/nodes/_base/components/variable/variable-label/types.ts b/web/app/components/workflow/nodes/_base/components/variable/variable-label/types.ts new file mode 100644 index 0000000000..6f3b06f6ee --- /dev/null +++ b/web/app/components/workflow/nodes/_base/components/variable/variable-label/types.ts @@ -0,0 +1,19 @@ +import type { ReactNode } from 'react' +import type { + BlockEnum, + VarType, +} from '@/app/components/workflow/types' + +export type VariablePayload = { + className?: string + nodeType?: BlockEnum + nodeTitle?: string + variables: string[] + variableType?: VarType + onClick?: (e: React.MouseEvent) => void + errorMsg?: string + isExceptionVariable?: boolean + ref?: React.Ref + notShowFullPath?: boolean + rightSlot?: ReactNode +} diff --git a/web/app/components/workflow/nodes/_base/components/variable/variable-label/variable-icon-with-color.tsx b/web/app/components/workflow/nodes/_base/components/variable/variable-label/variable-icon-with-color.tsx new file mode 100644 index 0000000000..56d6c3738e --- /dev/null +++ b/web/app/components/workflow/nodes/_base/components/variable/variable-label/variable-icon-with-color.tsx @@ -0,0 +1,30 @@ +import { memo } from 'react' +import VariableIcon from './base/variable-icon' +import type { VariableIconProps } from './base/variable-icon' +import { useVarColor } from './hooks' +import cn from '@/utils/classnames' + +type VariableIconWithColorProps = { + isExceptionVariable?: boolean +} & VariableIconProps + +const VariableIconWithColor = ({ + isExceptionVariable, + variableCategory, + variables = [], + className, +}: VariableIconWithColorProps) => { + const varColorClassName = useVarColor(variables, isExceptionVariable, variableCategory) + return ( + + ) +} + +export default memo(VariableIconWithColor) diff --git a/web/app/components/workflow/nodes/_base/components/variable/variable-label/variable-label-in-editor.tsx b/web/app/components/workflow/nodes/_base/components/variable/variable-label/variable-label-in-editor.tsx new file mode 100644 index 0000000000..fa5ae57f91 --- /dev/null +++ b/web/app/components/workflow/nodes/_base/components/variable/variable-label/variable-label-in-editor.tsx @@ -0,0 +1,40 @@ +import { memo } from 'react' +import type { VariablePayload } from './types' +import VariableLabel from './base/variable-label' +import { useVarBgColorInEditor } from './hooks' +import cn from '@/utils/classnames' + +type VariableLabelInEditorProps = { + isSelected?: boolean +} & VariablePayload +const VariableLabelInEditor = ({ + isSelected, + variables, + errorMsg, + ...rest +}: VariableLabelInEditorProps) => { + const { + hoverBorderColor, + hoverBgColor, + selectedBorderColor, + selectedBgColor, + } = useVarBgColorInEditor(variables, !!errorMsg) + + return ( + + ) +} + +export default memo(VariableLabelInEditor) diff --git a/web/app/components/workflow/nodes/_base/components/variable/variable-label/variable-label-in-node.tsx b/web/app/components/workflow/nodes/_base/components/variable/variable-label/variable-label-in-node.tsx new file mode 100644 index 0000000000..cebe140e26 --- /dev/null +++ b/web/app/components/workflow/nodes/_base/components/variable/variable-label/variable-label-in-node.tsx @@ -0,0 +1,17 @@ +import { memo } from 'react' +import type { VariablePayload } from './types' +import VariableLabel from './base/variable-label' +import cn from '@/utils/classnames' + +const VariableLabelInNode = (variablePayload: VariablePayload) => { + return ( + + ) +} + +export default memo(VariableLabelInNode) diff --git a/web/app/components/workflow/nodes/_base/components/variable/variable-label/variable-label-in-select.tsx b/web/app/components/workflow/nodes/_base/components/variable/variable-label/variable-label-in-select.tsx new file mode 100644 index 0000000000..34e7b5f461 --- /dev/null +++ b/web/app/components/workflow/nodes/_base/components/variable/variable-label/variable-label-in-select.tsx @@ -0,0 +1,13 @@ +import { memo } from 'react' +import type { VariablePayload } from './types' +import VariableLabel from './base/variable-label' + +const VariableLabelInSelect = (variablePayload: VariablePayload) => { + return ( + + ) +} + +export default memo(VariableLabelInSelect) diff --git a/web/app/components/workflow/nodes/_base/components/variable/variable-label/variable-label-in-text.tsx b/web/app/components/workflow/nodes/_base/components/variable/variable-label/variable-label-in-text.tsx new file mode 100644 index 0000000000..dd0d6fcf8b --- /dev/null +++ b/web/app/components/workflow/nodes/_base/components/variable/variable-label/variable-label-in-text.tsx @@ -0,0 +1,17 @@ +import { memo } from 'react' +import type { VariablePayload } from './types' +import VariableLabel from './base/variable-label' +import cn from '@/utils/classnames' + +const VariableLabelInText = (variablePayload: VariablePayload) => { + return ( + + ) +} + +export default memo(VariableLabelInText) diff --git a/web/app/components/workflow/nodes/assigner/node.tsx b/web/app/components/workflow/nodes/assigner/node.tsx index 2dd1ead4f8..5e5950d715 100644 --- a/web/app/components/workflow/nodes/assigner/node.tsx +++ b/web/app/components/workflow/nodes/assigner/node.tsx @@ -2,10 +2,13 @@ import type { FC } from 'react' import React from 'react' import { useNodes } from 'reactflow' import { useTranslation } from 'react-i18next' -import NodeVariableItem from '../variable-assigner/components/node-variable-item' import type { AssignerNodeType } from './types' -import { isConversationVar, isENV, isSystemVar } from '@/app/components/workflow/nodes/_base/components/variable/utils' +import { isSystemVar } from '@/app/components/workflow/nodes/_base/components/variable/utils' import { BlockEnum, type Node, type NodeProps } from '@/app/components/workflow/types' +import { + VariableLabelInNode, +} from '@/app/components/workflow/nodes/_base/components/variable/variable-label' +import Badge from '@/app/components/base/badge' const i18nPrefix = 'workflow.nodes.assigner' @@ -38,19 +41,16 @@ const NodeComponent: FC> = ({ if (!variable || variable.length === 0) return null const isSystem = isSystemVar(variable) - const isEnv = isENV(variable) - const isChatVar = isConversationVar(variable) const node = isSystem ? nodes.find(node => node.data.type === BlockEnum.Start) : nodes.find(node => node.id === variable[0]) - const varName = isSystem ? `sys.${variable[variable.length - 1]}` : variable.slice(1).join('.') return ( - + } /> ) })} @@ -63,21 +63,17 @@ const NodeComponent: FC> = ({ if (!variable || variable.length === 0) return null const isSystem = isSystemVar(variable) - const isEnv = isENV(variable) - const isChatVar = isConversationVar(variable) - const node = isSystem ? nodes.find(node => node.data.type === BlockEnum.Start) : nodes.find(node => node.id === variable[0]) - const varName = isSystem ? `sys.${variable[variable.length - 1]}` : variable.slice(1).join('.') return (
    - + } />
    ) diff --git a/web/app/components/workflow/nodes/document-extractor/node.tsx b/web/app/components/workflow/nodes/document-extractor/node.tsx index 6b1d4343be..ab7fe9a9a6 100644 --- a/web/app/components/workflow/nodes/document-extractor/node.tsx +++ b/web/app/components/workflow/nodes/document-extractor/node.tsx @@ -2,10 +2,12 @@ import type { FC } from 'react' import React from 'react' import { useNodes } from 'reactflow' import { useTranslation } from 'react-i18next' -import NodeVariableItem from '../variable-assigner/components/node-variable-item' import type { DocExtractorNodeType } from './types' -import { isConversationVar, isENV, isSystemVar } from '@/app/components/workflow/nodes/_base/components/variable/utils' +import { isSystemVar } from '@/app/components/workflow/nodes/_base/components/variable/utils' import { BlockEnum, type Node, type NodeProps } from '@/app/components/workflow/types' +import { + VariableLabelInNode, +} from '@/app/components/workflow/nodes/_base/components/variable/variable-label' const i18nPrefix = 'workflow.nodes.docExtractor' @@ -21,19 +23,14 @@ const NodeComponent: FC> = ({ return null const isSystem = isSystemVar(variable) - const isEnv = isENV(variable) - const isChatVar = isConversationVar(variable) const node = isSystem ? nodes.find(node => node.data.type === BlockEnum.Start) : nodes.find(node => node.id === variable[0]) - const varName = isSystem ? `sys.${variable[variable.length - 1]}` : variable.slice(1).join('.') return (
    {t(`${i18nPrefix}.inputVar`)}
    -
    ) diff --git a/web/app/components/workflow/nodes/end/node.tsx b/web/app/components/workflow/nodes/end/node.tsx index 6906e0f77c..2583e61b68 100644 --- a/web/app/components/workflow/nodes/end/node.tsx +++ b/web/app/components/workflow/nodes/end/node.tsx @@ -1,19 +1,16 @@ import type { FC } from 'react' import React from 'react' -import cn from 'classnames' import type { EndNodeType } from './types' import type { NodeProps, Variable } from '@/app/components/workflow/types' -import { isConversationVar, isENV, isSystemVar } from '@/app/components/workflow/nodes/_base/components/variable/utils' import { useIsChatMode, useWorkflow, useWorkflowVariables, } from '@/app/components/workflow/hooks' -import { VarBlockIcon } from '@/app/components/workflow/block-icon' -import { Line3 } from '@/app/components/base/icons/src/public/common' -import { Variable02 } from '@/app/components/base/icons/src/vender/solid/development' -import { BubbleX, Env } from '@/app/components/base/icons/src/vender/line/others' import { BlockEnum } from '@/app/components/workflow/types' +import { + VariableLabelInNode, +} from '@/app/components/workflow/nodes/_base/components/variable/variable-label' const Node: FC> = ({ id, @@ -42,42 +39,20 @@ const Node: FC> = ({
    {filteredOutputs.map(({ value_selector }, index) => { const node = getNode(value_selector[0]) - const isSystem = isSystemVar(value_selector) - const isEnv = isENV(value_selector) - const isChatVar = isConversationVar(value_selector) - const varName = isSystem ? `sys.${value_selector[value_selector.length - 1]}` : value_selector[value_selector.length - 1] const varType = getCurrentVariableType({ valueSelector: value_selector, availableNodes, isChatMode, }) - return ( -
    -
    - {!isEnv && !isChatVar && ( - <> -
    - -
    -
    {node?.data.title}
    - - - )} -
    - {!isEnv && !isChatVar && } - {isEnv && } - {isChatVar && } -
    {varName}
    -
    -
    -
    -
    {varType}
    -
    -
    + return ( + ) })} diff --git a/web/app/components/workflow/nodes/http/node.tsx b/web/app/components/workflow/nodes/http/node.tsx index aa1912bd59..6002bf737d 100644 --- a/web/app/components/workflow/nodes/http/node.tsx +++ b/web/app/components/workflow/nodes/http/node.tsx @@ -15,7 +15,7 @@ const Node: FC> = ({
    {method}
    -
    +
    { const notHasValue = comparisonOperatorNotRequireValue(c.comparison_operator) if (notHasValue) @@ -76,19 +72,11 @@ const ConditionValue = ({ return (
    - {!isEnvVar && !isChatVar && } - {isEnvVar && } - {isChatVar && } - -
    - {variableName} -
    +
    | undefined = nodes.find(n => n.id === variableSelector[0]) as Node const isException = isExceptionVariable(variableName, node?.data.type) const formatValue = useMemo(() => { @@ -76,20 +74,14 @@ const ConditionValue = ({ return (
    - {!isEnvVar && !isChatVar && } - {isEnvVar && } - {isChatVar && } - -
    - {variableName} -
    +
    > = ({ return null const isSystem = isSystemVar(variable) - const isEnv = isENV(variable) - const isChatVar = isConversationVar(variable) const node = isSystem ? nodes.find(node => node.data.type === BlockEnum.Start) : nodes.find(node => node.id === variable[0]) - const varName = isSystem ? `sys.${variable[variable.length - 1]}` : variable.slice(1).join('.') return (
    {t(`${i18nPrefix}.inputVar`)}
    -
    ) diff --git a/web/app/components/workflow/nodes/loop/components/condition-files-list-value.tsx b/web/app/components/workflow/nodes/loop/components/condition-files-list-value.tsx index 772b960953..00eec93de3 100644 --- a/web/app/components/workflow/nodes/loop/components/condition-files-list-value.tsx +++ b/web/app/components/workflow/nodes/loop/components/condition-files-list-value.tsx @@ -11,10 +11,10 @@ import { } from '../utils' import type { ValueSelector } from '../../../types' import { FILE_TYPE_OPTIONS, TRANSFER_METHOD } from './../default' -import { Variable02 } from '@/app/components/base/icons/src/vender/solid/development' -import { BubbleX, Env } from '@/app/components/base/icons/src/vender/line/others' -import cn from '@/utils/classnames' -import { isConversationVar, isENV, isSystemVar } from '@/app/components/workflow/nodes/_base/components/variable/utils' +import { isSystemVar } from '@/app/components/workflow/nodes/_base/components/variable/utils' +import { + VariableLabelInNode, +} from '@/app/components/workflow/nodes/_base/components/variable/variable-label' const i18nPrefix = 'workflow.nodes.ifElse' type ConditionValueProps = { @@ -32,11 +32,7 @@ const ConditionValue = ({ const variableSelector = variable_selector as ValueSelector - const variableName = (isSystemVar(variableSelector) ? variableSelector.slice(0).join('.') : variableSelector.slice(1).join('.')) const operatorName = isComparisonOperatorNeedTranslate(operator) ? t(`workflow.nodes.ifElse.comparisonOperator.${operator}`) : operator - const notHasValue = comparisonOperatorNotRequireValue(operator) - const isEnvVar = isENV(variableSelector) - const isChatVar = isConversationVar(variableSelector) const formatValue = useCallback((c: Condition) => { const notHasValue = comparisonOperatorNotRequireValue(c.comparison_operator) if (notHasValue) @@ -76,19 +72,11 @@ const ConditionValue = ({ return (
    - {!isEnvVar && !isChatVar && } - {isEnvVar && } - {isChatVar && } - -
    - {variableName} -
    +
    { const { t } = useTranslation() - const variableName = labelName || (isSystemVar(variableSelector) ? variableSelector.slice(0).join('.') : variableSelector.slice(1).join('.')) const operatorName = isComparisonOperatorNeedTranslate(operator) ? t(`workflow.nodes.ifElse.comparisonOperator.${operator}`) : operator const notHasValue = comparisonOperatorNotRequireValue(operator) - const isEnvVar = isENV(variableSelector) - const isChatVar = isConversationVar(variableSelector) const formatValue = useMemo(() => { if (notHasValue) return '' @@ -67,19 +64,11 @@ const ConditionValue = ({ return (
    - {!isEnvVar && !isChatVar && } - {isEnvVar && } - {isChatVar && } - -
    - {variableName} -
    +
    { - const isSystem = isSystemVar(variable) - const isEnv = isENV(variable) - const isChatVar = isConversationVar(variable) + !!item.variables.length && ( +
    + { + item.variables.map((variable = [], index) => { + const isSystem = isSystemVar(variable) - const node = isSystem ? nodes.find(node => node.data.type === BlockEnum.Start) : nodes.find(node => node.id === variable[0]) - const varName = isSystem ? `sys.${variable[variable.length - 1]}` : variable.slice(1).join('.') - const isException = isExceptionVariable(varName, node?.data.type) + const node = isSystem ? nodes.find(node => node.data.type === BlockEnum.Start) : nodes.find(node => node.id === variable[0]) + const varName = isSystem ? `sys.${variable[variable.length - 1]}` : variable.slice(1).join('.') + const isException = isExceptionVariable(varName, node?.data.type) - return ( - - ) - }) + return ( + + ) + }) + } +
    + ) }
    ) diff --git a/web/app/components/workflow/nodes/variable-assigner/components/node-variable-item.tsx b/web/app/components/workflow/nodes/variable-assigner/components/node-variable-item.tsx deleted file mode 100644 index f5d05aae26..0000000000 --- a/web/app/components/workflow/nodes/variable-assigner/components/node-variable-item.tsx +++ /dev/null @@ -1,111 +0,0 @@ -import { - memo, - useMemo, -} from 'react' -import { useTranslation } from 'react-i18next' -import cn from '@/utils/classnames' -import { VarBlockIcon } from '@/app/components/workflow/block-icon' -import { Line3 } from '@/app/components/base/icons/src/public/common' -import { Variable02 } from '@/app/components/base/icons/src/vender/solid/development' -import { BubbleX, Env } from '@/app/components/base/icons/src/vender/line/others' -import Badge from '@/app/components/base/badge' -import type { Node } from '@/app/components/workflow/types' - -type NodeVariableItemProps = { - isEnv: boolean - isChatVar: boolean - node: Node - varName: string - writeMode?: string - showBorder?: boolean - className?: string - isException?: boolean -} - -const i18nPrefix = 'workflow.nodes.assigner' - -const NodeVariableItem = ({ - isEnv, - isChatVar, - node, - varName, - writeMode, - showBorder, - className, - isException, -}: NodeVariableItemProps) => { - const { t } = useTranslation() - - const VariableIcon = useMemo(() => { - if (isEnv) { - return ( - - ) - } - - if (isChatVar) { - return ( - - ) - } - - return ( - - ) - }, [isEnv, isChatVar, isException]) - - const VariableName = useMemo(() => { - return ( -
    - {varName} -
    - ) - }, [isEnv, isChatVar, varName, isException]) - return ( -
    -
    - { - node && ( - <> -
    - -
    -
    - {node?.data.title} -
    - - - ) - } - {VariableIcon} - {VariableName} -
    - {writeMode && } -
    - ) -} - -export default memo(NodeVariableItem) diff --git a/web/app/components/workflow/variable-inspect/group.tsx b/web/app/components/workflow/variable-inspect/group.tsx index 1b032c8992..29b6c3ca44 100644 --- a/web/app/components/workflow/variable-inspect/group.tsx +++ b/web/app/components/workflow/variable-inspect/group.tsx @@ -11,16 +11,12 @@ import { import ActionButton from '@/app/components/base/action-button' import Tooltip from '@/app/components/base/tooltip' import BlockIcon from '@/app/components/workflow/block-icon' -import { - BubbleX, - Env, -} from '@/app/components/base/icons/src/vender/line/others' -import { Variable02 } from '@/app/components/base/icons/src/vender/solid/development' import type { currentVarType } from './panel' import { VarInInspectType } from '@/types/workflow' import type { NodeWithVar, VarInInspect } from '@/types/workflow' import cn from '@/utils/classnames' import { useToolIcon } from '../hooks' +import { VariableIconWithColor } from '@/app/components/workflow/nodes/_base/components/variable/variable-label' type Props = { nodeData?: NodeWithVar @@ -158,9 +154,11 @@ const Group = ({ )} onClick={() => handleSelectVar(varItem, varType)} > - {isEnv && } - {isChatVar && } - {(isSystem || nodeData) && } +
    {varItem.name}
    {varItem.value_type}
    diff --git a/web/app/components/workflow/variable-inspect/right.tsx b/web/app/components/workflow/variable-inspect/right.tsx index 6ddd0d47d3..aa318cfe79 100644 --- a/web/app/components/workflow/variable-inspect/right.tsx +++ b/web/app/components/workflow/variable-inspect/right.tsx @@ -14,12 +14,11 @@ import Badge from '@/app/components/base/badge' import CopyFeedback from '@/app/components/base/copy-feedback' import Tooltip from '@/app/components/base/tooltip' import BlockIcon from '@/app/components/workflow/block-icon' -import { BubbleX, Env } from '@/app/components/base/icons/src/vender/line/others' -import { Variable02 } from '@/app/components/base/icons/src/vender/solid/development' import Loading from '@/app/components/base/loading' import type { currentVarType } from './panel' import { VarInInspectType } from '@/types/workflow' import cn from '@/utils/classnames' +import { VariableIconWithColor } from '@/app/components/workflow/nodes/_base/components/variable/variable-label' type Props = { currentNodeVar?: currentVarType @@ -86,15 +85,14 @@ const Right = ({
    {currentNodeVar && ( <> - {currentNodeVar.nodeType === VarInInspectType.environment && ( - - )} - {currentNodeVar.nodeType === VarInInspectType.conversation && ( - - )} - {currentNodeVar.nodeType === VarInInspectType.system && ( - - )} + { + [VarInInspectType.environment, VarInInspectType.conversation, VarInInspectType.system].includes(currentNodeVar.nodeType as VarInInspectType) && ( + + ) + } {currentNodeVar.nodeType !== VarInInspectType.environment && currentNodeVar.nodeType !== VarInInspectType.conversation && currentNodeVar.nodeType !== VarInInspectType.system && ( <> Date: Fri, 1 Aug 2025 17:37:22 +0800 Subject: [PATCH 104/415] fix: type for ProviderConfig.default (#23282) Signed-off-by: jingfelix --- api/core/entities/provider_entities.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/api/core/entities/provider_entities.py b/api/core/entities/provider_entities.py index 2a0751a5ee..a5a6e62bd7 100644 --- a/api/core/entities/provider_entities.py +++ b/api/core/entities/provider_entities.py @@ -176,7 +176,7 @@ class ProviderConfig(BasicProviderConfig): scope: AppSelectorScope | ModelSelectorScope | ToolSelectorScope | None = None required: bool = False - default: Optional[Union[int, str]] = None + default: Optional[Union[int, str, float, bool]] = None options: Optional[list[Option]] = None label: Optional[I18nObject] = None help: Optional[I18nObject] = None From be914438a52e70410cd798ee936c21fb28abac73 Mon Sep 17 00:00:00 2001 From: Yongtao Huang Date: Fri, 1 Aug 2025 22:46:50 +0800 Subject: [PATCH 105/415] Fix: incorrect array element validation in SegmentType (#23289) --- api/core/variables/types.py | 4 +-- .../core/variables/test_segment_type.py | 27 +++++++++++++++++-- 2 files changed, 27 insertions(+), 4 deletions(-) diff --git a/api/core/variables/types.py b/api/core/variables/types.py index e79b2410bf..d28fb11401 100644 --- a/api/core/variables/types.py +++ b/api/core/variables/types.py @@ -109,7 +109,7 @@ class SegmentType(StrEnum): elif array_validation == ArrayValidation.FIRST: return element_type.is_valid(value[0]) else: - return all([element_type.is_valid(i, array_validation=ArrayValidation.NONE)] for i in value) + return all(element_type.is_valid(i, array_validation=ArrayValidation.NONE) for i in value) def is_valid(self, value: Any, array_validation: ArrayValidation = ArrayValidation.FIRST) -> bool: """ @@ -152,7 +152,7 @@ class SegmentType(StrEnum): _ARRAY_ELEMENT_TYPES_MAPPING: Mapping[SegmentType, SegmentType] = { - # ARRAY_ANY does not have correpond element type. + # ARRAY_ANY does not have corresponding element type. SegmentType.ARRAY_STRING: SegmentType.STRING, SegmentType.ARRAY_NUMBER: SegmentType.NUMBER, SegmentType.ARRAY_OBJECT: SegmentType.OBJECT, diff --git a/api/tests/unit_tests/core/variables/test_segment_type.py b/api/tests/unit_tests/core/variables/test_segment_type.py index 64d0d8c7e7..b33a83ba77 100644 --- a/api/tests/unit_tests/core/variables/test_segment_type.py +++ b/api/tests/unit_tests/core/variables/test_segment_type.py @@ -1,4 +1,4 @@ -from core.variables.types import SegmentType +from core.variables.types import ArrayValidation, SegmentType class TestSegmentTypeIsArrayType: @@ -17,7 +17,6 @@ class TestSegmentTypeIsArrayType: value is tested for the is_array_type method. """ # Arrange - all_segment_types = set(SegmentType) expected_array_types = [ SegmentType.ARRAY_ANY, SegmentType.ARRAY_STRING, @@ -58,3 +57,27 @@ class TestSegmentTypeIsArrayType: for seg_type in enum_values: is_array = seg_type.is_array_type() assert isinstance(is_array, bool), f"is_array_type does not return a boolean for segment type {seg_type}" + + +class TestSegmentTypeIsValidArrayValidation: + """ + Test SegmentType.is_valid with array types using different validation strategies. + """ + + def test_array_validation_all_success(self): + value = ["hello", "world", "foo"] + assert SegmentType.ARRAY_STRING.is_valid(value, array_validation=ArrayValidation.ALL) + + def test_array_validation_all_fail(self): + value = ["hello", 123, "world"] + # Should return False, since 123 is not a string + assert not SegmentType.ARRAY_STRING.is_valid(value, array_validation=ArrayValidation.ALL) + + def test_array_validation_first(self): + value = ["hello", 123, None] + assert SegmentType.ARRAY_STRING.is_valid(value, array_validation=ArrayValidation.FIRST) + + def test_array_validation_none(self): + value = [1, 2, 3] + # validation is None, skip + assert SegmentType.ARRAY_STRING.is_valid(value, array_validation=ArrayValidation.NONE) From a4b14fc99275dd574e7ba130a3880e3ccef14081 Mon Sep 17 00:00:00 2001 From: DazzlingSong <1204427087@qq.com> Date: Fri, 1 Aug 2025 22:59:38 +0800 Subject: [PATCH 106/415] fix video and audio extension, keep consistent with the web page. (#23287) Co-authored-by: wangjialei --- api/constants/__init__.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/api/constants/__init__.py b/api/constants/__init__.py index 9e052320ac..c98f4d55c8 100644 --- a/api/constants/__init__.py +++ b/api/constants/__init__.py @@ -9,10 +9,10 @@ DEFAULT_FILE_NUMBER_LIMITS = 3 IMAGE_EXTENSIONS = ["jpg", "jpeg", "png", "webp", "gif", "svg"] IMAGE_EXTENSIONS.extend([ext.upper() for ext in IMAGE_EXTENSIONS]) -VIDEO_EXTENSIONS = ["mp4", "mov", "mpeg", "mpga"] +VIDEO_EXTENSIONS = ["mp4", "mov", "mpeg", "webm"] VIDEO_EXTENSIONS.extend([ext.upper() for ext in VIDEO_EXTENSIONS]) -AUDIO_EXTENSIONS = ["mp3", "m4a", "wav", "webm", "amr"] +AUDIO_EXTENSIONS = ["mp3", "m4a", "wav", "amr", "mpga"] AUDIO_EXTENSIONS.extend([ext.upper() for ext in AUDIO_EXTENSIONS]) From bc18d4d1b9a3b2ed5c819ec4c64da91551189626 Mon Sep 17 00:00:00 2001 From: liujin-patsnap <134919030+patsnap-liujin@users.noreply.github.com> Date: Sat, 2 Aug 2025 03:36:04 +0800 Subject: [PATCH 107/415] Fix: Correct file variable handling for custom tools in workflow (#18427) --- api/core/workflow/nodes/tool/tool_node.py | 27 +++++++++++++++++++++++ 1 file changed, 27 insertions(+) diff --git a/api/core/workflow/nodes/tool/tool_node.py b/api/core/workflow/nodes/tool/tool_node.py index 4c8e13de70..df89b2476d 100644 --- a/api/core/workflow/nodes/tool/tool_node.py +++ b/api/core/workflow/nodes/tool/tool_node.py @@ -318,6 +318,33 @@ class ToolNode(BaseNode): json.append(message.message.json_object) elif message.type == ToolInvokeMessage.MessageType.LINK: assert isinstance(message.message, ToolInvokeMessage.TextMessage) + + if message.meta: + transfer_method = message.meta.get("transfer_method", FileTransferMethod.TOOL_FILE) + else: + transfer_method = FileTransferMethod.TOOL_FILE + + tool_file_id = message.message.text.split("/")[-1].split(".")[0] + + with Session(db.engine) as session: + stmt = select(ToolFile).where(ToolFile.id == tool_file_id) + tool_file = session.scalar(stmt) + if tool_file is None: + raise ToolFileError(f"Tool file {tool_file_id} does not exist") + + mapping = { + "tool_file_id": tool_file_id, + "type": file_factory.get_file_type_by_mime_type(tool_file.mimetype), + "transfer_method": transfer_method, + "url": message.message.text, + } + + file = file_factory.build_from_mapping( + mapping=mapping, + tenant_id=self.tenant_id, + ) + files.append(file) + stream_text = f"Link: {message.message.text}\n" text += stream_text yield RunStreamChunkEvent(chunk_content=stream_text, from_variable_selector=[node_id, "text"]) From 46ba0a8781b698cbbcf456002818980fc7d69785 Mon Sep 17 00:00:00 2001 From: XiaoXiaoJiangYun <33343036+XiaoXiaoJiangYun@users.noreply.github.com> Date: Sat, 2 Aug 2025 12:14:43 +0800 Subject: [PATCH 108/415] Update metadata_service.py (#23271) --- api/services/metadata_service.py | 15 ++++++++++++--- 1 file changed, 12 insertions(+), 3 deletions(-) diff --git a/api/services/metadata_service.py b/api/services/metadata_service.py index cfcb121153..59a30040e8 100644 --- a/api/services/metadata_service.py +++ b/api/services/metadata_service.py @@ -79,7 +79,10 @@ class MetadataService: document_ids = [binding.document_id for binding in dataset_metadata_bindings] documents = DocumentService.get_document_by_ids(document_ids) for document in documents: - doc_metadata = copy.deepcopy(document.doc_metadata) + if not document.doc_metadata: + doc_metadata = {} + else: + doc_metadata = copy.deepcopy(document.doc_metadata) value = doc_metadata.pop(old_name, None) doc_metadata[name] = value document.doc_metadata = doc_metadata @@ -109,7 +112,10 @@ class MetadataService: document_ids = [binding.document_id for binding in dataset_metadata_bindings] documents = DocumentService.get_document_by_ids(document_ids) for document in documents: - doc_metadata = copy.deepcopy(document.doc_metadata) + if not document.doc_metadata: + doc_metadata = {} + else: + doc_metadata = copy.deepcopy(document.doc_metadata) doc_metadata.pop(metadata.name, None) document.doc_metadata = doc_metadata db.session.add(document) @@ -172,7 +178,10 @@ class MetadataService: document_ids = [] if documents: for document in documents: - doc_metadata = copy.deepcopy(document.doc_metadata) + if not document.doc_metadata: + doc_metadata = {} + else: + doc_metadata = copy.deepcopy(document.doc_metadata) doc_metadata.pop(BuiltInField.document_name.value, None) doc_metadata.pop(BuiltInField.uploader.value, None) doc_metadata.pop(BuiltInField.upload_date.value, None) From b2c8718f358107ae4d63e11b4132c8b97ebf4570 Mon Sep 17 00:00:00 2001 From: XiaoXiaoJiangYun <33343036+XiaoXiaoJiangYun@users.noreply.github.com> Date: Sat, 2 Aug 2025 12:15:23 +0800 Subject: [PATCH 109/415] Update metadata_service.py (#23272) --- api/services/metadata_service.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/api/services/metadata_service.py b/api/services/metadata_service.py index 59a30040e8..2a83588f41 100644 --- a/api/services/metadata_service.py +++ b/api/services/metadata_service.py @@ -143,7 +143,6 @@ class MetadataService: lock_key = f"dataset_metadata_lock_{dataset.id}" try: MetadataService.knowledge_base_metadata_lock_check(dataset.id, None) - dataset.built_in_field_enabled = True db.session.add(dataset) documents = DocumentService.get_working_documents_by_dataset_id(dataset.id) if documents: @@ -159,6 +158,7 @@ class MetadataService: doc_metadata[BuiltInField.source.value] = MetadataDataSource[document.data_source_type].value document.doc_metadata = doc_metadata db.session.add(document) + dataset.built_in_field_enabled = True db.session.commit() except Exception: logging.exception("Enable built-in field failed") @@ -172,7 +172,6 @@ class MetadataService: lock_key = f"dataset_metadata_lock_{dataset.id}" try: MetadataService.knowledge_base_metadata_lock_check(dataset.id, None) - dataset.built_in_field_enabled = False db.session.add(dataset) documents = DocumentService.get_working_documents_by_dataset_id(dataset.id) document_ids = [] @@ -190,6 +189,7 @@ class MetadataService: document.doc_metadata = doc_metadata db.session.add(document) document_ids.append(document.id) + dataset.built_in_field_enabled = False db.session.commit() except Exception: logging.exception("Disable built-in field failed") From f9b3cd1b68568b059af892a110d01777234b5a9e Mon Sep 17 00:00:00 2001 From: lyzno1 <92089059+lyzno1@users.noreply.github.com> Date: Sat, 2 Aug 2025 12:52:12 +0800 Subject: [PATCH 110/415] i18n: enhance check-i18n script with precise filtering and multiline support (#23298) --- web/__tests__/check-i18n.test.ts | 198 ++++++++++++++++++++++++++++- web/i18n-config/check-i18n.js | 62 +++++++-- web/i18n/de-DE/app-annotation.ts | 10 ++ web/i18n/es-ES/app-annotation.ts | 10 ++ web/i18n/fa-IR/app-annotation.ts | 10 ++ web/i18n/fa-IR/app.ts | 14 -- web/i18n/fr-FR/app-annotation.ts | 10 ++ web/i18n/fr-FR/app.ts | 14 -- web/i18n/hi-IN/app-annotation.ts | 10 ++ web/i18n/hi-IN/app-log.ts | 6 - web/i18n/hi-IN/app.ts | 14 -- web/i18n/it-IT/app-annotation.ts | 10 ++ web/i18n/it-IT/app.ts | 20 --- web/i18n/ko-KR/app-annotation.ts | 10 ++ web/i18n/pl-PL/app-annotation.ts | 10 ++ web/i18n/pl-PL/app.ts | 20 --- web/i18n/pt-BR/app-annotation.ts | 10 ++ web/i18n/pt-BR/app-log.ts | 5 - web/i18n/pt-BR/app.ts | 14 -- web/i18n/ro-RO/app-annotation.ts | 10 ++ web/i18n/ro-RO/app.ts | 14 -- web/i18n/ru-RU/app-annotation.ts | 10 ++ web/i18n/ru-RU/app.ts | 14 -- web/i18n/sl-SI/app-annotation.ts | 10 ++ web/i18n/sl-SI/app.ts | 14 -- web/i18n/th-TH/app-annotation.ts | 10 ++ web/i18n/th-TH/app.ts | 14 -- web/i18n/tr-TR/app-annotation.ts | 10 ++ web/i18n/tr-TR/app.ts | 14 -- web/i18n/uk-UA/app-annotation.ts | 10 ++ web/i18n/uk-UA/app.ts | 14 -- web/i18n/vi-VN/app-annotation.ts | 10 ++ web/i18n/vi-VN/app-log.ts | 2 - web/i18n/vi-VN/app.ts | 14 -- web/i18n/zh-Hant/app-annotation.ts | 9 ++ 35 files changed, 420 insertions(+), 216 deletions(-) diff --git a/web/__tests__/check-i18n.test.ts b/web/__tests__/check-i18n.test.ts index 3bde095f4b..b4c4f1540d 100644 --- a/web/__tests__/check-i18n.test.ts +++ b/web/__tests__/check-i18n.test.ts @@ -265,7 +265,6 @@ export default translation fs.writeFileSync(path.join(testZhDir, 'pages.ts'), file2Content) const allEnKeys = await getKeysFromLanguage('en-US') - const allZhKeys = await getKeysFromLanguage('zh-Hans') // Test file filtering logic const targetFile = 'components' @@ -563,4 +562,201 @@ export default translation expect(enKeys.length - zhKeysExtra.length).toBe(-2) // -2 means 2 extra keys }) }) + + describe('Auto-remove multiline key-value pairs', () => { + // Helper function to simulate removeExtraKeysFromFile logic + function removeExtraKeysFromFile(content: string, keysToRemove: string[]): string { + const lines = content.split('\n') + const linesToRemove: number[] = [] + + for (const keyToRemove of keysToRemove) { + let targetLineIndex = -1 + const linesToRemoveForKey: number[] = [] + + // Find the key line (simplified for single-level keys in test) + for (let i = 0; i < lines.length; i++) { + const line = lines[i] + const keyPattern = new RegExp(`^\\s*${keyToRemove}\\s*:`) + if (keyPattern.test(line)) { + targetLineIndex = i + break + } + } + + if (targetLineIndex !== -1) { + linesToRemoveForKey.push(targetLineIndex) + + // Check if this is a multiline key-value pair + const keyLine = lines[targetLineIndex] + const trimmedKeyLine = keyLine.trim() + + // If key line ends with ":" (not complete value), it's likely multiline + if (trimmedKeyLine.endsWith(':') && !trimmedKeyLine.includes('{') && !trimmedKeyLine.match(/:\s*['"`]/)) { + // Find the value lines that belong to this key + let currentLine = targetLineIndex + 1 + let foundValue = false + + while (currentLine < lines.length) { + const line = lines[currentLine] + const trimmed = line.trim() + + // Skip empty lines + if (trimmed === '') { + currentLine++ + continue + } + + // Check if this line starts a new key (indicates end of current value) + if (trimmed.match(/^\w+\s*:/)) + break + + // Check if this line is part of the value + if (trimmed.startsWith('\'') || trimmed.startsWith('"') || trimmed.startsWith('`') || foundValue) { + linesToRemoveForKey.push(currentLine) + foundValue = true + + // Check if this line ends the value (ends with quote and comma/no comma) + if ((trimmed.endsWith('\',') || trimmed.endsWith('",') || trimmed.endsWith('`,') + || trimmed.endsWith('\'') || trimmed.endsWith('"') || trimmed.endsWith('`')) + && !trimmed.startsWith('//')) + break + } + else { + break + } + + currentLine++ + } + } + + linesToRemove.push(...linesToRemoveForKey) + } + } + + // Remove duplicates and sort in reverse order + const uniqueLinesToRemove = [...new Set(linesToRemove)].sort((a, b) => b - a) + + for (const lineIndex of uniqueLinesToRemove) + lines.splice(lineIndex, 1) + + return lines.join('\n') + } + + it('should remove single-line key-value pairs correctly', () => { + const content = `const translation = { + keepThis: 'This should stay', + removeThis: 'This should be removed', + alsoKeep: 'This should also stay', +} + +export default translation` + + const result = removeExtraKeysFromFile(content, ['removeThis']) + + expect(result).toContain('keepThis: \'This should stay\'') + expect(result).toContain('alsoKeep: \'This should also stay\'') + expect(result).not.toContain('removeThis: \'This should be removed\'') + }) + + it('should remove multiline key-value pairs completely', () => { + const content = `const translation = { + keepThis: 'This should stay', + removeMultiline: + 'This is a multiline value that should be removed completely', + alsoKeep: 'This should also stay', +} + +export default translation` + + const result = removeExtraKeysFromFile(content, ['removeMultiline']) + + expect(result).toContain('keepThis: \'This should stay\'') + expect(result).toContain('alsoKeep: \'This should also stay\'') + expect(result).not.toContain('removeMultiline:') + expect(result).not.toContain('This is a multiline value that should be removed completely') + }) + + it('should handle mixed single-line and multiline removals', () => { + const content = `const translation = { + keepThis: 'Keep this', + removeSingle: 'Remove this single line', + removeMultiline: + 'Remove this multiline value', + anotherMultiline: + 'Another multiline that spans multiple lines', + keepAnother: 'Keep this too', +} + +export default translation` + + const result = removeExtraKeysFromFile(content, ['removeSingle', 'removeMultiline', 'anotherMultiline']) + + expect(result).toContain('keepThis: \'Keep this\'') + expect(result).toContain('keepAnother: \'Keep this too\'') + expect(result).not.toContain('removeSingle:') + expect(result).not.toContain('removeMultiline:') + expect(result).not.toContain('anotherMultiline:') + expect(result).not.toContain('Remove this single line') + expect(result).not.toContain('Remove this multiline value') + expect(result).not.toContain('Another multiline that spans multiple lines') + }) + + it('should properly detect multiline vs single-line patterns', () => { + const multilineContent = `const translation = { + singleLine: 'This is single line', + multilineKey: + 'This is multiline', + keyWithColon: 'Value with: colon inside', + objectKey: { + nested: 'value' + }, +} + +export default translation` + + // Test that single line with colon in value is not treated as multiline + const result1 = removeExtraKeysFromFile(multilineContent, ['keyWithColon']) + expect(result1).not.toContain('keyWithColon:') + expect(result1).not.toContain('Value with: colon inside') + + // Test that true multiline is handled correctly + const result2 = removeExtraKeysFromFile(multilineContent, ['multilineKey']) + expect(result2).not.toContain('multilineKey:') + expect(result2).not.toContain('This is multiline') + + // Test that object key removal works (note: this is a simplified test) + // In real scenario, object removal would be more complex + const result3 = removeExtraKeysFromFile(multilineContent, ['objectKey']) + expect(result3).not.toContain('objectKey: {') + // Note: Our simplified test function doesn't handle nested object removal perfectly + // This is acceptable as it's testing the main multiline string removal functionality + }) + + it('should handle real-world Polish translation structure', () => { + const polishContent = `const translation = { + createApp: 'UTWÓRZ APLIKACJĘ', + newApp: { + captionAppType: 'Jaki typ aplikacji chcesz stworzyć?', + chatbotDescription: + 'Zbuduj aplikację opartą na czacie. Ta aplikacja używa formatu pytań i odpowiedzi.', + agentDescription: + 'Zbuduj inteligentnego agenta, który może autonomicznie wybierać narzędzia.', + basic: 'Podstawowy', + }, +} + +export default translation` + + const result = removeExtraKeysFromFile(polishContent, ['captionAppType', 'chatbotDescription', 'agentDescription']) + + expect(result).toContain('createApp: \'UTWÓRZ APLIKACJĘ\'') + expect(result).toContain('basic: \'Podstawowy\'') + expect(result).not.toContain('captionAppType:') + expect(result).not.toContain('chatbotDescription:') + expect(result).not.toContain('agentDescription:') + expect(result).not.toContain('Jaki typ aplikacji') + expect(result).not.toContain('Zbuduj aplikację opartą na czacie') + expect(result).not.toContain('Zbuduj inteligentnego agenta') + }) + }) }) diff --git a/web/i18n-config/check-i18n.js b/web/i18n-config/check-i18n.js index edc2566a3c..cc55277613 100644 --- a/web/i18n-config/check-i18n.js +++ b/web/i18n-config/check-i18n.js @@ -129,10 +129,11 @@ async function removeExtraKeysFromFile(language, fileName, extraKeys) { let modified = false const linesToRemove = [] - // Find lines to remove for each key + // Find lines to remove for each key (including multiline values) for (const keyToRemove of fileSpecificKeys) { const keyParts = keyToRemove.split('.') let targetLineIndex = -1 + const linesToRemoveForKey = [] // Build regex pattern for the exact key path if (keyParts.length === 1) { @@ -183,8 +184,53 @@ async function removeExtraKeysFromFile(language, fileName, extraKeys) { } if (targetLineIndex !== -1) { - linesToRemove.push(targetLineIndex) - console.log(`🗑️ Found key to remove: ${keyToRemove} at line ${targetLineIndex + 1}`) + linesToRemoveForKey.push(targetLineIndex) + + // Check if this is a multiline key-value pair + const keyLine = lines[targetLineIndex] + const trimmedKeyLine = keyLine.trim() + + // If key line ends with ":" (not ":", "{ " or complete value), it's likely multiline + if (trimmedKeyLine.endsWith(':') && !trimmedKeyLine.includes('{') && !trimmedKeyLine.match(/:\s*['"`]/)) { + // Find the value lines that belong to this key + let currentLine = targetLineIndex + 1 + let foundValue = false + + while (currentLine < lines.length) { + const line = lines[currentLine] + const trimmed = line.trim() + + // Skip empty lines + if (trimmed === '') { + currentLine++ + continue + } + + // Check if this line starts a new key (indicates end of current value) + if (trimmed.match(/^\w+\s*:/)) + break + + // Check if this line is part of the value + if (trimmed.startsWith('\'') || trimmed.startsWith('"') || trimmed.startsWith('`') || foundValue) { + linesToRemoveForKey.push(currentLine) + foundValue = true + + // Check if this line ends the value (ends with quote and comma/no comma) + if ((trimmed.endsWith('\',') || trimmed.endsWith('",') || trimmed.endsWith('`,') + || trimmed.endsWith('\'') || trimmed.endsWith('"') || trimmed.endsWith('`')) + && !trimmed.startsWith('//')) + break + } + else { + break + } + + currentLine++ + } + } + + linesToRemove.push(...linesToRemoveForKey) + console.log(`🗑️ Found key to remove: ${keyToRemove} at line ${targetLineIndex + 1}${linesToRemoveForKey.length > 1 ? ` (multiline, ${linesToRemoveForKey.length} lines)` : ''}`) modified = true } else { @@ -193,10 +239,10 @@ async function removeExtraKeysFromFile(language, fileName, extraKeys) { } if (modified) { - // Remove lines in reverse order to maintain correct indices - linesToRemove.sort((a, b) => b - a) + // Remove duplicates and sort in reverse order to maintain correct indices + const uniqueLinesToRemove = [...new Set(linesToRemove)].sort((a, b) => b - a) - for (const lineIndex of linesToRemove) { + for (const lineIndex of uniqueLinesToRemove) { const line = lines[lineIndex] console.log(`🗑️ Removing line ${lineIndex + 1}: ${line.trim()}`) lines.splice(lineIndex, 1) @@ -237,7 +283,7 @@ async function main() { // Filter target keys by file if specified const targetKeys = targetFile - ? allTargetKeys.filter(key => key.startsWith(targetFile.replace(/[-_](.)/g, (_, c) => c.toUpperCase()))) + ? allTargetKeys.filter(key => key.startsWith(`${targetFile.replace(/[-_](.)/g, (_, c) => c.toUpperCase())}.`)) : allTargetKeys // Filter languages by target language if specified @@ -247,7 +293,7 @@ async function main() { // Filter language keys by file if specified const languagesKeys = targetFile - ? allLanguagesKeys.map(keys => keys.filter(key => key.startsWith(targetFile.replace(/[-_](.)/g, (_, c) => c.toUpperCase())))) + ? allLanguagesKeys.map(keys => keys.filter(key => key.startsWith(`${targetFile.replace(/[-_](.)/g, (_, c) => c.toUpperCase())}.`))) : allLanguagesKeys const keysCount = languagesKeys.map(keys => keys.length) diff --git a/web/i18n/de-DE/app-annotation.ts b/web/i18n/de-DE/app-annotation.ts index 2e141ed380..be6f1948a2 100644 --- a/web/i18n/de-DE/app-annotation.ts +++ b/web/i18n/de-DE/app-annotation.ts @@ -83,6 +83,16 @@ const translation = { configConfirmBtn: 'Speichern', }, embeddingModelSwitchTip: 'Anmerkungstext-Vektorisierungsmodell, das Wechseln von Modellen wird neu eingebettet, was zusätzliche Kosten verursacht.', + list: { + delete: { + title: 'Bist du sicher, dass du löschen möchtest?', + }, + }, + batchAction: { + cancel: 'Abbrechen', + selected: 'Ausgewählt', + delete: 'Löschen', + }, } export default translation diff --git a/web/i18n/es-ES/app-annotation.ts b/web/i18n/es-ES/app-annotation.ts index 2a797edcc3..9a25037e18 100644 --- a/web/i18n/es-ES/app-annotation.ts +++ b/web/i18n/es-ES/app-annotation.ts @@ -83,6 +83,16 @@ const translation = { configConfirmBtn: 'Guardar', }, embeddingModelSwitchTip: 'Modelo de vectorización de texto de anotación, cambiar de modelo volverá a incrustar, lo que resultará en costos adicionales.', + list: { + delete: { + title: '¿Estás seguro de que deseas eliminar?', + }, + }, + batchAction: { + delete: 'Eliminar', + selected: 'Seleccionado', + cancel: 'Cancelar', + }, } export default translation diff --git a/web/i18n/fa-IR/app-annotation.ts b/web/i18n/fa-IR/app-annotation.ts index d66c2eb0e5..7bedf8371f 100644 --- a/web/i18n/fa-IR/app-annotation.ts +++ b/web/i18n/fa-IR/app-annotation.ts @@ -83,6 +83,16 @@ const translation = { configConfirmBtn: 'ذخیره', }, embeddingModelSwitchTip: 'مدل برداری‌سازی متن یادداشت، تغییر مدل‌ها باعث جاسازی مجدد خواهد شد و هزینه‌های اضافی به همراه خواهد داشت.', + list: { + delete: { + title: 'آیا مطمئن هستید که می‌خواهید حذف کنید؟', + }, + }, + batchAction: { + cancel: 'لغو', + selected: 'انتخاب شده', + delete: 'حذف کنید', + }, } export default translation diff --git a/web/i18n/fa-IR/app.ts b/web/i18n/fa-IR/app.ts index b2cde413d9..e28aa1946c 100644 --- a/web/i18n/fa-IR/app.ts +++ b/web/i18n/fa-IR/app.ts @@ -31,21 +31,7 @@ const translation = { newApp: { startFromBlank: 'ایجاد از خالی', startFromTemplate: 'ایجاد از قالب', - captionAppType: 'چه نوع برنامه‌ای می‌خواهید ایجاد کنید؟', - chatbotDescription: 'ساخت برنامه‌ای مبتنی بر چت. این برنامه از قالب پرسش و پاسخ استفاده می‌کند و امکان چندین دور مکالمه مداوم را فراهم می‌کند.', - completionDescription: 'ساخت برنامه‌ای که متن با کیفیت بالا بر اساس درخواست‌ها تولید می‌کند، مانند تولید مقالات، خلاصه‌ها، ترجمه‌ها و بیشتر.', - completionWarning: 'این نوع برنامه دیگر پشتیبانی نمی‌شود.', - agentDescription: 'ساخت نماینده هوشمند که می‌تواند ابزارها را برای انجام وظایف به طور خودمختار انتخاب کند', - workflowDescription: 'ساخت برنامه‌ای که متن با کیفیت بالا بر اساس گردش کار با درجه بالای سفارشی‌سازی تولید می‌کند. مناسب برای کاربران با تجربه.', workflowWarning: 'در حال حاضر در نسخه بتا', - chatbotType: 'روش سازماندهی چت‌بات', - basic: 'اساسی', - basicTip: 'برای مبتدیان، می‌توان بعداً به Chatflow تغییر داد', - basicFor: 'برای مبتدیان', - basicDescription: 'سازماندهی اساسی به شما اجازه می‌دهد تا یک برنامه چت‌بات را با تنظیمات ساده و بدون امکان تغییر درخواست‌های داخلی سازماندهی کنید. مناسب برای مبتدیان است.', - advanced: 'Chatflow', - advancedFor: 'برای کاربران پیشرفته', - advancedDescription: 'سازماندهی گردش کار، چت‌بات‌ها را به صورت گردش کار سازماندهی می‌کند و درجه بالایی از سفارشی‌سازی، از جمله امکان ویرایش درخواست‌های داخلی را فراهم می‌کند. مناسب برای کاربران با تجربه است.', captionName: 'آیکون و نام برنامه', appNamePlaceholder: 'به برنامه خود یک نام بدهید', captionDescription: 'توضیحات', diff --git a/web/i18n/fr-FR/app-annotation.ts b/web/i18n/fr-FR/app-annotation.ts index 3a34e326f4..648a1b93cc 100644 --- a/web/i18n/fr-FR/app-annotation.ts +++ b/web/i18n/fr-FR/app-annotation.ts @@ -83,6 +83,16 @@ const translation = { configConfirmBtn: 'Enregistrer', }, embeddingModelSwitchTip: 'Modèle de vectorisation de texte d\'annotation, changer de modèles entraînera une ré-intégration, ce qui entraînera des coûts supplémentaires.', + list: { + delete: { + title: 'Êtes-vous sûr de vouloir supprimer ?', + }, + }, + batchAction: { + cancel: 'Annuler', + delete: 'Supprimer', + selected: 'sélectionné', + }, } export default translation diff --git a/web/i18n/fr-FR/app.ts b/web/i18n/fr-FR/app.ts index f572658d12..a34d6a31da 100644 --- a/web/i18n/fr-FR/app.ts +++ b/web/i18n/fr-FR/app.ts @@ -27,21 +27,7 @@ const translation = { newApp: { startFromBlank: 'Créer à partir de zéro', startFromTemplate: 'Créer à partir d\'un modèle', - captionAppType: 'Quel type d\'application souhaitez-vous créer ?', - chatbotDescription: 'Construisez une application basée sur le chat. Cette application utilise un format question-réponse, permettant ainsi plusieurs tours de conversation continue.', - completionDescription: 'Construisez une application qui génère du texte de haute qualité en fonction des invites, telles que la génération d\'articles, de résumés, de traductions, et plus encore.', - completionWarning: 'Ce type d\'application ne sera plus pris en charge.', - agentDescription: 'Construisez un agent intelligent capable de choisir automatiquement les outils pour accomplir les tâches', - workflowDescription: 'Construisez une application qui génère du texte de haute qualité en fonction d\'un flux de travail avec un haut degré de personnalisation. Il convient aux utilisateurs expérimentés.', workflowWarning: 'Actuellement en version bêta', - chatbotType: 'Méthode d\'orchestration du chatbot', - basic: 'Basique', - basicTip: 'Pour les débutants, peut passer à Chatflow plus tard', - basicFor: 'POUR LES DÉBUTANTS', - basicDescription: 'L\'orchestration de base permet d\'orchestrer une application Chatbot à l\'aide de paramètres simples, sans possibilité de modifier les invites intégrées. Il convient aux débutants.', - advanced: 'Chatflow', - advancedFor: 'Pour les utilisateurs avancés', - advancedDescription: 'L\'orchestration de flux de travail orchestre les Chatbots sous forme de workflows, offrant un haut degré de personnalisation, y compris la possibilité de modifier les invites intégrées. Il convient aux utilisateurs expérimentés.', captionName: 'Icône et nom de l\'application', appNamePlaceholder: 'Donnez un nom à votre application', captionDescription: 'Description', diff --git a/web/i18n/hi-IN/app-annotation.ts b/web/i18n/hi-IN/app-annotation.ts index b89f33c438..51eb14cc55 100644 --- a/web/i18n/hi-IN/app-annotation.ts +++ b/web/i18n/hi-IN/app-annotation.ts @@ -83,6 +83,16 @@ const translation = { configConfirmBtn: 'सहेजें', }, embeddingModelSwitchTip: 'एनोटेशन टेक्स्ट वेक्टराइजेशन मॉडल, मॉडल बदलने से पुनः एम्बेड किया जाएगा, जिससे अतिरिक्त लागतें उत्पन्न होंगी।', + list: { + delete: { + title: 'क्या आप सुनिश्चित हैं कि हटाएं?', + }, + }, + batchAction: { + selected: 'चुना हुआ', + delete: 'हटाएँ', + cancel: 'रद्द करें', + }, } export default translation diff --git a/web/i18n/hi-IN/app-log.ts b/web/i18n/hi-IN/app-log.ts index 746d558fb9..90bb30e621 100644 --- a/web/i18n/hi-IN/app-log.ts +++ b/web/i18n/hi-IN/app-log.ts @@ -90,12 +90,6 @@ const translation = { viewLog: 'व्यू लॉग', agentLogDetail: { agentMode: 'एजेंट मोड', - startTime: 'शुरू करने का समय', - endTime: 'समाप्ति समय', - duration: 'अवधि', - promptTemplate: 'प्रॉम्प्ट टेम्पलेट', - promptInput: 'प्रॉम्प्ट इनपुट', - response: 'प्रतिक्रिया', iterations: 'पुनरूक्तियाँ', toolUsed: 'प्रयुक्त उपकरण', finalProcessing: 'अंतिम प्रसंस्करण', diff --git a/web/i18n/hi-IN/app.ts b/web/i18n/hi-IN/app.ts index 9b13fdc392..fc60901452 100644 --- a/web/i18n/hi-IN/app.ts +++ b/web/i18n/hi-IN/app.ts @@ -27,21 +27,7 @@ const translation = { newApp: { startFromBlank: 'रिक्त से बनाएँ', startFromTemplate: 'टेम्पलेट से बनाएँ', - captionAppType: 'आप किस प्रकार का ऐप बनाना चाहते हैं?', - chatbotDescription: 'एक चैट-आधारित एप्लिकेशन बनाएं। यह ऐप प्रश्न-उत्तर प्रारूप का उपयोग करता है, जिससे निरंतर बातचीत के कई राउंड संभव होते हैं।', - completionDescription: 'ऐसा एप्लिकेशन बनाएं जो प्रॉम्प्ट्स के आधार पर उच्च गुणवत्ता वाला टेक्स्ट उत्पन्न करता है, जैसे लेख, सारांश, अनुवाद आदि उत्पन्न करना।', - completionWarning: 'इस प्रकार के ऐप का समर्थन नहीं किया जाएगा।', - agentDescription: 'एक बुद्धिमान एजेंट बनाएं जो स्वायत्त रूप से टूल्स का चयन करके कार्य पूरा कर सके।', - workflowDescription: 'एक एप्लिकेशन बनाएं जो वर्कफ़्लो ऑर्केस्ट्रेट्स के साथ उच्च डिग्री के कस्टमाइज़ेशन के साथ उच्च गुणवत्ता वाला टेक्स्ट उत्पन्न करता है। यह अनुभवी उपयोगकर्ताओं के लिए उपयुक्त है।', workflowWarning: 'वर्तमान में बीटा में', - chatbotType: 'चैटबॉट ऑर्केस्ट्रेट विधि', - basic: 'बेसिक', - basicTip: 'शुरुआती लोगों के लिए, बाद में चैटफ़्लो में स्विच कर सकते हैं', - basicFor: 'शुरुआती लोगों के लिए', - basicDescription: 'बेसिक ऑर्केस्ट्रेट चैटबॉट ऐप को सरल सेटिंग्स का उपयोग करके ऑर्केस्ट्रेट करने की अनुमति देता है, बिना अंतर्निहित प्रॉम्प्ट्स को संशोधित करने की क्षमता के। यह शुरुआती लोगों के लिए उपयुक्त है।', - advanced: 'चैटफ्लो', - advancedFor: 'अनुभवी उपयोगकर्ताओं के लिए', - advancedDescription: 'वर्कफ़्लो ऑर्केस्ट्रेट वर्कफ़्लोज़ के रूप में चैटबॉट्स को ऑर्केस्ट्रेट करता है, जिसमें अंतर्निहित प्रॉम्प्ट्स को संपादित करने की क्षमता सहित उच्च डिग्री का कस्टमाइज़ेशन होता है। यह अनुभवी उपयोगकर्ताओं के लिए उपयुक्त है।', captionName: 'ऐप आइकन और नाम', appNamePlaceholder: 'अपने ऐप को नाम दें', captionDescription: 'विवरण', diff --git a/web/i18n/it-IT/app-annotation.ts b/web/i18n/it-IT/app-annotation.ts index bba10ba84e..288068c78e 100644 --- a/web/i18n/it-IT/app-annotation.ts +++ b/web/i18n/it-IT/app-annotation.ts @@ -85,6 +85,16 @@ const translation = { }, embeddingModelSwitchTip: 'Modello di vettorizzazione del testo di annotazione, il cambio di modello comporterà una nuova integrazione, comportando costi aggiuntivi.', + list: { + delete: { + title: 'Sei sicuro di voler eliminare?', + }, + }, + batchAction: { + delete: 'Elimina', + cancel: 'Annulla', + selected: 'selezionato', + }, } export default translation diff --git a/web/i18n/it-IT/app.ts b/web/i18n/it-IT/app.ts index 66cb50b2a0..01ee29423e 100644 --- a/web/i18n/it-IT/app.ts +++ b/web/i18n/it-IT/app.ts @@ -27,27 +27,7 @@ const translation = { newApp: { startFromBlank: 'Crea da zero', startFromTemplate: 'Crea da modello', - captionAppType: 'Che tipo di app vuoi creare?', - chatbotDescription: - 'Crea un\'applicazione basata sulla chat. Questa app utilizza un formato domanda-e-risposta, consentendo più round di conversazione continua.', - completionDescription: - 'Crea un\'applicazione che genera testo di alta qualità basato sui prompt, come articoli, riassunti, traduzioni e altro.', - completionWarning: 'Questo tipo di app non sarà più supportato.', - agentDescription: - 'Crea un Agente intelligente che può scegliere autonomamente gli strumenti per completare i compiti', - workflowDescription: - 'Crea un\'applicazione che genera testo di alta qualità basato su flussi di lavoro orchestrati con un alto grado di personalizzazione. È adatto per utenti esperti.', workflowWarning: 'Attualmente in beta', - chatbotType: 'Metodo di orchestrazione Chatbot', - basic: 'Base', - basicTip: 'Per principianti, può passare a Chatflow in seguito', - basicFor: 'PER PRINCIPIANTI', - basicDescription: - 'L\'Orchestrazione di base consente l\'orchestrazione di un\'app Chatbot utilizzando impostazioni semplici, senza la possibilità di modificare i prompt integrati. È adatta per principianti.', - advanced: 'Chatflow', - advancedFor: 'Per utenti avanzati', - advancedDescription: - 'L\'Orchestrazione del flusso di lavoro orchestra i Chatbot sotto forma di flussi di lavoro, offrendo un alto grado di personalizzazione, inclusa la possibilità di modificare i prompt integrati. È adatta per utenti esperti.', captionName: 'Icona e nome dell\'app', appNamePlaceholder: 'Dai un nome alla tua app', captionDescription: 'Descrizione', diff --git a/web/i18n/ko-KR/app-annotation.ts b/web/i18n/ko-KR/app-annotation.ts index 662dc3f083..7e0cee020b 100644 --- a/web/i18n/ko-KR/app-annotation.ts +++ b/web/i18n/ko-KR/app-annotation.ts @@ -83,6 +83,16 @@ const translation = { configConfirmBtn: '저장', }, embeddingModelSwitchTip: '어노테이션 텍스트의 임베딩 모델입니다. 모델을 변경하면 다시 임베딩되며 추가 비용이 발생합니다.', + list: { + delete: { + title: '삭제할 것인지 확실합니까?', + }, + }, + batchAction: { + cancel: '취소', + delete: '삭제', + selected: '선택됨', + }, } export default translation diff --git a/web/i18n/pl-PL/app-annotation.ts b/web/i18n/pl-PL/app-annotation.ts index 32efc76e66..c0f96a146e 100644 --- a/web/i18n/pl-PL/app-annotation.ts +++ b/web/i18n/pl-PL/app-annotation.ts @@ -85,6 +85,16 @@ const translation = { }, embeddingModelSwitchTip: 'Model wektoryzacji tekstu adnotacji, przełączanie modeli spowoduje ponowne osadzenie, co wiąże się z dodatkowymi kosztami.', + list: { + delete: { + title: 'Czy na pewno chcesz usunąć?', + }, + }, + batchAction: { + selected: 'Wybrany', + delete: 'Usuń', + cancel: 'Anuluj', + }, } export default translation diff --git a/web/i18n/pl-PL/app.ts b/web/i18n/pl-PL/app.ts index 9a42b702e7..8751dedc99 100644 --- a/web/i18n/pl-PL/app.ts +++ b/web/i18n/pl-PL/app.ts @@ -27,27 +27,7 @@ const translation = { newApp: { startFromBlank: 'Utwórz od podstaw', startFromTemplate: 'Utwórz z szablonu', - captionAppType: 'Jaki typ aplikacji chcesz stworzyć?', - chatbotDescription: - 'Zbuduj aplikację opartą na czacie. Ta aplikacja używa formatu pytań i odpowiedzi, umożliwiając wielokrotne rundy ciągłej konwersacji.', - completionDescription: - 'Zbuduj aplikację generującą teksty wysokiej jakości na podstawie monitów, takich jak generowanie artykułów, streszczeń, tłumaczeń i innych.', - completionWarning: 'Ten typ aplikacji nie będzie już obsługiwany.', - agentDescription: - 'Zbuduj inteligentnego agenta, który może autonomicznie wybierać narzędzia do wykonywania zadań', - workflowDescription: - 'Zbuduj aplikację, która w oparciu o przepływ pracy generuje teksty wysokiej jakości z dużą możliwością dostosowania. Jest odpowiednia dla doświadczonych użytkowników.', workflowWarning: 'Obecnie w fazie beta', - chatbotType: 'Metoda orkiestracji chatbota', - basic: 'Podstawowy', - basicTip: 'Dla początkujących, można przełączyć się później na Chatflow', - basicFor: 'Dla początkujących', - basicDescription: - 'Podstawowa orkiestracja pozwala na skonfigurowanie aplikacji Chatbot za pomocą prostych ustawień, bez możliwości modyfikacji wbudowanych monitów. Jest odpowiednia dla początkujących.', - advanced: 'Chatflow', - advancedFor: 'Dla zaawansowanych użytkowników', - advancedDescription: - 'Orkiestracja przepływu pracy organizuje Chatboty w formie przepływów pracy, oferując wysoki stopień dostosowania, w tym możliwość edycji wbudowanych monitów. Jest odpowiednia dla doświadczonych użytkowników.', captionName: 'Ikona i nazwa aplikacji', appNamePlaceholder: 'Podaj nazwę swojej aplikacji', captionDescription: 'Opis', diff --git a/web/i18n/pt-BR/app-annotation.ts b/web/i18n/pt-BR/app-annotation.ts index 9e2760bf24..8c1d511f8d 100644 --- a/web/i18n/pt-BR/app-annotation.ts +++ b/web/i18n/pt-BR/app-annotation.ts @@ -83,6 +83,16 @@ const translation = { configConfirmBtn: 'Salvar', }, embeddingModelSwitchTip: 'Modelo de vetorização de texto de anotação, a troca de modelos será refeita, resultando em custos adicionais.', + list: { + delete: { + title: 'Você tem certeza que deseja excluir?', + }, + }, + batchAction: { + cancel: 'Cancelar', + selected: 'Selecionado', + delete: 'Excluir', + }, } export default translation diff --git a/web/i18n/pt-BR/app-log.ts b/web/i18n/pt-BR/app-log.ts index 0b4cbc81e1..ef97f6abff 100644 --- a/web/i18n/pt-BR/app-log.ts +++ b/web/i18n/pt-BR/app-log.ts @@ -87,11 +87,6 @@ const translation = { agentLog: 'Registro do agente', viewLog: 'Ver Registro', agenteLogDetail: { - agentMode: 'Modo Agente', - toolUsed: 'Ferramenta usada', - iterations: 'Iterações', - iteration: 'Iteração', - finalProcessing: 'Processamento Final', }, agentLogDetail: { iterations: 'Iterações', diff --git a/web/i18n/pt-BR/app.ts b/web/i18n/pt-BR/app.ts index 6122a75a97..1f44ae9e5a 100644 --- a/web/i18n/pt-BR/app.ts +++ b/web/i18n/pt-BR/app.ts @@ -27,21 +27,7 @@ const translation = { newApp: { startFromBlank: 'Criar do zero', startFromTemplate: 'Criar do modelo', - captionAppType: 'Que tipo de aplicativo você deseja criar?', - chatbotDescription: 'Construa um aplicativo baseado em chat. Este aplicativo usa um formato de pergunta e resposta, permitindo várias rodadas de conversa contínua.', - completionDescription: 'Construa um aplicativo que gera texto de alta qualidade com base em prompts, como geração de artigos, resumos, traduções e muito mais.', - completionWarning: 'Este tipo de aplicativo não será mais suportado.', - agentDescription: 'Construa um Agente inteligente que pode escolher ferramentas para completar as tarefas autonomamente', - workflowDescription: 'Construa um aplicativo que gera texto de alta qualidade com base em fluxo de trabalho com alto grau de personalização. É adequado para usuários experientes.', workflowWarning: 'Atualmente em beta', - chatbotType: 'Método de orquestração do Chatbot', - basic: 'Básico', - basicTip: 'Para iniciantes, pode mudar para o Chatflow mais tarde', - basicFor: 'PARA INICIANTES', - basicDescription: 'A Orquestração Básica permite orquestrar um aplicativo Chatbot usando configurações simples, sem a capacidade de modificar prompts integrados. É adequado para iniciantes.', - advanced: 'Chatflow', - advancedFor: 'Para usuários avançados', - advancedDescription: 'A Orquestração de Fluxo de Trabalho orquestra Chatbots na forma de fluxos de trabalho, oferecendo um alto grau de personalização, incluindo a capacidade de editar prompts integrados. É adequado para usuários experientes.', captionName: 'Ícone e nome do aplicativo', appNamePlaceholder: 'Dê um nome para o seu aplicativo', captionDescription: 'Descrição', diff --git a/web/i18n/ro-RO/app-annotation.ts b/web/i18n/ro-RO/app-annotation.ts index 67feb9db1f..66c1c3aa29 100644 --- a/web/i18n/ro-RO/app-annotation.ts +++ b/web/i18n/ro-RO/app-annotation.ts @@ -83,6 +83,16 @@ const translation = { configConfirmBtn: 'Salvează', }, embeddingModelSwitchTip: 'Model de vectorizare a textului anotației, schimbarea modelelor va fi reîncorporată, rezultând costuri suplimentare.', + list: { + delete: { + title: 'Ești sigur că vrei să ștergi?', + }, + }, + batchAction: { + cancel: 'Anulează', + delete: 'Șterge', + selected: 'Selectat', + }, } export default translation diff --git a/web/i18n/ro-RO/app.ts b/web/i18n/ro-RO/app.ts index d674b4ca82..2559eea20f 100644 --- a/web/i18n/ro-RO/app.ts +++ b/web/i18n/ro-RO/app.ts @@ -27,21 +27,7 @@ const translation = { newApp: { startFromBlank: 'Creează din Nou', startFromTemplate: 'Creează din Șablon', - captionAppType: 'Ce tip de aplicație vrei să creezi?', - chatbotDescription: 'Construiește o aplicație bazată pe chat. Această aplicație folosește un format întrebare-răspuns, permițând mai multe runde de conversație continuă.', - completionDescription: 'Construiește o aplicație care generează text de înaltă calitate pe baza indicațiilor, cum ar fi generarea de articole, rezumate, traduceri și mai multe.', - completionWarning: 'Acest tip de aplicație nu va mai fi acceptat.', - agentDescription: 'Construiește un Agent inteligent care poate alege în mod autonom instrumentele pentru a îndeplini sarcinile', - workflowDescription: 'Construiește o aplicație care generează text de înaltă calitate pe baza unui flux de lucru orchestrat cu un grad ridicat de personalizare. Este potrivit pentru utilizatorii experimentați.', workflowWarning: 'În prezent în beta', - chatbotType: 'Metodă de orchestrare a chatbot-ului', - basic: 'De bază', - basicTip: 'Pentru începători, se poate comuta la Chatflow mai târziu', - basicFor: 'PENTRU ÎNCEPĂTORI', - basicDescription: 'Orchestrarea de bază permite orchestrarea unei aplicații Chatbot folosind setări simple, fără posibilitatea de a modifica prompturile încorporate. Este potrivit pentru începători.', - advanced: 'Chatflow', - advancedFor: 'Pentru utilizatori avansați', - advancedDescription: 'Orchestrarea fluxului de lucru orchestrează chatboți sub forma fluxurilor de lucru, oferind un grad ridicat de personalizare, inclusiv posibilitatea de a edita prompturile încorporate. Este potrivit pentru utilizatorii experimentați.', captionName: 'Pictogramă și nume aplicație', appNamePlaceholder: 'Dă-i aplicației tale un nume', captionDescription: 'Descriere', diff --git a/web/i18n/ru-RU/app-annotation.ts b/web/i18n/ru-RU/app-annotation.ts index e189c9ca93..5d55e40174 100644 --- a/web/i18n/ru-RU/app-annotation.ts +++ b/web/i18n/ru-RU/app-annotation.ts @@ -83,6 +83,16 @@ const translation = { configConfirmBtn: 'Сохранить', }, embeddingModelSwitchTip: 'Модель векторизации текста аннотаций, переключение между моделями будет осуществлено повторно, что приведет к дополнительным затратам.', + list: { + delete: { + title: 'Вы уверены, что хотите удалить?', + }, + }, + batchAction: { + cancel: 'Отменить', + selected: 'Выбрано', + delete: 'Удалить', + }, } export default translation diff --git a/web/i18n/ru-RU/app.ts b/web/i18n/ru-RU/app.ts index b02d01b263..bc15d16ee1 100644 --- a/web/i18n/ru-RU/app.ts +++ b/web/i18n/ru-RU/app.ts @@ -31,21 +31,7 @@ const translation = { newApp: { startFromBlank: 'Создать с нуля', startFromTemplate: 'Создать из шаблона', - captionAppType: 'Какой тип приложения вы хотите создать?', - chatbotDescription: 'Создайте приложение на основе чата. Это приложение использует формат вопросов и ответов, позволяя общаться непрерывно.', - completionDescription: 'Создайте приложение, которое генерирует высококачественный текст на основе подсказок, например, генерирует статьи, резюме, переводы и многое другое.', - completionWarning: 'Этот тип приложения больше не будет поддерживаться.', - agentDescription: 'Создайте интеллектуального агента, который может автономно выбирать инструменты для выполнения задач', - workflowDescription: 'Создайте приложение, которое генерирует высококачественный текст на основе рабочего процесса, организованного с высокой степенью настройки. Подходит для опытных пользователей.', workflowWarning: 'В настоящее время находится в бета-версии', - chatbotType: 'Метод организации чат-бота', - basic: 'Базовый', - basicTip: 'Для начинающих, можно переключиться на Chatflow позже', - basicFor: 'ДЛЯ НАЧИНАЮЩИХ', - basicDescription: 'Базовый конструктор позволяет создать приложение чат-бота с помощью простых настроек, без возможности изменять встроенные подсказки. Подходит для начинающих.', - advanced: 'Chatflow', - advancedFor: 'Для продвинутых пользователей', - advancedDescription: 'Организация рабочего процесса организует чат-ботов в виде рабочих процессов, предлагая высокую степень настройки, включая возможность редактирования встроенных подсказок. Подходит для опытных пользователей.', captionName: 'Значок и название приложения', appNamePlaceholder: 'Дайте вашему приложению имя', captionDescription: 'Описание', diff --git a/web/i18n/sl-SI/app-annotation.ts b/web/i18n/sl-SI/app-annotation.ts index 6cd88a47ee..69a6db57be 100644 --- a/web/i18n/sl-SI/app-annotation.ts +++ b/web/i18n/sl-SI/app-annotation.ts @@ -83,6 +83,16 @@ const translation = { configConfirmBtn: 'Shrani', }, embeddingModelSwitchTip: 'Model za vektorizacijo besedila opomb, preklapljanje modelov bo ponovno vektoriziralo, kar bo povzročilo dodatne stroške.', + list: { + delete: { + title: 'Ali ste prepričani, da želite izbrisati?', + }, + }, + batchAction: { + cancel: 'Prekliči', + delete: 'Izbriši', + selected: 'Izbrano', + }, } export default translation diff --git a/web/i18n/sl-SI/app.ts b/web/i18n/sl-SI/app.ts index 337bd10359..61c479e65f 100644 --- a/web/i18n/sl-SI/app.ts +++ b/web/i18n/sl-SI/app.ts @@ -31,21 +31,7 @@ const translation = { newApp: { startFromBlank: 'Ustvari iz nič', startFromTemplate: 'Ustvari iz predloge', - captionAppType: 'Kakšno aplikacijo želite ustvariti?', - chatbotDescription: 'Zgradite aplikacijo, ki temelji na klepetu. Ta aplikacija uporablja format vprašanj in odgovorov, ki omogoča več krogov neprekinjenega pogovora.', - completionDescription: 'Zgradite aplikacijo, ki na podlagi pozivov generira visokokakovostno besedilo, kot je ustvarjanje člankov, povzetkov, prevodov in več.', - completionWarning: 'Ta vrsta aplikacije ne bo več podprta.', - agentDescription: 'Zgradite inteligentnega agenta, ki lahko samostojno izbere orodja za dokončanje nalog.', - workflowDescription: 'Zgradite aplikacijo, ki generira visokokakovostno besedilo na podlagi orkestracije poteka dela z visoko stopnjo prilagodljivosti. Primerna je za izkušene uporabnike.', workflowWarning: 'Trenutno v beta različici', - chatbotType: 'Metoda orkestracije klepetalnika', - basic: 'Osnovno', - basicTip: 'Za začetnike, lahko kasneje preklopite na Chatflow', - basicFor: 'ZA ZAČETNIKE', - basicDescription: 'Osnovna orkestracija omogoča orkestracijo aplikacije klepetalnika z enostavnimi nastavitvami, brez možnosti spreminjanja vgrajenih pozivov. Primerna je za začetnike.', - advanced: 'Chatflow', - advancedFor: 'Za napredne uporabnike', - advancedDescription: 'Orkestracija poteka dela orkestrira klepetalnike v obliki potekov dela, ki ponuja visoko stopnjo prilagodljivosti, vključno z možnostjo urejanja vgrajenih pozivov. Primerna je za izkušene uporabnike.', captionName: 'Ikona in ime aplikacije', appNamePlaceholder: 'Poimenujte svojo aplikacijo', captionDescription: 'Opis', diff --git a/web/i18n/th-TH/app-annotation.ts b/web/i18n/th-TH/app-annotation.ts index f038f5ef8c..60598267c3 100644 --- a/web/i18n/th-TH/app-annotation.ts +++ b/web/i18n/th-TH/app-annotation.ts @@ -83,6 +83,16 @@ const translation = { configConfirmBtn: 'ประหยัด', }, embeddingModelSwitchTip: 'โมเดลเวกเตอร์ข้อความคําอธิบายประกอบ โมเดลการสลับจะถูกฝังใหม่ส่งผลให้มีค่าใช้จ่ายเพิ่มเติม', + list: { + delete: { + title: 'คุณแน่ใจหรือว่าต้องการลบ?', + }, + }, + batchAction: { + delete: 'ลบ', + selected: 'เลือกไว้', + cancel: 'ยกเลิก', + }, } export default translation diff --git a/web/i18n/th-TH/app.ts b/web/i18n/th-TH/app.ts index 8c8c0e02a2..d0e3394ff8 100644 --- a/web/i18n/th-TH/app.ts +++ b/web/i18n/th-TH/app.ts @@ -29,21 +29,7 @@ const translation = { newApp: { startFromBlank: 'สร้างโปรเจกต์ปล่าว', startFromTemplate: 'สร้างจากเทมเพลต', - captionAppType: 'คุณต้องการสร้างโปรเจกต์ประเภทใด', - chatbotDescription: 'สร้างโปรเจกต์เป็นแอปพลิเคชันที่ใช้การแชท โปรเจกต์นี้ใช้รูปแบบคําถามและคําตอบ ทําให้สามารถสนทนาต่อเนื่องได้หลายรอบ(Multi-turn)', - completionDescription: 'สร้างโปรเจกต์เป็นแอปพลิเคชันที่สร้างข้อความคุณภาพสูงตามข้อความแจ้ง เช่น การสร้างบทความ สรุป การแปล และอื่นๆ', - completionWarning: 'โปรเจกต์ประเภทนี้จะไม่รองรับอีกต่อไป', - agentDescription: 'สร้างตัวแทน(Agent)อัจฉริยะที่สามารถเลือกเครื่องมือเพื่อทํางานให้เสร็จได้โดยอัตโนมัติ', - workflowDescription: 'สร้างโปรเจกต์ เป็นแอปพลิเคชันที่สร้างข้อความคุณภาพสูงตามการประสานกระบวนการทำงาน(Workflow) ที่มีการปรับแต่งในระดับสูง เหมาะสําหรับผู้ใช้ที่มีประสบการณ์', workflowWarning: 'ขณะนี้อยู่ในช่วงเบต้า', - chatbotType: 'รูปแบบแชทบอท', - basic: 'พื้นฐาน', - basicTip: 'สําหรับผู้เริ่มต้นสามารถเปลี่ยนไปใช้ Chatflow ได้ในภายหลัง', - basicFor: 'สําหรับผู้เริ่มต้น', - basicDescription: 'Basic Orchestrate ช่วยให้สามารถประสานงานกันของ โปรเจกต์แชทบอทโดยใช้การตั้งค่าง่ายๆ โดยไม่สามารถแก้ไขข้อความแจ้งในตัวได้ เหมาะสําหรับผู้เริ่มต้น', - advanced: 'แชทโฟลว์', - advancedFor: 'สําหรับผู้ใช้ขั้นสูง ที่สามารถปรับแต่งขั้นตอนและตัวเลือกต่างๆได้อย่างอิสระ', - advancedDescription: 'Workflow Orchestrate ประสานงาน Chatbots ในรูปแบบของเวิร์กโฟลว์ โดยนําเสนอการปรับแต่งในระดับสูง รวมถึงความสามารถในการแก้ไขข้อความแจ้งในตัว เหมาะสําหรับผู้ใช้ที่มีประสบการณ์', captionName: 'ไอคอนและชื่อโปรเจกต์', appNamePlaceholder: 'ตั้งชื่อโปรเจกต์ของคุณ', captionDescription: 'คำอธิบาย', diff --git a/web/i18n/tr-TR/app-annotation.ts b/web/i18n/tr-TR/app-annotation.ts index f9b29bb711..a5974093e8 100644 --- a/web/i18n/tr-TR/app-annotation.ts +++ b/web/i18n/tr-TR/app-annotation.ts @@ -83,6 +83,16 @@ const translation = { configConfirmBtn: 'Kaydet', }, embeddingModelSwitchTip: 'Ek açıklama metin vektörleştirme modeli, model değiştirmek yeniden yerleştirilecek ve ek maliyetlere yol açacaktır.', + list: { + delete: { + title: 'Silmekte emin misin?', + }, + }, + batchAction: { + delete: 'Sil', + selected: 'Seçildi', + cancel: 'İptal et', + }, } export default translation diff --git a/web/i18n/tr-TR/app.ts b/web/i18n/tr-TR/app.ts index 05ad7c1378..1852ee29d2 100644 --- a/web/i18n/tr-TR/app.ts +++ b/web/i18n/tr-TR/app.ts @@ -29,21 +29,7 @@ const translation = { newApp: { startFromBlank: 'Boş Oluştur', startFromTemplate: 'Şablondan Oluştur', - captionAppType: 'Ne tür bir uygulama oluşturmak istiyorsunuz?', - chatbotDescription: 'Sohbete dayalı bir uygulama oluşturun. Bu uygulama, çoklu turlar halinde sürekli konuşmaya izin veren bir soru-cevap formatı kullanır.', - completionDescription: 'Prompt temelinde yüksek kaliteli metinler üreten bir uygulama oluşturun, örneğin makaleler, özetler, çeviriler ve daha fazlasını oluşturmak için.', - completionWarning: 'Bu tür bir uygulama artık desteklenmeyecek.', - agentDescription: 'Görevleri tamamlamak için araçları bağımsız olarak seçebilen bir zeki Agent oluşturun', - workflowDescription: 'Yüksek derecede özelleştirilebilir bir workflow ile yüksek kaliteli metinler üreten bir uygulama oluşturun. Deneyimli kullanıcılar için uygundur.', workflowWarning: 'Şu anda beta aşamasında', - chatbotType: 'Chatbot düzenleme yöntemi', - basic: 'Temel', - basicTip: 'Yeni başlayanlar için, daha sonra Chatflow\'a geçilebilir', - basicFor: 'YENİ BAŞLAYANLAR İÇİN', - basicDescription: 'Temel Orkestrasyon, yerleşik promptları değiştirme yeteneği olmadan, basit ayarlarla bir Chatbot uygulamasının orkestrasyonuna olanak tanır. Yeni başlayanlar için uygundur.', - advanced: 'Chatflow', - advancedFor: 'Gelişmiş kullanıcılar için', - advancedDescription: 'Workflow Orkestrasyonu, yerleşik promptları düzenleme yeteneği de dahil olmak üzere yüksek derecede özelleştirme sunarak Chatbotları workflow formunda düzenler. Deneyimli kullanıcılar için uygundur.', captionName: 'Uygulama simgesi & ismi', appNamePlaceholder: 'Uygulamanıza bir isim verin', captionDescription: 'Açıklama', diff --git a/web/i18n/uk-UA/app-annotation.ts b/web/i18n/uk-UA/app-annotation.ts index 918cea529a..bda4037c9c 100644 --- a/web/i18n/uk-UA/app-annotation.ts +++ b/web/i18n/uk-UA/app-annotation.ts @@ -83,6 +83,16 @@ const translation = { configConfirmBtn: 'Зберегти', }, embeddingModelSwitchTip: 'Модель векторизації тексту анотації, перемикання моделей буде повторно вбудовано, що призведе до додаткових витрат.', + list: { + delete: { + title: 'Ви впевнені, що хочете видалити?', + }, + }, + batchAction: { + selected: 'Вибрано', + cancel: 'Скасувати', + delete: 'Видалити', + }, } export default translation diff --git a/web/i18n/uk-UA/app.ts b/web/i18n/uk-UA/app.ts index 26c059f727..77b98beebe 100644 --- a/web/i18n/uk-UA/app.ts +++ b/web/i18n/uk-UA/app.ts @@ -27,21 +27,7 @@ const translation = { newApp: { startFromBlank: 'Створити з нуля', startFromTemplate: 'Створити з шаблону', - captionAppType: 'Який тип додатка ви хочете створити?', - chatbotDescription: 'Побудуйте додаток на основі чату. Цей додаток використовує формат запитань та відповідей, що дозволяє проводити кілька раундів безперервного спілкування.', - completionDescription: 'Побудуйте додаток, який генерує текст високої якості на основі підказок, таких як генерація статей, резюме, перекладів тощо.', - completionWarning: 'Цей тип додатка більше не буде підтримуватися.', - agentDescription: 'Побудуйте інтелектуального агента, який може автономно обирати інструменти для виконання завдань', - workflowDescription: 'Побудуйте додаток, який генерує текст високої якості на основі робочого процесу з високим рівнем настроювання. Він підходить для досвідчених користувачів.', workflowWarning: 'Наразі в бета-версії', - chatbotType: 'Метод оркестрації чатботу', - basic: 'Базовий', - basicTip: 'Для початківців, можна перейти до Chatflow пізніше', - basicFor: 'ДЛЯ ПОЧАТКІВЦІВ', - basicDescription: 'Базовий оркестр дозволяє оркеструвати додаток чатбота за допомогою простих налаштувань, без можливості змінювати вбудовані підказки. Він підходить для початківців.', - advanced: 'Chatflow', - advancedFor: 'Для досвідчених користувачів', - advancedDescription: 'Оркестрування робочого процесу оркеструє чатботи у формі робочих процесів, пропонуючи високий рівень настроювання, включаючи можливість редагувати вбудовані підказки. Він підходить для досвідчених користувачів.', captionName: 'Іконка та назва додатка', appNamePlaceholder: 'Дайте назву вашому додатку', captionDescription: 'Опис', diff --git a/web/i18n/vi-VN/app-annotation.ts b/web/i18n/vi-VN/app-annotation.ts index 5b9f3b35a5..29499dcc21 100644 --- a/web/i18n/vi-VN/app-annotation.ts +++ b/web/i18n/vi-VN/app-annotation.ts @@ -83,6 +83,16 @@ const translation = { configConfirmBtn: 'Lưu', }, embeddingModelSwitchTip: 'Mô hình vector hóa văn bản chú thích, việc chuyển đổi mô hình sẽ dẫn đến việc nhúng lại, có thể phát sinh thêm chi phí.', + list: { + delete: { + title: 'Bạn có chắc chắn muốn xóa không?', + }, + }, + batchAction: { + delete: 'Xóa', + cancel: 'Hủy', + selected: 'Được chọn', + }, } export default translation diff --git a/web/i18n/vi-VN/app-log.ts b/web/i18n/vi-VN/app-log.ts index 48ae3150b7..aad594dfa8 100644 --- a/web/i18n/vi-VN/app-log.ts +++ b/web/i18n/vi-VN/app-log.ts @@ -84,10 +84,8 @@ const translation = { fileListLabel: 'Chi tiết tệp', }, promptLog: 'Nhật ký lời nhắc', - AgentLog: 'Nhật ký tác nhân', viewLog: 'Xem nhật ký', agentLogDetail: { - AgentMode: 'Chế độ tác nhân', toolUsed: 'Công cụ đã sử dụng', iterations: 'Số lần lặp', iteration: 'Lần lặp', diff --git a/web/i18n/vi-VN/app.ts b/web/i18n/vi-VN/app.ts index 9ad2058330..7a992bef77 100644 --- a/web/i18n/vi-VN/app.ts +++ b/web/i18n/vi-VN/app.ts @@ -27,21 +27,7 @@ const translation = { newApp: { startFromBlank: 'Tạo mới', startFromTemplate: 'Tạo từ mẫu', - captionAppType: 'Bạn muốn tạo loại ứng dụng nào?', - chatbotDescription: 'Xây dựng một ứng dụng trò chuyện. Ứng dụng này sử dụng định dạng hỏi đáp, cho phép nhiều vòng trò chuyện liên tục.', - completionDescription: 'Xây dựng một ứng dụng tạo văn bản chất lượng cao dựa trên gợi ý, như tạo bài viết, tóm tắt, dịch thuật và nhiều hơn nữa.', - completionWarning: 'Loại ứng dụng này sẽ không được hỗ trợ trong tương lai.', - agentDescription: 'Xây dựng một tác nhân thông minh có thể tự động chọn công cụ để hoàn thành các nhiệm vụ', - workflowDescription: 'Xây dựng một ứng dụng tạo văn bản chất lượng cao dựa trên quy trình làm việc với mức độ tùy chỉnh cao. Phù hợp cho người dùng có kinh nghiệm.', workflowWarning: 'Hiện đang trong phiên bản beta', - chatbotType: 'Phương pháp quản lý Chatbot', - basic: 'Cơ bản', - basicTip: 'Dành cho người mới bắt đầu, có thể chuyển sang Chatflow sau này', - basicFor: 'DÀNH CHO NGƯỜI MỚI BẮT ĐẦU', - basicDescription: 'Quản lý cơ bản cho phép quản lý ứng dụng Chatbot bằng cách sử dụng các cài đặt đơn giản, không cần sửa đổi các lời nhắc tích hợp sẵn. Phù hợp cho người mới bắt đầu.', - advanced: 'Chatflow', - advancedFor: 'Dành cho người dùng có kinh nghiệm', - advancedDescription: 'Quản lý Chatbot dưới dạng các quy trình làm việc, cung cấp mức độ tùy chỉnh cao, bao gồm khả năng chỉnh sửa các lời nhắc tích hợp sẵn. Phù hợp cho người dùng có kinh nghiệm.', captionName: 'Biểu tượng và tên ứng dụng', appNamePlaceholder: 'Đặt tên cho ứng dụng của bạn', captionDescription: 'Mô tả', diff --git a/web/i18n/zh-Hant/app-annotation.ts b/web/i18n/zh-Hant/app-annotation.ts index 538546928c..b7416161d6 100644 --- a/web/i18n/zh-Hant/app-annotation.ts +++ b/web/i18n/zh-Hant/app-annotation.ts @@ -83,6 +83,15 @@ const translation = { configConfirmBtn: '儲存', }, embeddingModelSwitchTip: '標註文字向量化模型,切換模型會重新嵌入,產生額外費用消耗', + list: { + delete: { + title: '您確定要刪除嗎?', + }, + }, + batchAction: { + selected: '選擇的', + delete: '刪除', + }, } export default translation From 688d07e9c385ac30fd37cc6cb6b2e20727c95971 Mon Sep 17 00:00:00 2001 From: Hasta Date: Sat, 2 Aug 2025 17:15:55 +0800 Subject: [PATCH 111/415] fix the error of unable to retrieve url from file (#13603) --- api/core/file/file_manager.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/api/core/file/file_manager.py b/api/core/file/file_manager.py index f8c050c2ac..770014aa72 100644 --- a/api/core/file/file_manager.py +++ b/api/core/file/file_manager.py @@ -32,7 +32,7 @@ def get_attr(*, file: File, attr: FileAttribute): case FileAttribute.TRANSFER_METHOD: return file.transfer_method.value case FileAttribute.URL: - return file.remote_url + return _to_url(file) case FileAttribute.EXTENSION: return file.extension case FileAttribute.RELATED_ID: From aac849d4f4a35968c7c538a1ea9885bf0424956f Mon Sep 17 00:00:00 2001 From: Matri Qi Date: Sat, 2 Aug 2025 17:35:51 +0800 Subject: [PATCH 112/415] Fix/variable input validation issue (#23300) --- web/app/components/base/toast/index.tsx | 28 +++++++---- .../components/variable/output-var-list.tsx | 48 +++++++++++-------- .../_base/components/variable/var-list.tsx | 45 ++++++++++------- 3 files changed, 75 insertions(+), 46 deletions(-) diff --git a/web/app/components/base/toast/index.tsx b/web/app/components/base/toast/index.tsx index 725c7af8c2..a23a60dbf1 100644 --- a/web/app/components/base/toast/index.tsx +++ b/web/app/components/base/toast/index.tsx @@ -29,6 +29,10 @@ type IToastContext = { close: () => void } +export type ToastHandle = { + clear?: VoidFunction +} + export const ToastContext = createContext({} as IToastContext) export const useToastContext = () => useContext(ToastContext) const Toast = ({ @@ -46,7 +50,7 @@ const Toast = ({ return
    ) => { +}: Pick): ToastHandle => { const defaultDuring = (type === 'success' || type === 'info') ? 3000 : 6000 + const toastHandler: ToastHandle = {} + if (typeof window === 'object') { const holder = document.createElement('div') const root = createRoot(holder) + toastHandler.clear = () => { + if (holder) { + root.unmount() + holder.remove() + } + onClose?.() + } + root.render( , ) document.body.appendChild(holder) - setTimeout(() => { - if (holder) { - root.unmount() - holder.remove() - } - onClose?.() - }, duration || defaultDuring) + setTimeout(toastHandler.clear, duration || defaultDuring) } + + return toastHandler } export default Toast diff --git a/web/app/components/workflow/nodes/_base/components/variable/output-var-list.tsx b/web/app/components/workflow/nodes/_base/components/variable/output-var-list.tsx index a7c9a9d172..9fef1fe7b3 100644 --- a/web/app/components/workflow/nodes/_base/components/variable/output-var-list.tsx +++ b/web/app/components/workflow/nodes/_base/components/variable/output-var-list.tsx @@ -1,6 +1,6 @@ 'use client' import type { FC } from 'react' -import React, { useCallback } from 'react' +import React, { useCallback, useState } from 'react' import produce from 'immer' import { useTranslation } from 'react-i18next' import type { OutputVar } from '../../../code/types' @@ -9,7 +9,9 @@ import VarTypePicker from './var-type-picker' import Input from '@/app/components/base/input' import type { VarType } from '@/app/components/workflow/types' import { checkKeys, replaceSpaceWithUnderscreInVarNameInput } from '@/utils/var' +import type { ToastHandle } from '@/app/components/base/toast' import Toast from '@/app/components/base/toast' +import { useDebounceFn } from 'ahooks' type Props = { readonly: boolean @@ -27,6 +29,7 @@ const OutputVarList: FC = ({ onRemove, }) => { const { t } = useTranslation() + const [toastHandler, setToastHandler] = useState() const list = outputKeyOrders.map((key) => { return { @@ -34,6 +37,27 @@ const OutputVarList: FC = ({ variable_type: outputs[key]?.type, } }) + + const { run: validateVarInput } = useDebounceFn((existingVariables: typeof list, newKey: string) => { + const { isValid, errorKey, errorMessageKey } = checkKeys([newKey], true) + if (!isValid) { + setToastHandler(Toast.notify({ + type: 'error', + message: t(`appDebug.varKeyError.${errorMessageKey}`, { key: errorKey }), + })) + return + } + if (existingVariables.some(key => key.variable?.trim() === newKey.trim())) { + setToastHandler(Toast.notify({ + type: 'error', + message: t('appDebug.varKeyError.keyAlreadyExists', { key: newKey }), + })) + } + else { + toastHandler?.clear?.() + } + }, { wait: 500 }) + const handleVarNameChange = useCallback((index: number) => { return (e: React.ChangeEvent) => { const oldKey = list[index].variable @@ -41,22 +65,8 @@ const OutputVarList: FC = ({ replaceSpaceWithUnderscreInVarNameInput(e.target) const newKey = e.target.value - const { isValid, errorKey, errorMessageKey } = checkKeys([newKey], true) - if (!isValid) { - Toast.notify({ - type: 'error', - message: t(`appDebug.varKeyError.${errorMessageKey}`, { key: errorKey }), - }) - return - } - - if (list.map(item => item.variable?.trim()).includes(newKey.trim())) { - Toast.notify({ - type: 'error', - message: t('appDebug.varKeyError.keyAlreadyExists', { key: newKey }), - }) - return - } + toastHandler?.clear?.() + validateVarInput(list.toSpliced(index, 1), newKey) const newOutputs = produce(outputs, (draft) => { draft[newKey] = draft[oldKey] @@ -64,8 +74,7 @@ const OutputVarList: FC = ({ }) onChange(newOutputs, index, newKey) } - // eslint-disable-next-line react-hooks/exhaustive-deps - }, [list, onChange, outputs, outputKeyOrders]) + }, [list, onChange, outputs, outputKeyOrders, validateVarInput]) const handleVarTypeChange = useCallback((index: number) => { return (value: string) => { @@ -75,7 +84,6 @@ const OutputVarList: FC = ({ }) onChange(newOutputs) } - // eslint-disable-next-line react-hooks/exhaustive-deps }, [list, onChange, outputs, outputKeyOrders]) const handleVarRemove = useCallback((index: number) => { diff --git a/web/app/components/workflow/nodes/_base/components/variable/var-list.tsx b/web/app/components/workflow/nodes/_base/components/variable/var-list.tsx index b1a8d52a05..2972b33511 100644 --- a/web/app/components/workflow/nodes/_base/components/variable/var-list.tsx +++ b/web/app/components/workflow/nodes/_base/components/variable/var-list.tsx @@ -1,6 +1,6 @@ 'use client' import type { FC } from 'react' -import React, { useCallback, useMemo } from 'react' +import React, { useCallback, useMemo, useState } from 'react' import { useTranslation } from 'react-i18next' import produce from 'immer' import RemoveButton from '../remove-button' @@ -9,11 +9,13 @@ import Input from '@/app/components/base/input' import type { ValueSelector, Var, Variable } from '@/app/components/workflow/types' import { VarType as VarKindType } from '@/app/components/workflow/nodes/tool/types' import { checkKeys, replaceSpaceWithUnderscreInVarNameInput } from '@/utils/var' +import type { ToastHandle } from '@/app/components/base/toast' import Toast from '@/app/components/base/toast' import { ReactSortable } from 'react-sortablejs' import { v4 as uuid4 } from 'uuid' import { RiDraggable } from '@remixicon/react' import cn from '@/utils/classnames' +import { useDebounceFn } from 'ahooks' type Props = { nodeId: string @@ -39,6 +41,7 @@ const VarList: FC = ({ isSupportFileVar = true, }) => { const { t } = useTranslation() + const [toastHandle, setToastHandle] = useState() const listWithIds = useMemo(() => list.map((item) => { const id = uuid4() @@ -48,27 +51,35 @@ const VarList: FC = ({ } }), [list]) + const { run: validateVarInput } = useDebounceFn((list: Variable[], newKey: string) => { + const { isValid, errorKey, errorMessageKey } = checkKeys([newKey], true) + if (!isValid) { + setToastHandle(Toast.notify({ + type: 'error', + message: t(`appDebug.varKeyError.${errorMessageKey}`, { key: errorKey }), + })) + return + } + if (list.some(item => item.variable?.trim() === newKey.trim())) { + console.log('new key', newKey.trim()) + setToastHandle(Toast.notify({ + type: 'error', + message: t('appDebug.varKeyError.keyAlreadyExists', { key: newKey }), + })) + } + else { + toastHandle?.clear?.() + } + }, { wait: 500 }) + const handleVarNameChange = useCallback((index: number) => { return (e: React.ChangeEvent) => { replaceSpaceWithUnderscreInVarNameInput(e.target) const newKey = e.target.value - const { isValid, errorKey, errorMessageKey } = checkKeys([newKey], true) - if (!isValid) { - Toast.notify({ - type: 'error', - message: t(`appDebug.varKeyError.${errorMessageKey}`, { key: errorKey }), - }) - return - } - if (list.map(item => item.variable?.trim()).includes(newKey.trim())) { - Toast.notify({ - type: 'error', - message: t('appDebug.varKeyError.keyAlreadyExists', { key: newKey }), - }) - return - } + toastHandle?.clear?.() + validateVarInput(list.toSpliced(index, 1), newKey) onVarNameChange?.(list[index].variable, newKey) const newList = produce(list, (draft) => { @@ -76,7 +87,7 @@ const VarList: FC = ({ }) onChange(newList) } - }, [list, onVarNameChange, onChange]) + }, [list, onVarNameChange, onChange, validateVarInput]) const handleVarReferenceChange = useCallback((index: number) => { return (value: ValueSelector | string, varKindType: VarKindType, varInfo?: Var) => { From ff9fd0cdb2063548a31325d12f6a6782b4753d69 Mon Sep 17 00:00:00 2001 From: NeatGuyCoding <15627489+NeatGuyCoding@users.noreply.github.com> Date: Sat, 2 Aug 2025 17:36:15 +0800 Subject: [PATCH 113/415] fix: fix wrong css class (#23299) --- .../variable/variable-label/base/variable-node-label.tsx | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/web/app/components/workflow/nodes/_base/components/variable/variable-label/base/variable-node-label.tsx b/web/app/components/workflow/nodes/_base/components/variable/variable-label/base/variable-node-label.tsx index e4b0e52866..35b539d97a 100644 --- a/web/app/components/workflow/nodes/_base/components/variable/variable-label/base/variable-node-label.tsx +++ b/web/app/components/workflow/nodes/_base/components/variable/variable-label/base/variable-node-label.tsx @@ -22,7 +22,7 @@ const VariableNodeLabel = ({ { nodeTitle && (
    {nodeTitle} From 58608f51daaadd0a2576383ee3ab6195870a680a Mon Sep 17 00:00:00 2001 From: Asuka Minato Date: Sun, 3 Aug 2025 00:54:23 +0900 Subject: [PATCH 114/415] replace db with sa to get typing support (#23240) --- api/commands.py | 21 +- api/controllers/console/app/statistic.py | 14 +- .../console/app/workflow_statistic.py | 9 +- api/core/tools/tool_manager.py | 3 +- api/models/account.py | 69 +-- api/models/api_based_extension.py | 8 +- api/models/dataset.py | 269 +++++----- api/models/model.py | 508 +++++++++--------- api/models/provider.py | 58 +- api/models/source.py | 24 +- api/models/task.py | 13 +- api/models/tools.py | 152 +++--- api/models/web.py | 17 +- api/models/workflow.py | 56 +- api/services/plugin/data_migration.py | 9 +- api/services/plugin/plugin_migration.py | 3 +- api/tasks/remove_app_and_related_data_task.py | 3 +- 17 files changed, 623 insertions(+), 613 deletions(-) diff --git a/api/commands.py b/api/commands.py index 79bb6713d0..8177f1a48c 100644 --- a/api/commands.py +++ b/api/commands.py @@ -5,6 +5,7 @@ import secrets from typing import Any, Optional import click +import sqlalchemy as sa from flask import current_app from pydantic import TypeAdapter from sqlalchemy import select @@ -457,7 +458,7 @@ def convert_to_agent_apps(): """ with db.engine.begin() as conn: - rs = conn.execute(db.text(sql_query)) + rs = conn.execute(sa.text(sql_query)) apps = [] for i in rs: @@ -702,7 +703,7 @@ def fix_app_site_missing(): sql = """select apps.id as id from apps left join sites on sites.app_id=apps.id where sites.id is null limit 1000""" with db.engine.begin() as conn: - rs = conn.execute(db.text(sql)) + rs = conn.execute(sa.text(sql)) processed_count = 0 for i in rs: @@ -916,7 +917,7 @@ def clear_orphaned_file_records(force: bool): ) orphaned_message_files = [] with db.engine.begin() as conn: - rs = conn.execute(db.text(query)) + rs = conn.execute(sa.text(query)) for i in rs: orphaned_message_files.append({"id": str(i[0]), "message_id": str(i[1])}) @@ -937,7 +938,7 @@ def clear_orphaned_file_records(force: bool): click.echo(click.style("- Deleting orphaned message_files records", fg="white")) query = "DELETE FROM message_files WHERE id IN :ids" with db.engine.begin() as conn: - conn.execute(db.text(query), {"ids": tuple([record["id"] for record in orphaned_message_files])}) + conn.execute(sa.text(query), {"ids": tuple([record["id"] for record in orphaned_message_files])}) click.echo( click.style(f"Removed {len(orphaned_message_files)} orphaned message_files records.", fg="green") ) @@ -954,7 +955,7 @@ def clear_orphaned_file_records(force: bool): click.echo(click.style(f"- Listing file records in table {files_table['table']}", fg="white")) query = f"SELECT {files_table['id_column']}, {files_table['key_column']} FROM {files_table['table']}" with db.engine.begin() as conn: - rs = conn.execute(db.text(query)) + rs = conn.execute(sa.text(query)) for i in rs: all_files_in_tables.append({"table": files_table["table"], "id": str(i[0]), "key": i[1]}) click.echo(click.style(f"Found {len(all_files_in_tables)} files in tables.", fg="white")) @@ -974,7 +975,7 @@ def clear_orphaned_file_records(force: bool): f"SELECT {ids_table['column']} FROM {ids_table['table']} WHERE {ids_table['column']} IS NOT NULL" ) with db.engine.begin() as conn: - rs = conn.execute(db.text(query)) + rs = conn.execute(sa.text(query)) for i in rs: all_ids_in_tables.append({"table": ids_table["table"], "id": str(i[0])}) elif ids_table["type"] == "text": @@ -989,7 +990,7 @@ def clear_orphaned_file_records(force: bool): f"FROM {ids_table['table']}" ) with db.engine.begin() as conn: - rs = conn.execute(db.text(query)) + rs = conn.execute(sa.text(query)) for i in rs: for j in i[0]: all_ids_in_tables.append({"table": ids_table["table"], "id": j}) @@ -1008,7 +1009,7 @@ def clear_orphaned_file_records(force: bool): f"FROM {ids_table['table']}" ) with db.engine.begin() as conn: - rs = conn.execute(db.text(query)) + rs = conn.execute(sa.text(query)) for i in rs: for j in i[0]: all_ids_in_tables.append({"table": ids_table["table"], "id": j}) @@ -1037,7 +1038,7 @@ def clear_orphaned_file_records(force: bool): click.echo(click.style(f"- Deleting orphaned file records in table {files_table['table']}", fg="white")) query = f"DELETE FROM {files_table['table']} WHERE {files_table['id_column']} IN :ids" with db.engine.begin() as conn: - conn.execute(db.text(query), {"ids": tuple(orphaned_files)}) + conn.execute(sa.text(query), {"ids": tuple(orphaned_files)}) except Exception as e: click.echo(click.style(f"Error deleting orphaned file records: {str(e)}", fg="red")) return @@ -1107,7 +1108,7 @@ def remove_orphaned_files_on_storage(force: bool): click.echo(click.style(f"- Listing files from table {files_table['table']}", fg="white")) query = f"SELECT {files_table['key_column']} FROM {files_table['table']}" with db.engine.begin() as conn: - rs = conn.execute(db.text(query)) + rs = conn.execute(sa.text(query)) for i in rs: all_files_in_tables.append(str(i[0])) click.echo(click.style(f"Found {len(all_files_in_tables)} files in tables.", fg="white")) diff --git a/api/controllers/console/app/statistic.py b/api/controllers/console/app/statistic.py index 32b64d10c5..343b7acd7b 100644 --- a/api/controllers/console/app/statistic.py +++ b/api/controllers/console/app/statistic.py @@ -67,7 +67,7 @@ WHERE response_data = [] with db.engine.begin() as conn: - rs = conn.execute(db.text(sql_query), arg_dict) + rs = conn.execute(sa.text(sql_query), arg_dict) for i in rs: response_data.append({"date": str(i.date), "message_count": i.message_count}) @@ -176,7 +176,7 @@ WHERE response_data = [] with db.engine.begin() as conn: - rs = conn.execute(db.text(sql_query), arg_dict) + rs = conn.execute(sa.text(sql_query), arg_dict) for i in rs: response_data.append({"date": str(i.date), "terminal_count": i.terminal_count}) @@ -234,7 +234,7 @@ WHERE response_data = [] with db.engine.begin() as conn: - rs = conn.execute(db.text(sql_query), arg_dict) + rs = conn.execute(sa.text(sql_query), arg_dict) for i in rs: response_data.append( {"date": str(i.date), "token_count": i.token_count, "total_price": i.total_price, "currency": "USD"} @@ -310,7 +310,7 @@ ORDER BY response_data = [] with db.engine.begin() as conn: - rs = conn.execute(db.text(sql_query), arg_dict) + rs = conn.execute(sa.text(sql_query), arg_dict) for i in rs: response_data.append( {"date": str(i.date), "interactions": float(i.interactions.quantize(Decimal("0.01")))} @@ -373,7 +373,7 @@ WHERE response_data = [] with db.engine.begin() as conn: - rs = conn.execute(db.text(sql_query), arg_dict) + rs = conn.execute(sa.text(sql_query), arg_dict) for i in rs: response_data.append( { @@ -435,7 +435,7 @@ WHERE response_data = [] with db.engine.begin() as conn: - rs = conn.execute(db.text(sql_query), arg_dict) + rs = conn.execute(sa.text(sql_query), arg_dict) for i in rs: response_data.append({"date": str(i.date), "latency": round(i.latency * 1000, 4)}) @@ -495,7 +495,7 @@ WHERE response_data = [] with db.engine.begin() as conn: - rs = conn.execute(db.text(sql_query), arg_dict) + rs = conn.execute(sa.text(sql_query), arg_dict) for i in rs: response_data.append({"date": str(i.date), "tps": round(i.tokens_per_second, 4)}) diff --git a/api/controllers/console/app/workflow_statistic.py b/api/controllers/console/app/workflow_statistic.py index 6c7c73707b..7f80afd83b 100644 --- a/api/controllers/console/app/workflow_statistic.py +++ b/api/controllers/console/app/workflow_statistic.py @@ -2,6 +2,7 @@ from datetime import datetime from decimal import Decimal import pytz +import sqlalchemy as sa from flask import jsonify from flask_login import current_user from flask_restful import Resource, reqparse @@ -71,7 +72,7 @@ WHERE response_data = [] with db.engine.begin() as conn: - rs = conn.execute(db.text(sql_query), arg_dict) + rs = conn.execute(sa.text(sql_query), arg_dict) for i in rs: response_data.append({"date": str(i.date), "runs": i.runs}) @@ -133,7 +134,7 @@ WHERE response_data = [] with db.engine.begin() as conn: - rs = conn.execute(db.text(sql_query), arg_dict) + rs = conn.execute(sa.text(sql_query), arg_dict) for i in rs: response_data.append({"date": str(i.date), "terminal_count": i.terminal_count}) @@ -195,7 +196,7 @@ WHERE response_data = [] with db.engine.begin() as conn: - rs = conn.execute(db.text(sql_query), arg_dict) + rs = conn.execute(sa.text(sql_query), arg_dict) for i in rs: response_data.append( { @@ -277,7 +278,7 @@ GROUP BY response_data = [] with db.engine.begin() as conn: - rs = conn.execute(db.text(sql_query), arg_dict) + rs = conn.execute(sa.text(sql_query), arg_dict) for i in rs: response_data.append( {"date": str(i.date), "interactions": float(i.interactions.quantize(Decimal("0.01")))} diff --git a/api/core/tools/tool_manager.py b/api/core/tools/tool_manager.py index 1bb4cfa4cd..2737bcfb16 100644 --- a/api/core/tools/tool_manager.py +++ b/api/core/tools/tool_manager.py @@ -7,6 +7,7 @@ from os import listdir, path from threading import Lock from typing import TYPE_CHECKING, Any, Literal, Optional, Union, cast +import sqlalchemy as sa from pydantic import TypeAdapter from yarl import URL @@ -616,7 +617,7 @@ class ToolManager: WHERE tenant_id = :tenant_id ORDER BY tenant_id, provider, is_default DESC, created_at DESC """ - ids = [row.id for row in db.session.execute(db.text(sql), {"tenant_id": tenant_id}).all()] + ids = [row.id for row in db.session.execute(sa.text(sql), {"tenant_id": tenant_id}).all()] return db.session.query(BuiltinToolProvider).where(BuiltinToolProvider.id.in_(ids)).all() @classmethod diff --git a/api/models/account.py b/api/models/account.py index 3437055893..1a0752440d 100644 --- a/api/models/account.py +++ b/api/models/account.py @@ -3,6 +3,7 @@ import json from datetime import datetime from typing import Optional, cast +import sqlalchemy as sa from flask_login import UserMixin # type: ignore from sqlalchemy import DateTime, String, func, select from sqlalchemy.orm import Mapped, mapped_column, reconstructor @@ -83,9 +84,9 @@ class AccountStatus(enum.StrEnum): class Account(UserMixin, Base): __tablename__ = "accounts" - __table_args__ = (db.PrimaryKeyConstraint("id", name="account_pkey"), db.Index("account_email_idx", "email")) + __table_args__ = (sa.PrimaryKeyConstraint("id", name="account_pkey"), sa.Index("account_email_idx", "email")) - id: Mapped[str] = mapped_column(StringUUID, server_default=db.text("uuid_generate_v4()")) + id: Mapped[str] = mapped_column(StringUUID, server_default=sa.text("uuid_generate_v4()")) name: Mapped[str] = mapped_column(String(255)) email: Mapped[str] = mapped_column(String(255)) password: Mapped[Optional[str]] = mapped_column(String(255)) @@ -97,7 +98,7 @@ class Account(UserMixin, Base): last_login_at: Mapped[Optional[datetime]] = mapped_column(DateTime, nullable=True) last_login_ip: Mapped[Optional[str]] = mapped_column(String(255), nullable=True) last_active_at: Mapped[datetime] = mapped_column(DateTime, server_default=func.current_timestamp(), nullable=False) - status: Mapped[str] = mapped_column(String(16), server_default=db.text("'active'::character varying")) + status: Mapped[str] = mapped_column(String(16), server_default=sa.text("'active'::character varying")) initialized_at: Mapped[Optional[datetime]] = mapped_column(DateTime, nullable=True) created_at: Mapped[datetime] = mapped_column(DateTime, server_default=func.current_timestamp(), nullable=False) updated_at: Mapped[datetime] = mapped_column(DateTime, server_default=func.current_timestamp(), nullable=False) @@ -195,14 +196,14 @@ class TenantStatus(enum.StrEnum): class Tenant(Base): __tablename__ = "tenants" - __table_args__ = (db.PrimaryKeyConstraint("id", name="tenant_pkey"),) + __table_args__ = (sa.PrimaryKeyConstraint("id", name="tenant_pkey"),) - id: Mapped[str] = mapped_column(StringUUID, server_default=db.text("uuid_generate_v4()")) + id: Mapped[str] = mapped_column(StringUUID, server_default=sa.text("uuid_generate_v4()")) name: Mapped[str] = mapped_column(String(255)) - encrypt_public_key = db.Column(db.Text) - plan: Mapped[str] = mapped_column(String(255), server_default=db.text("'basic'::character varying")) - status: Mapped[str] = mapped_column(String(255), server_default=db.text("'normal'::character varying")) - custom_config: Mapped[Optional[str]] = mapped_column(db.Text) + encrypt_public_key = db.Column(sa.Text) + plan: Mapped[str] = mapped_column(String(255), server_default=sa.text("'basic'::character varying")) + status: Mapped[str] = mapped_column(String(255), server_default=sa.text("'normal'::character varying")) + custom_config: Mapped[Optional[str]] = mapped_column(sa.Text) created_at: Mapped[datetime] = mapped_column(DateTime, server_default=func.current_timestamp(), nullable=False) updated_at: Mapped[datetime] = mapped_column(DateTime, server_default=func.current_timestamp()) @@ -225,16 +226,16 @@ class Tenant(Base): class TenantAccountJoin(Base): __tablename__ = "tenant_account_joins" __table_args__ = ( - db.PrimaryKeyConstraint("id", name="tenant_account_join_pkey"), - db.Index("tenant_account_join_account_id_idx", "account_id"), - db.Index("tenant_account_join_tenant_id_idx", "tenant_id"), - db.UniqueConstraint("tenant_id", "account_id", name="unique_tenant_account_join"), + sa.PrimaryKeyConstraint("id", name="tenant_account_join_pkey"), + sa.Index("tenant_account_join_account_id_idx", "account_id"), + sa.Index("tenant_account_join_tenant_id_idx", "tenant_id"), + sa.UniqueConstraint("tenant_id", "account_id", name="unique_tenant_account_join"), ) - id: Mapped[str] = mapped_column(StringUUID, server_default=db.text("uuid_generate_v4()")) + id: Mapped[str] = mapped_column(StringUUID, server_default=sa.text("uuid_generate_v4()")) tenant_id: Mapped[str] = mapped_column(StringUUID) account_id: Mapped[str] = mapped_column(StringUUID) - current: Mapped[bool] = mapped_column(db.Boolean, server_default=db.text("false")) + current: Mapped[bool] = mapped_column(sa.Boolean, server_default=sa.text("false")) role: Mapped[str] = mapped_column(String(16), server_default="normal") invited_by: Mapped[Optional[str]] = mapped_column(StringUUID) created_at: Mapped[datetime] = mapped_column(DateTime, server_default=func.current_timestamp()) @@ -244,12 +245,12 @@ class TenantAccountJoin(Base): class AccountIntegrate(Base): __tablename__ = "account_integrates" __table_args__ = ( - db.PrimaryKeyConstraint("id", name="account_integrate_pkey"), - db.UniqueConstraint("account_id", "provider", name="unique_account_provider"), - db.UniqueConstraint("provider", "open_id", name="unique_provider_open_id"), + sa.PrimaryKeyConstraint("id", name="account_integrate_pkey"), + sa.UniqueConstraint("account_id", "provider", name="unique_account_provider"), + sa.UniqueConstraint("provider", "open_id", name="unique_provider_open_id"), ) - id: Mapped[str] = mapped_column(StringUUID, server_default=db.text("uuid_generate_v4()")) + id: Mapped[str] = mapped_column(StringUUID, server_default=sa.text("uuid_generate_v4()")) account_id: Mapped[str] = mapped_column(StringUUID) provider: Mapped[str] = mapped_column(String(16)) open_id: Mapped[str] = mapped_column(String(255)) @@ -261,20 +262,20 @@ class AccountIntegrate(Base): class InvitationCode(Base): __tablename__ = "invitation_codes" __table_args__ = ( - db.PrimaryKeyConstraint("id", name="invitation_code_pkey"), - db.Index("invitation_codes_batch_idx", "batch"), - db.Index("invitation_codes_code_idx", "code", "status"), + sa.PrimaryKeyConstraint("id", name="invitation_code_pkey"), + sa.Index("invitation_codes_batch_idx", "batch"), + sa.Index("invitation_codes_code_idx", "code", "status"), ) - id: Mapped[int] = mapped_column(db.Integer) + id: Mapped[int] = mapped_column(sa.Integer) batch: Mapped[str] = mapped_column(String(255)) code: Mapped[str] = mapped_column(String(32)) - status: Mapped[str] = mapped_column(String(16), server_default=db.text("'unused'::character varying")) + status: Mapped[str] = mapped_column(String(16), server_default=sa.text("'unused'::character varying")) used_at: Mapped[Optional[datetime]] = mapped_column(DateTime) used_by_tenant_id: Mapped[Optional[str]] = mapped_column(StringUUID) used_by_account_id: Mapped[Optional[str]] = mapped_column(StringUUID) deprecated_at: Mapped[Optional[datetime]] = mapped_column(DateTime, nullable=True) - created_at: Mapped[datetime] = mapped_column(DateTime, server_default=db.text("CURRENT_TIMESTAMP(0)")) + created_at: Mapped[datetime] = mapped_column(DateTime, server_default=sa.text("CURRENT_TIMESTAMP(0)")) class TenantPluginPermission(Base): @@ -290,11 +291,11 @@ class TenantPluginPermission(Base): __tablename__ = "account_plugin_permissions" __table_args__ = ( - db.PrimaryKeyConstraint("id", name="account_plugin_permission_pkey"), - db.UniqueConstraint("tenant_id", name="unique_tenant_plugin"), + sa.PrimaryKeyConstraint("id", name="account_plugin_permission_pkey"), + sa.UniqueConstraint("tenant_id", name="unique_tenant_plugin"), ) - id: Mapped[str] = mapped_column(StringUUID, server_default=db.text("uuid_generate_v4()")) + id: Mapped[str] = mapped_column(StringUUID, server_default=sa.text("uuid_generate_v4()")) tenant_id: Mapped[str] = mapped_column(StringUUID, nullable=False) install_permission: Mapped[InstallPermission] = mapped_column(String(16), nullable=False, server_default="everyone") debug_permission: Mapped[DebugPermission] = mapped_column(String(16), nullable=False, server_default="noone") @@ -313,16 +314,16 @@ class TenantPluginAutoUpgradeStrategy(Base): __tablename__ = "tenant_plugin_auto_upgrade_strategies" __table_args__ = ( - db.PrimaryKeyConstraint("id", name="tenant_plugin_auto_upgrade_strategy_pkey"), - db.UniqueConstraint("tenant_id", name="unique_tenant_plugin_auto_upgrade_strategy"), + sa.PrimaryKeyConstraint("id", name="tenant_plugin_auto_upgrade_strategy_pkey"), + sa.UniqueConstraint("tenant_id", name="unique_tenant_plugin_auto_upgrade_strategy"), ) - id: Mapped[str] = mapped_column(StringUUID, server_default=db.text("uuid_generate_v4()")) + id: Mapped[str] = mapped_column(StringUUID, server_default=sa.text("uuid_generate_v4()")) tenant_id: Mapped[str] = mapped_column(StringUUID, nullable=False) strategy_setting: Mapped[StrategySetting] = mapped_column(String(16), nullable=False, server_default="fix_only") - upgrade_time_of_day: Mapped[int] = mapped_column(db.Integer, nullable=False, default=0) # seconds of the day + upgrade_time_of_day: Mapped[int] = mapped_column(sa.Integer, nullable=False, default=0) # seconds of the day upgrade_mode: Mapped[UpgradeMode] = mapped_column(String(16), nullable=False, server_default="exclude") - exclude_plugins: Mapped[list[str]] = mapped_column(db.ARRAY(String(255)), nullable=False) # plugin_id (author/name) - include_plugins: Mapped[list[str]] = mapped_column(db.ARRAY(String(255)), nullable=False) # plugin_id (author/name) + exclude_plugins: Mapped[list[str]] = mapped_column(sa.ARRAY(String(255)), nullable=False) # plugin_id (author/name) + include_plugins: Mapped[list[str]] = mapped_column(sa.ARRAY(String(255)), nullable=False) # plugin_id (author/name) created_at = db.Column(DateTime, nullable=False, server_default=func.current_timestamp()) updated_at = db.Column(DateTime, nullable=False, server_default=func.current_timestamp()) diff --git a/api/models/api_based_extension.py b/api/models/api_based_extension.py index ac9eda6829..60167d9069 100644 --- a/api/models/api_based_extension.py +++ b/api/models/api_based_extension.py @@ -1,11 +1,11 @@ import enum from datetime import datetime +import sqlalchemy as sa from sqlalchemy import DateTime, String, Text, func from sqlalchemy.orm import Mapped, mapped_column from .base import Base -from .engine import db from .types import StringUUID @@ -19,11 +19,11 @@ class APIBasedExtensionPoint(enum.Enum): class APIBasedExtension(Base): __tablename__ = "api_based_extensions" __table_args__ = ( - db.PrimaryKeyConstraint("id", name="api_based_extension_pkey"), - db.Index("api_based_extension_tenant_idx", "tenant_id"), + sa.PrimaryKeyConstraint("id", name="api_based_extension_pkey"), + sa.Index("api_based_extension_tenant_idx", "tenant_id"), ) - id = mapped_column(StringUUID, server_default=db.text("uuid_generate_v4()")) + id = mapped_column(StringUUID, server_default=sa.text("uuid_generate_v4()")) tenant_id = mapped_column(StringUUID, nullable=False) name: Mapped[str] = mapped_column(String(255), nullable=False) api_endpoint: Mapped[str] = mapped_column(String(255), nullable=False) diff --git a/api/models/dataset.py b/api/models/dataset.py index e62101ae73..3b1d289bc4 100644 --- a/api/models/dataset.py +++ b/api/models/dataset.py @@ -12,6 +12,7 @@ from datetime import datetime from json import JSONDecodeError from typing import Any, Optional, cast +import sqlalchemy as sa from sqlalchemy import DateTime, String, func, select from sqlalchemy.dialects.postgresql import JSONB from sqlalchemy.orm import Mapped, mapped_column @@ -38,23 +39,23 @@ class DatasetPermissionEnum(enum.StrEnum): class Dataset(Base): __tablename__ = "datasets" __table_args__ = ( - db.PrimaryKeyConstraint("id", name="dataset_pkey"), - db.Index("dataset_tenant_idx", "tenant_id"), - db.Index("retrieval_model_idx", "retrieval_model", postgresql_using="gin"), + sa.PrimaryKeyConstraint("id", name="dataset_pkey"), + sa.Index("dataset_tenant_idx", "tenant_id"), + sa.Index("retrieval_model_idx", "retrieval_model", postgresql_using="gin"), ) INDEXING_TECHNIQUE_LIST = ["high_quality", "economy", None] PROVIDER_LIST = ["vendor", "external", None] - id = mapped_column(StringUUID, server_default=db.text("uuid_generate_v4()")) + id = mapped_column(StringUUID, server_default=sa.text("uuid_generate_v4()")) tenant_id: Mapped[str] = mapped_column(StringUUID) name: Mapped[str] = mapped_column(String(255)) - description = mapped_column(db.Text, nullable=True) - provider: Mapped[str] = mapped_column(String(255), server_default=db.text("'vendor'::character varying")) - permission: Mapped[str] = mapped_column(String(255), server_default=db.text("'only_me'::character varying")) + description = mapped_column(sa.Text, nullable=True) + provider: Mapped[str] = mapped_column(String(255), server_default=sa.text("'vendor'::character varying")) + permission: Mapped[str] = mapped_column(String(255), server_default=sa.text("'only_me'::character varying")) data_source_type = mapped_column(String(255)) indexing_technique: Mapped[Optional[str]] = mapped_column(String(255)) - index_struct = mapped_column(db.Text, nullable=True) + index_struct = mapped_column(sa.Text, nullable=True) created_by = mapped_column(StringUUID, nullable=False) created_at: Mapped[datetime] = mapped_column(DateTime, nullable=False, server_default=func.current_timestamp()) updated_by = mapped_column(StringUUID, nullable=True) @@ -63,7 +64,7 @@ class Dataset(Base): embedding_model_provider = db.Column(String(255), nullable=True) # TODO: mapped_column collection_binding_id = mapped_column(StringUUID, nullable=True) retrieval_model = mapped_column(JSONB, nullable=True) - built_in_field_enabled: Mapped[bool] = mapped_column(db.Boolean, nullable=False, server_default=db.text("false")) + built_in_field_enabled: Mapped[bool] = mapped_column(sa.Boolean, nullable=False, server_default=sa.text("false")) @property def dataset_keyword_table(self): @@ -262,14 +263,14 @@ class Dataset(Base): class DatasetProcessRule(Base): __tablename__ = "dataset_process_rules" __table_args__ = ( - db.PrimaryKeyConstraint("id", name="dataset_process_rule_pkey"), - db.Index("dataset_process_rule_dataset_id_idx", "dataset_id"), + sa.PrimaryKeyConstraint("id", name="dataset_process_rule_pkey"), + sa.Index("dataset_process_rule_dataset_id_idx", "dataset_id"), ) - id = mapped_column(StringUUID, nullable=False, server_default=db.text("uuid_generate_v4()")) + id = mapped_column(StringUUID, nullable=False, server_default=sa.text("uuid_generate_v4()")) dataset_id = mapped_column(StringUUID, nullable=False) - mode = mapped_column(String(255), nullable=False, server_default=db.text("'automatic'::character varying")) - rules = mapped_column(db.Text, nullable=True) + mode = mapped_column(String(255), nullable=False, server_default=sa.text("'automatic'::character varying")) + rules = mapped_column(sa.Text, nullable=True) created_by = mapped_column(StringUUID, nullable=False) created_at: Mapped[datetime] = mapped_column(DateTime, nullable=False, server_default=func.current_timestamp()) @@ -302,20 +303,20 @@ class DatasetProcessRule(Base): class Document(Base): __tablename__ = "documents" __table_args__ = ( - db.PrimaryKeyConstraint("id", name="document_pkey"), - db.Index("document_dataset_id_idx", "dataset_id"), - db.Index("document_is_paused_idx", "is_paused"), - db.Index("document_tenant_idx", "tenant_id"), - db.Index("document_metadata_idx", "doc_metadata", postgresql_using="gin"), + sa.PrimaryKeyConstraint("id", name="document_pkey"), + sa.Index("document_dataset_id_idx", "dataset_id"), + sa.Index("document_is_paused_idx", "is_paused"), + sa.Index("document_tenant_idx", "tenant_id"), + sa.Index("document_metadata_idx", "doc_metadata", postgresql_using="gin"), ) # initial fields - id = mapped_column(StringUUID, nullable=False, server_default=db.text("uuid_generate_v4()")) + id = mapped_column(StringUUID, nullable=False, server_default=sa.text("uuid_generate_v4()")) tenant_id = mapped_column(StringUUID, nullable=False) dataset_id = mapped_column(StringUUID, nullable=False) - position: Mapped[int] = mapped_column(db.Integer, nullable=False) + position: Mapped[int] = mapped_column(sa.Integer, nullable=False) data_source_type: Mapped[str] = mapped_column(String(255), nullable=False) - data_source_info = mapped_column(db.Text, nullable=True) + data_source_info = mapped_column(sa.Text, nullable=True) dataset_process_rule_id = mapped_column(StringUUID, nullable=True) batch: Mapped[str] = mapped_column(String(255), nullable=False) name: Mapped[str] = mapped_column(String(255), nullable=False) @@ -328,8 +329,8 @@ class Document(Base): processing_started_at: Mapped[Optional[datetime]] = mapped_column(DateTime, nullable=True) # parsing - file_id = mapped_column(db.Text, nullable=True) - word_count: Mapped[Optional[int]] = mapped_column(db.Integer, nullable=True) # TODO: make this not nullable + file_id = mapped_column(sa.Text, nullable=True) + word_count: Mapped[Optional[int]] = mapped_column(sa.Integer, nullable=True) # TODO: make this not nullable parsing_completed_at: Mapped[Optional[datetime]] = mapped_column(DateTime, nullable=True) # cleaning @@ -339,32 +340,32 @@ class Document(Base): splitting_completed_at: Mapped[Optional[datetime]] = mapped_column(DateTime, nullable=True) # indexing - tokens: Mapped[Optional[int]] = mapped_column(db.Integer, nullable=True) - indexing_latency: Mapped[Optional[float]] = mapped_column(db.Float, nullable=True) + tokens: Mapped[Optional[int]] = mapped_column(sa.Integer, nullable=True) + indexing_latency: Mapped[Optional[float]] = mapped_column(sa.Float, nullable=True) completed_at: Mapped[Optional[datetime]] = mapped_column(DateTime, nullable=True) # pause - is_paused: Mapped[Optional[bool]] = mapped_column(db.Boolean, nullable=True, server_default=db.text("false")) + is_paused: Mapped[Optional[bool]] = mapped_column(sa.Boolean, nullable=True, server_default=sa.text("false")) paused_by = mapped_column(StringUUID, nullable=True) paused_at: Mapped[Optional[datetime]] = mapped_column(DateTime, nullable=True) # error - error = mapped_column(db.Text, nullable=True) + error = mapped_column(sa.Text, nullable=True) stopped_at: Mapped[Optional[datetime]] = mapped_column(DateTime, nullable=True) # basic fields - indexing_status = mapped_column(String(255), nullable=False, server_default=db.text("'waiting'::character varying")) - enabled: Mapped[bool] = mapped_column(db.Boolean, nullable=False, server_default=db.text("true")) + indexing_status = mapped_column(String(255), nullable=False, server_default=sa.text("'waiting'::character varying")) + enabled: Mapped[bool] = mapped_column(sa.Boolean, nullable=False, server_default=sa.text("true")) disabled_at: Mapped[Optional[datetime]] = mapped_column(DateTime, nullable=True) disabled_by = mapped_column(StringUUID, nullable=True) - archived: Mapped[bool] = mapped_column(db.Boolean, nullable=False, server_default=db.text("false")) + archived: Mapped[bool] = mapped_column(sa.Boolean, nullable=False, server_default=sa.text("false")) archived_reason = mapped_column(String(255), nullable=True) archived_by = mapped_column(StringUUID, nullable=True) archived_at: Mapped[Optional[datetime]] = mapped_column(DateTime, nullable=True) updated_at: Mapped[datetime] = mapped_column(DateTime, nullable=False, server_default=func.current_timestamp()) doc_type = mapped_column(String(40), nullable=True) doc_metadata = mapped_column(JSONB, nullable=True) - doc_form = mapped_column(String(255), nullable=False, server_default=db.text("'text_model'::character varying")) + doc_form = mapped_column(String(255), nullable=False, server_default=sa.text("'text_model'::character varying")) doc_language = mapped_column(String(255), nullable=True) DATA_SOURCES = ["upload_file", "notion_import", "website_crawl"] @@ -643,44 +644,44 @@ class Document(Base): class DocumentSegment(Base): __tablename__ = "document_segments" __table_args__ = ( - db.PrimaryKeyConstraint("id", name="document_segment_pkey"), - db.Index("document_segment_dataset_id_idx", "dataset_id"), - db.Index("document_segment_document_id_idx", "document_id"), - db.Index("document_segment_tenant_dataset_idx", "dataset_id", "tenant_id"), - db.Index("document_segment_tenant_document_idx", "document_id", "tenant_id"), - db.Index("document_segment_node_dataset_idx", "index_node_id", "dataset_id"), - db.Index("document_segment_tenant_idx", "tenant_id"), + sa.PrimaryKeyConstraint("id", name="document_segment_pkey"), + sa.Index("document_segment_dataset_id_idx", "dataset_id"), + sa.Index("document_segment_document_id_idx", "document_id"), + sa.Index("document_segment_tenant_dataset_idx", "dataset_id", "tenant_id"), + sa.Index("document_segment_tenant_document_idx", "document_id", "tenant_id"), + sa.Index("document_segment_node_dataset_idx", "index_node_id", "dataset_id"), + sa.Index("document_segment_tenant_idx", "tenant_id"), ) # initial fields - id = mapped_column(StringUUID, nullable=False, server_default=db.text("uuid_generate_v4()")) + id = mapped_column(StringUUID, nullable=False, server_default=sa.text("uuid_generate_v4()")) tenant_id = mapped_column(StringUUID, nullable=False) dataset_id = mapped_column(StringUUID, nullable=False) document_id = mapped_column(StringUUID, nullable=False) position: Mapped[int] - content = mapped_column(db.Text, nullable=False) - answer = mapped_column(db.Text, nullable=True) + content = mapped_column(sa.Text, nullable=False) + answer = mapped_column(sa.Text, nullable=True) word_count: Mapped[int] tokens: Mapped[int] # indexing fields - keywords = mapped_column(db.JSON, nullable=True) + keywords = mapped_column(sa.JSON, nullable=True) index_node_id = mapped_column(String(255), nullable=True) index_node_hash = mapped_column(String(255), nullable=True) # basic fields - hit_count: Mapped[int] = mapped_column(db.Integer, nullable=False, default=0) - enabled: Mapped[bool] = mapped_column(db.Boolean, nullable=False, server_default=db.text("true")) + hit_count: Mapped[int] = mapped_column(sa.Integer, nullable=False, default=0) + enabled: Mapped[bool] = mapped_column(sa.Boolean, nullable=False, server_default=sa.text("true")) disabled_at: Mapped[Optional[datetime]] = mapped_column(DateTime, nullable=True) disabled_by = mapped_column(StringUUID, nullable=True) - status: Mapped[str] = mapped_column(String(255), server_default=db.text("'waiting'::character varying")) + status: Mapped[str] = mapped_column(String(255), server_default=sa.text("'waiting'::character varying")) created_by = mapped_column(StringUUID, nullable=False) created_at: Mapped[datetime] = mapped_column(DateTime, nullable=False, server_default=func.current_timestamp()) updated_by = mapped_column(StringUUID, nullable=True) updated_at: Mapped[datetime] = mapped_column(DateTime, nullable=False, server_default=func.current_timestamp()) indexing_at: Mapped[Optional[datetime]] = mapped_column(DateTime, nullable=True) completed_at: Mapped[Optional[datetime]] = mapped_column(DateTime, nullable=True) - error = mapped_column(db.Text, nullable=True) + error = mapped_column(sa.Text, nullable=True) stopped_at: Mapped[Optional[datetime]] = mapped_column(DateTime, nullable=True) @property @@ -794,36 +795,36 @@ class DocumentSegment(Base): class ChildChunk(Base): __tablename__ = "child_chunks" __table_args__ = ( - db.PrimaryKeyConstraint("id", name="child_chunk_pkey"), - db.Index("child_chunk_dataset_id_idx", "tenant_id", "dataset_id", "document_id", "segment_id", "index_node_id"), - db.Index("child_chunks_node_idx", "index_node_id", "dataset_id"), - db.Index("child_chunks_segment_idx", "segment_id"), + sa.PrimaryKeyConstraint("id", name="child_chunk_pkey"), + sa.Index("child_chunk_dataset_id_idx", "tenant_id", "dataset_id", "document_id", "segment_id", "index_node_id"), + sa.Index("child_chunks_node_idx", "index_node_id", "dataset_id"), + sa.Index("child_chunks_segment_idx", "segment_id"), ) # initial fields - id = mapped_column(StringUUID, nullable=False, server_default=db.text("uuid_generate_v4()")) + id = mapped_column(StringUUID, nullable=False, server_default=sa.text("uuid_generate_v4()")) tenant_id = mapped_column(StringUUID, nullable=False) dataset_id = mapped_column(StringUUID, nullable=False) document_id = mapped_column(StringUUID, nullable=False) segment_id = mapped_column(StringUUID, nullable=False) - position: Mapped[int] = mapped_column(db.Integer, nullable=False) - content = mapped_column(db.Text, nullable=False) - word_count: Mapped[int] = mapped_column(db.Integer, nullable=False) + position: Mapped[int] = mapped_column(sa.Integer, nullable=False) + content = mapped_column(sa.Text, nullable=False) + word_count: Mapped[int] = mapped_column(sa.Integer, nullable=False) # indexing fields index_node_id = mapped_column(String(255), nullable=True) index_node_hash = mapped_column(String(255), nullable=True) - type = mapped_column(String(255), nullable=False, server_default=db.text("'automatic'::character varying")) + type = mapped_column(String(255), nullable=False, server_default=sa.text("'automatic'::character varying")) created_by = mapped_column(StringUUID, nullable=False) created_at: Mapped[datetime] = mapped_column( - DateTime, nullable=False, server_default=db.text("CURRENT_TIMESTAMP(0)") + DateTime, nullable=False, server_default=sa.text("CURRENT_TIMESTAMP(0)") ) updated_by = mapped_column(StringUUID, nullable=True) updated_at: Mapped[datetime] = mapped_column( - DateTime, nullable=False, server_default=db.text("CURRENT_TIMESTAMP(0)") + DateTime, nullable=False, server_default=sa.text("CURRENT_TIMESTAMP(0)") ) indexing_at: Mapped[Optional[datetime]] = mapped_column(DateTime, nullable=True) completed_at: Mapped[Optional[datetime]] = mapped_column(DateTime, nullable=True) - error = mapped_column(db.Text, nullable=True) + error = mapped_column(sa.Text, nullable=True) @property def dataset(self): @@ -841,11 +842,11 @@ class ChildChunk(Base): class AppDatasetJoin(Base): __tablename__ = "app_dataset_joins" __table_args__ = ( - db.PrimaryKeyConstraint("id", name="app_dataset_join_pkey"), - db.Index("app_dataset_join_app_dataset_idx", "dataset_id", "app_id"), + sa.PrimaryKeyConstraint("id", name="app_dataset_join_pkey"), + sa.Index("app_dataset_join_app_dataset_idx", "dataset_id", "app_id"), ) - id = mapped_column(StringUUID, primary_key=True, nullable=False, server_default=db.text("uuid_generate_v4()")) + id = mapped_column(StringUUID, primary_key=True, nullable=False, server_default=sa.text("uuid_generate_v4()")) app_id = mapped_column(StringUUID, nullable=False) dataset_id = mapped_column(StringUUID, nullable=False) created_at: Mapped[datetime] = mapped_column(DateTime, nullable=False, server_default=db.func.current_timestamp()) @@ -858,13 +859,13 @@ class AppDatasetJoin(Base): class DatasetQuery(Base): __tablename__ = "dataset_queries" __table_args__ = ( - db.PrimaryKeyConstraint("id", name="dataset_query_pkey"), - db.Index("dataset_query_dataset_id_idx", "dataset_id"), + sa.PrimaryKeyConstraint("id", name="dataset_query_pkey"), + sa.Index("dataset_query_dataset_id_idx", "dataset_id"), ) - id = mapped_column(StringUUID, primary_key=True, nullable=False, server_default=db.text("uuid_generate_v4()")) + id = mapped_column(StringUUID, primary_key=True, nullable=False, server_default=sa.text("uuid_generate_v4()")) dataset_id = mapped_column(StringUUID, nullable=False) - content = mapped_column(db.Text, nullable=False) + content = mapped_column(sa.Text, nullable=False) source: Mapped[str] = mapped_column(String(255), nullable=False) source_app_id = mapped_column(StringUUID, nullable=True) created_by_role = mapped_column(String, nullable=False) @@ -875,15 +876,15 @@ class DatasetQuery(Base): class DatasetKeywordTable(Base): __tablename__ = "dataset_keyword_tables" __table_args__ = ( - db.PrimaryKeyConstraint("id", name="dataset_keyword_table_pkey"), - db.Index("dataset_keyword_table_dataset_id_idx", "dataset_id"), + sa.PrimaryKeyConstraint("id", name="dataset_keyword_table_pkey"), + sa.Index("dataset_keyword_table_dataset_id_idx", "dataset_id"), ) - id = mapped_column(StringUUID, primary_key=True, server_default=db.text("uuid_generate_v4()")) + id = mapped_column(StringUUID, primary_key=True, server_default=sa.text("uuid_generate_v4()")) dataset_id = mapped_column(StringUUID, nullable=False, unique=True) - keyword_table = mapped_column(db.Text, nullable=False) + keyword_table = mapped_column(sa.Text, nullable=False) data_source_type = mapped_column( - String(255), nullable=False, server_default=db.text("'database'::character varying") + String(255), nullable=False, server_default=sa.text("'database'::character varying") ) @property @@ -920,19 +921,19 @@ class DatasetKeywordTable(Base): class Embedding(Base): __tablename__ = "embeddings" __table_args__ = ( - db.PrimaryKeyConstraint("id", name="embedding_pkey"), - db.UniqueConstraint("model_name", "hash", "provider_name", name="embedding_hash_idx"), - db.Index("created_at_idx", "created_at"), + sa.PrimaryKeyConstraint("id", name="embedding_pkey"), + sa.UniqueConstraint("model_name", "hash", "provider_name", name="embedding_hash_idx"), + sa.Index("created_at_idx", "created_at"), ) - id = mapped_column(StringUUID, primary_key=True, server_default=db.text("uuid_generate_v4()")) + id = mapped_column(StringUUID, primary_key=True, server_default=sa.text("uuid_generate_v4()")) model_name = mapped_column( - String(255), nullable=False, server_default=db.text("'text-embedding-ada-002'::character varying") + String(255), nullable=False, server_default=sa.text("'text-embedding-ada-002'::character varying") ) hash = mapped_column(String(64), nullable=False) - embedding = mapped_column(db.LargeBinary, nullable=False) + embedding = mapped_column(sa.LargeBinary, nullable=False) created_at: Mapped[datetime] = mapped_column(DateTime, nullable=False, server_default=func.current_timestamp()) - provider_name = mapped_column(String(255), nullable=False, server_default=db.text("''::character varying")) + provider_name = mapped_column(String(255), nullable=False, server_default=sa.text("''::character varying")) def set_embedding(self, embedding_data: list[float]): self.embedding = pickle.dumps(embedding_data, protocol=pickle.HIGHEST_PROTOCOL) @@ -944,14 +945,14 @@ class Embedding(Base): class DatasetCollectionBinding(Base): __tablename__ = "dataset_collection_bindings" __table_args__ = ( - db.PrimaryKeyConstraint("id", name="dataset_collection_bindings_pkey"), - db.Index("provider_model_name_idx", "provider_name", "model_name"), + sa.PrimaryKeyConstraint("id", name="dataset_collection_bindings_pkey"), + sa.Index("provider_model_name_idx", "provider_name", "model_name"), ) - id = mapped_column(StringUUID, primary_key=True, server_default=db.text("uuid_generate_v4()")) + id = mapped_column(StringUUID, primary_key=True, server_default=sa.text("uuid_generate_v4()")) provider_name: Mapped[str] = mapped_column(String(255), nullable=False) model_name: Mapped[str] = mapped_column(String(255), nullable=False) - type = mapped_column(String(40), server_default=db.text("'dataset'::character varying"), nullable=False) + type = mapped_column(String(40), server_default=sa.text("'dataset'::character varying"), nullable=False) collection_name = mapped_column(String(64), nullable=False) created_at: Mapped[datetime] = mapped_column(DateTime, nullable=False, server_default=func.current_timestamp()) @@ -959,17 +960,17 @@ class DatasetCollectionBinding(Base): class TidbAuthBinding(Base): __tablename__ = "tidb_auth_bindings" __table_args__ = ( - db.PrimaryKeyConstraint("id", name="tidb_auth_bindings_pkey"), - db.Index("tidb_auth_bindings_tenant_idx", "tenant_id"), - db.Index("tidb_auth_bindings_active_idx", "active"), - db.Index("tidb_auth_bindings_created_at_idx", "created_at"), - db.Index("tidb_auth_bindings_status_idx", "status"), + sa.PrimaryKeyConstraint("id", name="tidb_auth_bindings_pkey"), + sa.Index("tidb_auth_bindings_tenant_idx", "tenant_id"), + sa.Index("tidb_auth_bindings_active_idx", "active"), + sa.Index("tidb_auth_bindings_created_at_idx", "created_at"), + sa.Index("tidb_auth_bindings_status_idx", "status"), ) - id = mapped_column(StringUUID, primary_key=True, server_default=db.text("uuid_generate_v4()")) + id = mapped_column(StringUUID, primary_key=True, server_default=sa.text("uuid_generate_v4()")) tenant_id = mapped_column(StringUUID, nullable=True) cluster_id: Mapped[str] = mapped_column(String(255), nullable=False) cluster_name: Mapped[str] = mapped_column(String(255), nullable=False) - active: Mapped[bool] = mapped_column(db.Boolean, nullable=False, server_default=db.text("false")) + active: Mapped[bool] = mapped_column(sa.Boolean, nullable=False, server_default=db.text("false")) status = mapped_column(String(255), nullable=False, server_default=db.text("'CREATING'::character varying")) account: Mapped[str] = mapped_column(String(255), nullable=False) password: Mapped[str] = mapped_column(String(255), nullable=False) @@ -979,10 +980,10 @@ class TidbAuthBinding(Base): class Whitelist(Base): __tablename__ = "whitelists" __table_args__ = ( - db.PrimaryKeyConstraint("id", name="whitelists_pkey"), - db.Index("whitelists_tenant_idx", "tenant_id"), + sa.PrimaryKeyConstraint("id", name="whitelists_pkey"), + sa.Index("whitelists_tenant_idx", "tenant_id"), ) - id = mapped_column(StringUUID, primary_key=True, server_default=db.text("uuid_generate_v4()")) + id = mapped_column(StringUUID, primary_key=True, server_default=sa.text("uuid_generate_v4()")) tenant_id = mapped_column(StringUUID, nullable=True) category: Mapped[str] = mapped_column(String(255), nullable=False) created_at: Mapped[datetime] = mapped_column(DateTime, nullable=False, server_default=func.current_timestamp()) @@ -991,33 +992,33 @@ class Whitelist(Base): class DatasetPermission(Base): __tablename__ = "dataset_permissions" __table_args__ = ( - db.PrimaryKeyConstraint("id", name="dataset_permission_pkey"), - db.Index("idx_dataset_permissions_dataset_id", "dataset_id"), - db.Index("idx_dataset_permissions_account_id", "account_id"), - db.Index("idx_dataset_permissions_tenant_id", "tenant_id"), + sa.PrimaryKeyConstraint("id", name="dataset_permission_pkey"), + sa.Index("idx_dataset_permissions_dataset_id", "dataset_id"), + sa.Index("idx_dataset_permissions_account_id", "account_id"), + sa.Index("idx_dataset_permissions_tenant_id", "tenant_id"), ) - id = mapped_column(StringUUID, server_default=db.text("uuid_generate_v4()"), primary_key=True) + id = mapped_column(StringUUID, server_default=sa.text("uuid_generate_v4()"), primary_key=True) dataset_id = mapped_column(StringUUID, nullable=False) account_id = mapped_column(StringUUID, nullable=False) tenant_id = mapped_column(StringUUID, nullable=False) - has_permission: Mapped[bool] = mapped_column(db.Boolean, nullable=False, server_default=db.text("true")) + has_permission: Mapped[bool] = mapped_column(sa.Boolean, nullable=False, server_default=sa.text("true")) created_at: Mapped[datetime] = mapped_column(DateTime, nullable=False, server_default=func.current_timestamp()) class ExternalKnowledgeApis(Base): __tablename__ = "external_knowledge_apis" __table_args__ = ( - db.PrimaryKeyConstraint("id", name="external_knowledge_apis_pkey"), - db.Index("external_knowledge_apis_tenant_idx", "tenant_id"), - db.Index("external_knowledge_apis_name_idx", "name"), + sa.PrimaryKeyConstraint("id", name="external_knowledge_apis_pkey"), + sa.Index("external_knowledge_apis_tenant_idx", "tenant_id"), + sa.Index("external_knowledge_apis_name_idx", "name"), ) - id = mapped_column(StringUUID, nullable=False, server_default=db.text("uuid_generate_v4()")) + id = mapped_column(StringUUID, nullable=False, server_default=sa.text("uuid_generate_v4()")) name: Mapped[str] = mapped_column(String(255), nullable=False) description: Mapped[str] = mapped_column(String(255), nullable=False) tenant_id = mapped_column(StringUUID, nullable=False) - settings = mapped_column(db.Text, nullable=True) + settings = mapped_column(sa.Text, nullable=True) created_by = mapped_column(StringUUID, nullable=False) created_at: Mapped[datetime] = mapped_column(DateTime, nullable=False, server_default=func.current_timestamp()) updated_by = mapped_column(StringUUID, nullable=True) @@ -1061,18 +1062,18 @@ class ExternalKnowledgeApis(Base): class ExternalKnowledgeBindings(Base): __tablename__ = "external_knowledge_bindings" __table_args__ = ( - db.PrimaryKeyConstraint("id", name="external_knowledge_bindings_pkey"), - db.Index("external_knowledge_bindings_tenant_idx", "tenant_id"), - db.Index("external_knowledge_bindings_dataset_idx", "dataset_id"), - db.Index("external_knowledge_bindings_external_knowledge_idx", "external_knowledge_id"), - db.Index("external_knowledge_bindings_external_knowledge_api_idx", "external_knowledge_api_id"), + sa.PrimaryKeyConstraint("id", name="external_knowledge_bindings_pkey"), + sa.Index("external_knowledge_bindings_tenant_idx", "tenant_id"), + sa.Index("external_knowledge_bindings_dataset_idx", "dataset_id"), + sa.Index("external_knowledge_bindings_external_knowledge_idx", "external_knowledge_id"), + sa.Index("external_knowledge_bindings_external_knowledge_api_idx", "external_knowledge_api_id"), ) - id = mapped_column(StringUUID, nullable=False, server_default=db.text("uuid_generate_v4()")) + id = mapped_column(StringUUID, nullable=False, server_default=sa.text("uuid_generate_v4()")) tenant_id = mapped_column(StringUUID, nullable=False) external_knowledge_api_id = mapped_column(StringUUID, nullable=False) dataset_id = mapped_column(StringUUID, nullable=False) - external_knowledge_id = mapped_column(db.Text, nullable=False) + external_knowledge_id = mapped_column(sa.Text, nullable=False) created_by = mapped_column(StringUUID, nullable=False) created_at: Mapped[datetime] = mapped_column(DateTime, nullable=False, server_default=func.current_timestamp()) updated_by = mapped_column(StringUUID, nullable=True) @@ -1082,57 +1083,57 @@ class ExternalKnowledgeBindings(Base): class DatasetAutoDisableLog(Base): __tablename__ = "dataset_auto_disable_logs" __table_args__ = ( - db.PrimaryKeyConstraint("id", name="dataset_auto_disable_log_pkey"), - db.Index("dataset_auto_disable_log_tenant_idx", "tenant_id"), - db.Index("dataset_auto_disable_log_dataset_idx", "dataset_id"), - db.Index("dataset_auto_disable_log_created_atx", "created_at"), + sa.PrimaryKeyConstraint("id", name="dataset_auto_disable_log_pkey"), + sa.Index("dataset_auto_disable_log_tenant_idx", "tenant_id"), + sa.Index("dataset_auto_disable_log_dataset_idx", "dataset_id"), + sa.Index("dataset_auto_disable_log_created_atx", "created_at"), ) - id = mapped_column(StringUUID, server_default=db.text("uuid_generate_v4()")) + id = mapped_column(StringUUID, server_default=sa.text("uuid_generate_v4()")) tenant_id = mapped_column(StringUUID, nullable=False) dataset_id = mapped_column(StringUUID, nullable=False) document_id = mapped_column(StringUUID, nullable=False) - notified: Mapped[bool] = mapped_column(db.Boolean, nullable=False, server_default=db.text("false")) + notified: Mapped[bool] = mapped_column(sa.Boolean, nullable=False, server_default=sa.text("false")) created_at: Mapped[datetime] = mapped_column( - DateTime, nullable=False, server_default=db.text("CURRENT_TIMESTAMP(0)") + DateTime, nullable=False, server_default=sa.text("CURRENT_TIMESTAMP(0)") ) class RateLimitLog(Base): __tablename__ = "rate_limit_logs" __table_args__ = ( - db.PrimaryKeyConstraint("id", name="rate_limit_log_pkey"), - db.Index("rate_limit_log_tenant_idx", "tenant_id"), - db.Index("rate_limit_log_operation_idx", "operation"), + sa.PrimaryKeyConstraint("id", name="rate_limit_log_pkey"), + sa.Index("rate_limit_log_tenant_idx", "tenant_id"), + sa.Index("rate_limit_log_operation_idx", "operation"), ) - id = mapped_column(StringUUID, server_default=db.text("uuid_generate_v4()")) + id = mapped_column(StringUUID, server_default=sa.text("uuid_generate_v4()")) tenant_id = mapped_column(StringUUID, nullable=False) subscription_plan: Mapped[str] = mapped_column(String(255), nullable=False) operation: Mapped[str] = mapped_column(String(255), nullable=False) created_at: Mapped[datetime] = mapped_column( - DateTime, nullable=False, server_default=db.text("CURRENT_TIMESTAMP(0)") + DateTime, nullable=False, server_default=sa.text("CURRENT_TIMESTAMP(0)") ) class DatasetMetadata(Base): __tablename__ = "dataset_metadatas" __table_args__ = ( - db.PrimaryKeyConstraint("id", name="dataset_metadata_pkey"), - db.Index("dataset_metadata_tenant_idx", "tenant_id"), - db.Index("dataset_metadata_dataset_idx", "dataset_id"), + sa.PrimaryKeyConstraint("id", name="dataset_metadata_pkey"), + sa.Index("dataset_metadata_tenant_idx", "tenant_id"), + sa.Index("dataset_metadata_dataset_idx", "dataset_id"), ) - id = mapped_column(StringUUID, server_default=db.text("uuid_generate_v4()")) + id = mapped_column(StringUUID, server_default=sa.text("uuid_generate_v4()")) tenant_id = mapped_column(StringUUID, nullable=False) dataset_id = mapped_column(StringUUID, nullable=False) type: Mapped[str] = mapped_column(String(255), nullable=False) name: Mapped[str] = mapped_column(String(255), nullable=False) created_at: Mapped[datetime] = mapped_column( - DateTime, nullable=False, server_default=db.text("CURRENT_TIMESTAMP(0)") + DateTime, nullable=False, server_default=sa.text("CURRENT_TIMESTAMP(0)") ) updated_at: Mapped[datetime] = mapped_column( - DateTime, nullable=False, server_default=db.text("CURRENT_TIMESTAMP(0)") + DateTime, nullable=False, server_default=sa.text("CURRENT_TIMESTAMP(0)") ) created_by = mapped_column(StringUUID, nullable=False) updated_by = mapped_column(StringUUID, nullable=True) @@ -1141,14 +1142,14 @@ class DatasetMetadata(Base): class DatasetMetadataBinding(Base): __tablename__ = "dataset_metadata_bindings" __table_args__ = ( - db.PrimaryKeyConstraint("id", name="dataset_metadata_binding_pkey"), - db.Index("dataset_metadata_binding_tenant_idx", "tenant_id"), - db.Index("dataset_metadata_binding_dataset_idx", "dataset_id"), - db.Index("dataset_metadata_binding_metadata_idx", "metadata_id"), - db.Index("dataset_metadata_binding_document_idx", "document_id"), + sa.PrimaryKeyConstraint("id", name="dataset_metadata_binding_pkey"), + sa.Index("dataset_metadata_binding_tenant_idx", "tenant_id"), + sa.Index("dataset_metadata_binding_dataset_idx", "dataset_id"), + sa.Index("dataset_metadata_binding_metadata_idx", "metadata_id"), + sa.Index("dataset_metadata_binding_document_idx", "document_id"), ) - id = mapped_column(StringUUID, server_default=db.text("uuid_generate_v4()")) + id = mapped_column(StringUUID, server_default=sa.text("uuid_generate_v4()")) tenant_id = mapped_column(StringUUID, nullable=False) dataset_id = mapped_column(StringUUID, nullable=False) metadata_id = mapped_column(StringUUID, nullable=False) diff --git a/api/models/model.py b/api/models/model.py index fba0d692eb..c4303f3cc5 100644 --- a/api/models/model.py +++ b/api/models/model.py @@ -35,10 +35,10 @@ from .types import StringUUID class DifySetup(Base): __tablename__ = "dify_setups" - __table_args__ = (db.PrimaryKeyConstraint("version", name="dify_setup_pkey"),) + __table_args__ = (sa.PrimaryKeyConstraint("version", name="dify_setup_pkey"),) version: Mapped[str] = mapped_column(String(255), nullable=False) - setup_at = mapped_column(db.DateTime, nullable=False, server_default=func.current_timestamp()) + setup_at = mapped_column(sa.DateTime, nullable=False, server_default=func.current_timestamp()) class AppMode(StrEnum): @@ -69,33 +69,33 @@ class IconType(Enum): class App(Base): __tablename__ = "apps" - __table_args__ = (db.PrimaryKeyConstraint("id", name="app_pkey"), db.Index("app_tenant_id_idx", "tenant_id")) + __table_args__ = (sa.PrimaryKeyConstraint("id", name="app_pkey"), sa.Index("app_tenant_id_idx", "tenant_id")) - id: Mapped[str] = mapped_column(StringUUID, server_default=db.text("uuid_generate_v4()")) + id: Mapped[str] = mapped_column(StringUUID, server_default=sa.text("uuid_generate_v4()")) tenant_id: Mapped[str] = mapped_column(StringUUID) name: Mapped[str] = mapped_column(String(255)) - description: Mapped[str] = mapped_column(db.Text, server_default=db.text("''::character varying")) + description: Mapped[str] = mapped_column(sa.Text, server_default=sa.text("''::character varying")) mode: Mapped[str] = mapped_column(String(255)) icon_type: Mapped[Optional[str]] = mapped_column(String(255)) # image, emoji icon = db.Column(String(255)) icon_background: Mapped[Optional[str]] = mapped_column(String(255)) app_model_config_id = mapped_column(StringUUID, nullable=True) workflow_id = mapped_column(StringUUID, nullable=True) - status: Mapped[str] = mapped_column(String(255), server_default=db.text("'normal'::character varying")) - enable_site: Mapped[bool] = mapped_column(db.Boolean) - enable_api: Mapped[bool] = mapped_column(db.Boolean) - api_rpm: Mapped[int] = mapped_column(db.Integer, server_default=db.text("0")) - api_rph: Mapped[int] = mapped_column(db.Integer, server_default=db.text("0")) - is_demo: Mapped[bool] = mapped_column(db.Boolean, server_default=db.text("false")) - is_public: Mapped[bool] = mapped_column(db.Boolean, server_default=db.text("false")) - is_universal: Mapped[bool] = mapped_column(db.Boolean, server_default=db.text("false")) - tracing = mapped_column(db.Text, nullable=True) + status: Mapped[str] = mapped_column(String(255), server_default=sa.text("'normal'::character varying")) + enable_site: Mapped[bool] = mapped_column(sa.Boolean) + enable_api: Mapped[bool] = mapped_column(sa.Boolean) + api_rpm: Mapped[int] = mapped_column(sa.Integer, server_default=sa.text("0")) + api_rph: Mapped[int] = mapped_column(sa.Integer, server_default=sa.text("0")) + is_demo: Mapped[bool] = mapped_column(sa.Boolean, server_default=sa.text("false")) + is_public: Mapped[bool] = mapped_column(sa.Boolean, server_default=sa.text("false")) + is_universal: Mapped[bool] = mapped_column(sa.Boolean, server_default=sa.text("false")) + tracing = mapped_column(sa.Text, nullable=True) max_active_requests: Mapped[Optional[int]] created_by = mapped_column(StringUUID, nullable=True) - created_at = mapped_column(db.DateTime, nullable=False, server_default=func.current_timestamp()) + created_at = mapped_column(sa.DateTime, nullable=False, server_default=func.current_timestamp()) updated_by = mapped_column(StringUUID, nullable=True) - updated_at: Mapped[datetime] = mapped_column(db.DateTime, nullable=False, server_default=func.current_timestamp()) - use_icon_as_answer_icon: Mapped[bool] = mapped_column(db.Boolean, nullable=False, server_default=db.text("false")) + updated_at: Mapped[datetime] = mapped_column(sa.DateTime, nullable=False, server_default=func.current_timestamp()) + use_icon_as_answer_icon: Mapped[bool] = mapped_column(sa.Boolean, nullable=False, server_default=sa.text("false")) @property def desc_or_prompt(self): @@ -302,36 +302,36 @@ class App(Base): class AppModelConfig(Base): __tablename__ = "app_model_configs" - __table_args__ = (db.PrimaryKeyConstraint("id", name="app_model_config_pkey"), db.Index("app_app_id_idx", "app_id")) + __table_args__ = (sa.PrimaryKeyConstraint("id", name="app_model_config_pkey"), sa.Index("app_app_id_idx", "app_id")) - id = mapped_column(StringUUID, server_default=db.text("uuid_generate_v4()")) + id = mapped_column(StringUUID, server_default=sa.text("uuid_generate_v4()")) app_id = mapped_column(StringUUID, nullable=False) provider = mapped_column(String(255), nullable=True) model_id = mapped_column(String(255), nullable=True) - configs = mapped_column(db.JSON, nullable=True) + configs = mapped_column(sa.JSON, nullable=True) created_by = mapped_column(StringUUID, nullable=True) - created_at = mapped_column(db.DateTime, nullable=False, server_default=func.current_timestamp()) + created_at = mapped_column(sa.DateTime, nullable=False, server_default=func.current_timestamp()) updated_by = mapped_column(StringUUID, nullable=True) - updated_at = mapped_column(db.DateTime, nullable=False, server_default=func.current_timestamp()) - opening_statement = mapped_column(db.Text) - suggested_questions = mapped_column(db.Text) - suggested_questions_after_answer = mapped_column(db.Text) - speech_to_text = mapped_column(db.Text) - text_to_speech = mapped_column(db.Text) - more_like_this = mapped_column(db.Text) - model = mapped_column(db.Text) - user_input_form = mapped_column(db.Text) + updated_at = mapped_column(sa.DateTime, nullable=False, server_default=func.current_timestamp()) + opening_statement = mapped_column(sa.Text) + suggested_questions = mapped_column(sa.Text) + suggested_questions_after_answer = mapped_column(sa.Text) + speech_to_text = mapped_column(sa.Text) + text_to_speech = mapped_column(sa.Text) + more_like_this = mapped_column(sa.Text) + model = mapped_column(sa.Text) + user_input_form = mapped_column(sa.Text) dataset_query_variable = mapped_column(String(255)) - pre_prompt = mapped_column(db.Text) - agent_mode = mapped_column(db.Text) - sensitive_word_avoidance = mapped_column(db.Text) - retriever_resource = mapped_column(db.Text) - prompt_type = mapped_column(String(255), nullable=False, server_default=db.text("'simple'::character varying")) - chat_prompt_config = mapped_column(db.Text) - completion_prompt_config = mapped_column(db.Text) - dataset_configs = mapped_column(db.Text) - external_data_tools = mapped_column(db.Text) - file_upload = mapped_column(db.Text) + pre_prompt = mapped_column(sa.Text) + agent_mode = mapped_column(sa.Text) + sensitive_word_avoidance = mapped_column(sa.Text) + retriever_resource = mapped_column(sa.Text) + prompt_type = mapped_column(String(255), nullable=False, server_default=sa.text("'simple'::character varying")) + chat_prompt_config = mapped_column(sa.Text) + completion_prompt_config = mapped_column(sa.Text) + dataset_configs = mapped_column(sa.Text) + external_data_tools = mapped_column(sa.Text) + file_upload = mapped_column(sa.Text) @property def app(self): @@ -553,24 +553,24 @@ class AppModelConfig(Base): class RecommendedApp(Base): __tablename__ = "recommended_apps" __table_args__ = ( - db.PrimaryKeyConstraint("id", name="recommended_app_pkey"), - db.Index("recommended_app_app_id_idx", "app_id"), - db.Index("recommended_app_is_listed_idx", "is_listed", "language"), + sa.PrimaryKeyConstraint("id", name="recommended_app_pkey"), + sa.Index("recommended_app_app_id_idx", "app_id"), + sa.Index("recommended_app_is_listed_idx", "is_listed", "language"), ) - id = mapped_column(StringUUID, primary_key=True, server_default=db.text("uuid_generate_v4()")) + id = mapped_column(StringUUID, primary_key=True, server_default=sa.text("uuid_generate_v4()")) app_id = mapped_column(StringUUID, nullable=False) - description = mapped_column(db.JSON, nullable=False) + description = mapped_column(sa.JSON, nullable=False) copyright: Mapped[str] = mapped_column(String(255), nullable=False) privacy_policy: Mapped[str] = mapped_column(String(255), nullable=False) custom_disclaimer: Mapped[str] = mapped_column(sa.TEXT, default="") category: Mapped[str] = mapped_column(String(255), nullable=False) - position: Mapped[int] = mapped_column(db.Integer, nullable=False, default=0) - is_listed: Mapped[bool] = mapped_column(db.Boolean, nullable=False, default=True) - install_count: Mapped[int] = mapped_column(db.Integer, nullable=False, default=0) - language = mapped_column(String(255), nullable=False, server_default=db.text("'en-US'::character varying")) - created_at = mapped_column(db.DateTime, nullable=False, server_default=func.current_timestamp()) - updated_at = mapped_column(db.DateTime, nullable=False, server_default=func.current_timestamp()) + position: Mapped[int] = mapped_column(sa.Integer, nullable=False, default=0) + is_listed: Mapped[bool] = mapped_column(sa.Boolean, nullable=False, default=True) + install_count: Mapped[int] = mapped_column(sa.Integer, nullable=False, default=0) + language = mapped_column(String(255), nullable=False, server_default=sa.text("'en-US'::character varying")) + created_at = mapped_column(sa.DateTime, nullable=False, server_default=func.current_timestamp()) + updated_at = mapped_column(sa.DateTime, nullable=False, server_default=func.current_timestamp()) @property def app(self): @@ -581,20 +581,20 @@ class RecommendedApp(Base): class InstalledApp(Base): __tablename__ = "installed_apps" __table_args__ = ( - db.PrimaryKeyConstraint("id", name="installed_app_pkey"), - db.Index("installed_app_tenant_id_idx", "tenant_id"), - db.Index("installed_app_app_id_idx", "app_id"), - db.UniqueConstraint("tenant_id", "app_id", name="unique_tenant_app"), + sa.PrimaryKeyConstraint("id", name="installed_app_pkey"), + sa.Index("installed_app_tenant_id_idx", "tenant_id"), + sa.Index("installed_app_app_id_idx", "app_id"), + sa.UniqueConstraint("tenant_id", "app_id", name="unique_tenant_app"), ) - id = mapped_column(StringUUID, server_default=db.text("uuid_generate_v4()")) + id = mapped_column(StringUUID, server_default=sa.text("uuid_generate_v4()")) tenant_id = mapped_column(StringUUID, nullable=False) app_id = mapped_column(StringUUID, nullable=False) app_owner_tenant_id = mapped_column(StringUUID, nullable=False) - position: Mapped[int] = mapped_column(db.Integer, nullable=False, default=0) - is_pinned: Mapped[bool] = mapped_column(db.Boolean, nullable=False, server_default=db.text("false")) - last_used_at = mapped_column(db.DateTime, nullable=True) - created_at = mapped_column(db.DateTime, nullable=False, server_default=func.current_timestamp()) + position: Mapped[int] = mapped_column(sa.Integer, nullable=False, default=0) + is_pinned: Mapped[bool] = mapped_column(sa.Boolean, nullable=False, server_default=sa.text("false")) + last_used_at = mapped_column(sa.DateTime, nullable=True) + created_at = mapped_column(sa.DateTime, nullable=False, server_default=func.current_timestamp()) @property def app(self): @@ -610,23 +610,23 @@ class InstalledApp(Base): class Conversation(Base): __tablename__ = "conversations" __table_args__ = ( - db.PrimaryKeyConstraint("id", name="conversation_pkey"), - db.Index("conversation_app_from_user_idx", "app_id", "from_source", "from_end_user_id"), + sa.PrimaryKeyConstraint("id", name="conversation_pkey"), + sa.Index("conversation_app_from_user_idx", "app_id", "from_source", "from_end_user_id"), ) - id: Mapped[str] = mapped_column(StringUUID, server_default=db.text("uuid_generate_v4()")) + id: Mapped[str] = mapped_column(StringUUID, server_default=sa.text("uuid_generate_v4()")) app_id = mapped_column(StringUUID, nullable=False) app_model_config_id = mapped_column(StringUUID, nullable=True) model_provider = mapped_column(String(255), nullable=True) - override_model_configs = mapped_column(db.Text) + override_model_configs = mapped_column(sa.Text) model_id = mapped_column(String(255), nullable=True) mode: Mapped[str] = mapped_column(String(255)) name: Mapped[str] = mapped_column(String(255), nullable=False) - summary = mapped_column(db.Text) - _inputs: Mapped[dict] = mapped_column("inputs", db.JSON) - introduction = mapped_column(db.Text) - system_instruction = mapped_column(db.Text) - system_instruction_tokens: Mapped[int] = mapped_column(db.Integer, nullable=False, server_default=db.text("0")) + summary = mapped_column(sa.Text) + _inputs: Mapped[dict] = mapped_column("inputs", sa.JSON) + introduction = mapped_column(sa.Text) + system_instruction = mapped_column(sa.Text) + system_instruction_tokens: Mapped[int] = mapped_column(sa.Integer, nullable=False, server_default=sa.text("0")) status: Mapped[str] = mapped_column(String(255), nullable=False) # The `invoke_from` records how the conversation is created. @@ -639,18 +639,18 @@ class Conversation(Base): from_source: Mapped[str] = mapped_column(String(255), nullable=False) from_end_user_id = mapped_column(StringUUID) from_account_id = mapped_column(StringUUID) - read_at = mapped_column(db.DateTime) + read_at = mapped_column(sa.DateTime) read_account_id = mapped_column(StringUUID) dialogue_count: Mapped[int] = mapped_column(default=0) - created_at = mapped_column(db.DateTime, nullable=False, server_default=func.current_timestamp()) - updated_at = mapped_column(db.DateTime, nullable=False, server_default=func.current_timestamp()) + created_at = mapped_column(sa.DateTime, nullable=False, server_default=func.current_timestamp()) + updated_at = mapped_column(sa.DateTime, nullable=False, server_default=func.current_timestamp()) messages = db.relationship("Message", backref="conversation", lazy="select", passive_deletes="all") message_annotations = db.relationship( "MessageAnnotation", backref="conversation", lazy="select", passive_deletes="all" ) - is_deleted: Mapped[bool] = mapped_column(db.Boolean, nullable=False, server_default=db.text("false")) + is_deleted: Mapped[bool] = mapped_column(sa.Boolean, nullable=False, server_default=sa.text("false")) @property def inputs(self): @@ -892,36 +892,36 @@ class Message(Base): Index("message_created_at_idx", "created_at"), ) - id: Mapped[str] = mapped_column(StringUUID, server_default=db.text("uuid_generate_v4()")) + id: Mapped[str] = mapped_column(StringUUID, server_default=sa.text("uuid_generate_v4()")) app_id = mapped_column(StringUUID, nullable=False) model_provider = mapped_column(String(255), nullable=True) model_id = mapped_column(String(255), nullable=True) - override_model_configs = mapped_column(db.Text) - conversation_id = mapped_column(StringUUID, db.ForeignKey("conversations.id"), nullable=False) - _inputs: Mapped[dict] = mapped_column("inputs", db.JSON) - query: Mapped[str] = mapped_column(db.Text, nullable=False) - message = mapped_column(db.JSON, nullable=False) - message_tokens: Mapped[int] = mapped_column(db.Integer, nullable=False, server_default=db.text("0")) - message_unit_price = mapped_column(db.Numeric(10, 4), nullable=False) - message_price_unit = mapped_column(db.Numeric(10, 7), nullable=False, server_default=db.text("0.001")) - answer: Mapped[str] = db.Column(db.Text, nullable=False) # TODO make it mapped_column - answer_tokens: Mapped[int] = mapped_column(db.Integer, nullable=False, server_default=db.text("0")) - answer_unit_price = mapped_column(db.Numeric(10, 4), nullable=False) - answer_price_unit = mapped_column(db.Numeric(10, 7), nullable=False, server_default=db.text("0.001")) + override_model_configs = mapped_column(sa.Text) + conversation_id = mapped_column(StringUUID, sa.ForeignKey("conversations.id"), nullable=False) + _inputs: Mapped[dict] = mapped_column("inputs", sa.JSON) + query: Mapped[str] = mapped_column(sa.Text, nullable=False) + message = mapped_column(sa.JSON, nullable=False) + message_tokens: Mapped[int] = mapped_column(sa.Integer, nullable=False, server_default=sa.text("0")) + message_unit_price = mapped_column(sa.Numeric(10, 4), nullable=False) + message_price_unit = mapped_column(sa.Numeric(10, 7), nullable=False, server_default=sa.text("0.001")) + answer: Mapped[str] = db.Column(sa.Text, nullable=False) # TODO make it mapped_column + answer_tokens: Mapped[int] = mapped_column(sa.Integer, nullable=False, server_default=sa.text("0")) + answer_unit_price = mapped_column(sa.Numeric(10, 4), nullable=False) + answer_price_unit = mapped_column(sa.Numeric(10, 7), nullable=False, server_default=sa.text("0.001")) parent_message_id = mapped_column(StringUUID, nullable=True) - provider_response_latency = mapped_column(db.Float, nullable=False, server_default=db.text("0")) - total_price = mapped_column(db.Numeric(10, 7)) + provider_response_latency = mapped_column(sa.Float, nullable=False, server_default=sa.text("0")) + total_price = mapped_column(sa.Numeric(10, 7)) currency: Mapped[str] = mapped_column(String(255), nullable=False) - status = mapped_column(String(255), nullable=False, server_default=db.text("'normal'::character varying")) - error = mapped_column(db.Text) - message_metadata = mapped_column(db.Text) + status = mapped_column(String(255), nullable=False, server_default=sa.text("'normal'::character varying")) + error = mapped_column(sa.Text) + message_metadata = mapped_column(sa.Text) invoke_from: Mapped[Optional[str]] = mapped_column(String(255), nullable=True) from_source: Mapped[str] = mapped_column(String(255), nullable=False) from_end_user_id: Mapped[Optional[str]] = mapped_column(StringUUID) from_account_id: Mapped[Optional[str]] = mapped_column(StringUUID) - created_at: Mapped[datetime] = mapped_column(db.DateTime, server_default=func.current_timestamp()) - updated_at = mapped_column(db.DateTime, nullable=False, server_default=func.current_timestamp()) - agent_based: Mapped[bool] = mapped_column(db.Boolean, nullable=False, server_default=db.text("false")) + created_at: Mapped[datetime] = mapped_column(sa.DateTime, server_default=func.current_timestamp()) + updated_at = mapped_column(sa.DateTime, nullable=False, server_default=func.current_timestamp()) + agent_based: Mapped[bool] = mapped_column(sa.Boolean, nullable=False, server_default=sa.text("false")) workflow_run_id: Mapped[Optional[str]] = mapped_column(StringUUID) @property @@ -1228,23 +1228,23 @@ class Message(Base): class MessageFeedback(Base): __tablename__ = "message_feedbacks" __table_args__ = ( - db.PrimaryKeyConstraint("id", name="message_feedback_pkey"), - db.Index("message_feedback_app_idx", "app_id"), - db.Index("message_feedback_message_idx", "message_id", "from_source"), - db.Index("message_feedback_conversation_idx", "conversation_id", "from_source", "rating"), + sa.PrimaryKeyConstraint("id", name="message_feedback_pkey"), + sa.Index("message_feedback_app_idx", "app_id"), + sa.Index("message_feedback_message_idx", "message_id", "from_source"), + sa.Index("message_feedback_conversation_idx", "conversation_id", "from_source", "rating"), ) - id = mapped_column(StringUUID, server_default=db.text("uuid_generate_v4()")) + id = mapped_column(StringUUID, server_default=sa.text("uuid_generate_v4()")) app_id = mapped_column(StringUUID, nullable=False) conversation_id = mapped_column(StringUUID, nullable=False) message_id = mapped_column(StringUUID, nullable=False) rating: Mapped[str] = mapped_column(String(255), nullable=False) - content = mapped_column(db.Text) + content = mapped_column(sa.Text) from_source: Mapped[str] = mapped_column(String(255), nullable=False) from_end_user_id = mapped_column(StringUUID) from_account_id = mapped_column(StringUUID) - created_at = mapped_column(db.DateTime, nullable=False, server_default=func.current_timestamp()) - updated_at = mapped_column(db.DateTime, nullable=False, server_default=func.current_timestamp()) + created_at = mapped_column(sa.DateTime, nullable=False, server_default=func.current_timestamp()) + updated_at = mapped_column(sa.DateTime, nullable=False, server_default=func.current_timestamp()) @property def from_account(self): @@ -1270,9 +1270,9 @@ class MessageFeedback(Base): class MessageFile(Base): __tablename__ = "message_files" __table_args__ = ( - db.PrimaryKeyConstraint("id", name="message_file_pkey"), - db.Index("message_file_message_idx", "message_id"), - db.Index("message_file_created_by_idx", "created_by"), + sa.PrimaryKeyConstraint("id", name="message_file_pkey"), + sa.Index("message_file_message_idx", "message_id"), + sa.Index("message_file_created_by_idx", "created_by"), ) def __init__( @@ -1296,37 +1296,37 @@ class MessageFile(Base): self.created_by_role = created_by_role.value self.created_by = created_by - id: Mapped[str] = mapped_column(StringUUID, server_default=db.text("uuid_generate_v4()")) + id: Mapped[str] = mapped_column(StringUUID, server_default=sa.text("uuid_generate_v4()")) message_id: Mapped[str] = mapped_column(StringUUID, nullable=False) type: Mapped[str] = mapped_column(String(255), nullable=False) transfer_method: Mapped[str] = mapped_column(String(255), nullable=False) - url: Mapped[Optional[str]] = mapped_column(db.Text, nullable=True) + url: Mapped[Optional[str]] = mapped_column(sa.Text, nullable=True) belongs_to: Mapped[Optional[str]] = mapped_column(String(255), nullable=True) upload_file_id: Mapped[Optional[str]] = mapped_column(StringUUID, nullable=True) created_by_role: Mapped[str] = mapped_column(String(255), nullable=False) created_by: Mapped[str] = mapped_column(StringUUID, nullable=False) - created_at: Mapped[datetime] = mapped_column(db.DateTime, nullable=False, server_default=func.current_timestamp()) + created_at: Mapped[datetime] = mapped_column(sa.DateTime, nullable=False, server_default=func.current_timestamp()) class MessageAnnotation(Base): __tablename__ = "message_annotations" __table_args__ = ( - db.PrimaryKeyConstraint("id", name="message_annotation_pkey"), - db.Index("message_annotation_app_idx", "app_id"), - db.Index("message_annotation_conversation_idx", "conversation_id"), - db.Index("message_annotation_message_idx", "message_id"), + sa.PrimaryKeyConstraint("id", name="message_annotation_pkey"), + sa.Index("message_annotation_app_idx", "app_id"), + sa.Index("message_annotation_conversation_idx", "conversation_id"), + sa.Index("message_annotation_message_idx", "message_id"), ) - id: Mapped[str] = mapped_column(StringUUID, server_default=db.text("uuid_generate_v4()")) + id: Mapped[str] = mapped_column(StringUUID, server_default=sa.text("uuid_generate_v4()")) app_id: Mapped[str] = mapped_column(StringUUID) - conversation_id: Mapped[Optional[str]] = mapped_column(StringUUID, db.ForeignKey("conversations.id")) + conversation_id: Mapped[Optional[str]] = mapped_column(StringUUID, sa.ForeignKey("conversations.id")) message_id: Mapped[Optional[str]] = mapped_column(StringUUID) - question = db.Column(db.Text, nullable=True) - content = mapped_column(db.Text, nullable=False) - hit_count: Mapped[int] = mapped_column(db.Integer, nullable=False, server_default=db.text("0")) + question = db.Column(sa.Text, nullable=True) + content = mapped_column(sa.Text, nullable=False) + hit_count: Mapped[int] = mapped_column(sa.Integer, nullable=False, server_default=sa.text("0")) account_id = mapped_column(StringUUID, nullable=False) - created_at = mapped_column(db.DateTime, nullable=False, server_default=func.current_timestamp()) - updated_at = mapped_column(db.DateTime, nullable=False, server_default=func.current_timestamp()) + created_at = mapped_column(sa.DateTime, nullable=False, server_default=func.current_timestamp()) + updated_at = mapped_column(sa.DateTime, nullable=False, server_default=func.current_timestamp()) @property def account(self): @@ -1342,24 +1342,24 @@ class MessageAnnotation(Base): class AppAnnotationHitHistory(Base): __tablename__ = "app_annotation_hit_histories" __table_args__ = ( - db.PrimaryKeyConstraint("id", name="app_annotation_hit_histories_pkey"), - db.Index("app_annotation_hit_histories_app_idx", "app_id"), - db.Index("app_annotation_hit_histories_account_idx", "account_id"), - db.Index("app_annotation_hit_histories_annotation_idx", "annotation_id"), - db.Index("app_annotation_hit_histories_message_idx", "message_id"), + sa.PrimaryKeyConstraint("id", name="app_annotation_hit_histories_pkey"), + sa.Index("app_annotation_hit_histories_app_idx", "app_id"), + sa.Index("app_annotation_hit_histories_account_idx", "account_id"), + sa.Index("app_annotation_hit_histories_annotation_idx", "annotation_id"), + sa.Index("app_annotation_hit_histories_message_idx", "message_id"), ) - id = mapped_column(StringUUID, server_default=db.text("uuid_generate_v4()")) + id = mapped_column(StringUUID, server_default=sa.text("uuid_generate_v4()")) app_id = mapped_column(StringUUID, nullable=False) annotation_id: Mapped[str] = mapped_column(StringUUID, nullable=False) - source = mapped_column(db.Text, nullable=False) - question = mapped_column(db.Text, nullable=False) + source = mapped_column(sa.Text, nullable=False) + question = mapped_column(sa.Text, nullable=False) account_id = mapped_column(StringUUID, nullable=False) - created_at = mapped_column(db.DateTime, nullable=False, server_default=func.current_timestamp()) - score = mapped_column(Float, nullable=False, server_default=db.text("0")) + created_at = mapped_column(sa.DateTime, nullable=False, server_default=func.current_timestamp()) + score = mapped_column(Float, nullable=False, server_default=sa.text("0")) message_id = mapped_column(StringUUID, nullable=False) - annotation_question = mapped_column(db.Text, nullable=False) - annotation_content = mapped_column(db.Text, nullable=False) + annotation_question = mapped_column(sa.Text, nullable=False) + annotation_content = mapped_column(sa.Text, nullable=False) @property def account(self): @@ -1380,18 +1380,18 @@ class AppAnnotationHitHistory(Base): class AppAnnotationSetting(Base): __tablename__ = "app_annotation_settings" __table_args__ = ( - db.PrimaryKeyConstraint("id", name="app_annotation_settings_pkey"), - db.Index("app_annotation_settings_app_idx", "app_id"), + sa.PrimaryKeyConstraint("id", name="app_annotation_settings_pkey"), + sa.Index("app_annotation_settings_app_idx", "app_id"), ) - id = mapped_column(StringUUID, server_default=db.text("uuid_generate_v4()")) + id = mapped_column(StringUUID, server_default=sa.text("uuid_generate_v4()")) app_id = mapped_column(StringUUID, nullable=False) - score_threshold = mapped_column(Float, nullable=False, server_default=db.text("0")) + score_threshold = mapped_column(Float, nullable=False, server_default=sa.text("0")) collection_binding_id = mapped_column(StringUUID, nullable=False) created_user_id = mapped_column(StringUUID, nullable=False) - created_at = mapped_column(db.DateTime, nullable=False, server_default=func.current_timestamp()) + created_at = mapped_column(sa.DateTime, nullable=False, server_default=func.current_timestamp()) updated_user_id = mapped_column(StringUUID, nullable=False) - updated_at = mapped_column(db.DateTime, nullable=False, server_default=func.current_timestamp()) + updated_at = mapped_column(sa.DateTime, nullable=False, server_default=func.current_timestamp()) @property def collection_binding_detail(self): @@ -1408,58 +1408,58 @@ class AppAnnotationSetting(Base): class OperationLog(Base): __tablename__ = "operation_logs" __table_args__ = ( - db.PrimaryKeyConstraint("id", name="operation_log_pkey"), - db.Index("operation_log_account_action_idx", "tenant_id", "account_id", "action"), + sa.PrimaryKeyConstraint("id", name="operation_log_pkey"), + sa.Index("operation_log_account_action_idx", "tenant_id", "account_id", "action"), ) - id = mapped_column(StringUUID, server_default=db.text("uuid_generate_v4()")) + id = mapped_column(StringUUID, server_default=sa.text("uuid_generate_v4()")) tenant_id = mapped_column(StringUUID, nullable=False) account_id = mapped_column(StringUUID, nullable=False) action: Mapped[str] = mapped_column(String(255), nullable=False) - content = mapped_column(db.JSON) - created_at = mapped_column(db.DateTime, nullable=False, server_default=func.current_timestamp()) + content = mapped_column(sa.JSON) + created_at = mapped_column(sa.DateTime, nullable=False, server_default=func.current_timestamp()) created_ip: Mapped[str] = mapped_column(String(255), nullable=False) - updated_at = mapped_column(db.DateTime, nullable=False, server_default=func.current_timestamp()) + updated_at = mapped_column(sa.DateTime, nullable=False, server_default=func.current_timestamp()) class EndUser(Base, UserMixin): __tablename__ = "end_users" __table_args__ = ( - db.PrimaryKeyConstraint("id", name="end_user_pkey"), - db.Index("end_user_session_id_idx", "session_id", "type"), - db.Index("end_user_tenant_session_id_idx", "tenant_id", "session_id", "type"), + sa.PrimaryKeyConstraint("id", name="end_user_pkey"), + sa.Index("end_user_session_id_idx", "session_id", "type"), + sa.Index("end_user_tenant_session_id_idx", "tenant_id", "session_id", "type"), ) - id = mapped_column(StringUUID, server_default=db.text("uuid_generate_v4()")) + id = mapped_column(StringUUID, server_default=sa.text("uuid_generate_v4()")) tenant_id: Mapped[str] = mapped_column(StringUUID, nullable=False) app_id = mapped_column(StringUUID, nullable=True) type: Mapped[str] = mapped_column(String(255), nullable=False) external_user_id = mapped_column(String(255), nullable=True) name = mapped_column(String(255)) - is_anonymous: Mapped[bool] = mapped_column(db.Boolean, nullable=False, server_default=db.text("true")) + is_anonymous: Mapped[bool] = mapped_column(sa.Boolean, nullable=False, server_default=sa.text("true")) session_id: Mapped[str] = mapped_column() - created_at = mapped_column(db.DateTime, nullable=False, server_default=func.current_timestamp()) - updated_at = mapped_column(db.DateTime, nullable=False, server_default=func.current_timestamp()) + created_at = mapped_column(sa.DateTime, nullable=False, server_default=func.current_timestamp()) + updated_at = mapped_column(sa.DateTime, nullable=False, server_default=func.current_timestamp()) class AppMCPServer(Base): __tablename__ = "app_mcp_servers" __table_args__ = ( - db.PrimaryKeyConstraint("id", name="app_mcp_server_pkey"), - db.UniqueConstraint("tenant_id", "app_id", name="unique_app_mcp_server_tenant_app_id"), - db.UniqueConstraint("server_code", name="unique_app_mcp_server_server_code"), + sa.PrimaryKeyConstraint("id", name="app_mcp_server_pkey"), + sa.UniqueConstraint("tenant_id", "app_id", name="unique_app_mcp_server_tenant_app_id"), + sa.UniqueConstraint("server_code", name="unique_app_mcp_server_server_code"), ) - id = mapped_column(StringUUID, server_default=db.text("uuid_generate_v4()")) + id = mapped_column(StringUUID, server_default=sa.text("uuid_generate_v4()")) tenant_id = mapped_column(StringUUID, nullable=False) app_id = mapped_column(StringUUID, nullable=False) name: Mapped[str] = mapped_column(String(255), nullable=False) description: Mapped[str] = mapped_column(String(255), nullable=False) server_code: Mapped[str] = mapped_column(String(255), nullable=False) - status = mapped_column(String(255), nullable=False, server_default=db.text("'normal'::character varying")) - parameters = mapped_column(db.Text, nullable=False) + status = mapped_column(String(255), nullable=False, server_default=sa.text("'normal'::character varying")) + parameters = mapped_column(sa.Text, nullable=False) - created_at = mapped_column(db.DateTime, nullable=False, server_default=func.current_timestamp()) - updated_at = mapped_column(db.DateTime, nullable=False, server_default=func.current_timestamp()) + created_at = mapped_column(sa.DateTime, nullable=False, server_default=func.current_timestamp()) + updated_at = mapped_column(sa.DateTime, nullable=False, server_default=func.current_timestamp()) @staticmethod def generate_server_code(n): @@ -1478,34 +1478,34 @@ class AppMCPServer(Base): class Site(Base): __tablename__ = "sites" __table_args__ = ( - db.PrimaryKeyConstraint("id", name="site_pkey"), - db.Index("site_app_id_idx", "app_id"), - db.Index("site_code_idx", "code", "status"), + sa.PrimaryKeyConstraint("id", name="site_pkey"), + sa.Index("site_app_id_idx", "app_id"), + sa.Index("site_code_idx", "code", "status"), ) - id = mapped_column(StringUUID, server_default=db.text("uuid_generate_v4()")) + id = mapped_column(StringUUID, server_default=sa.text("uuid_generate_v4()")) app_id = mapped_column(StringUUID, nullable=False) title: Mapped[str] = mapped_column(String(255), nullable=False) icon_type = mapped_column(String(255), nullable=True) icon = mapped_column(String(255)) icon_background = mapped_column(String(255)) - description = mapped_column(db.Text) + description = mapped_column(sa.Text) default_language: Mapped[str] = mapped_column(String(255), nullable=False) chat_color_theme = mapped_column(String(255)) - chat_color_theme_inverted: Mapped[bool] = mapped_column(db.Boolean, nullable=False, server_default=db.text("false")) + chat_color_theme_inverted: Mapped[bool] = mapped_column(sa.Boolean, nullable=False, server_default=sa.text("false")) copyright = mapped_column(String(255)) privacy_policy = mapped_column(String(255)) - show_workflow_steps: Mapped[bool] = mapped_column(db.Boolean, nullable=False, server_default=db.text("true")) - use_icon_as_answer_icon: Mapped[bool] = mapped_column(db.Boolean, nullable=False, server_default=db.text("false")) + show_workflow_steps: Mapped[bool] = mapped_column(sa.Boolean, nullable=False, server_default=sa.text("true")) + use_icon_as_answer_icon: Mapped[bool] = mapped_column(sa.Boolean, nullable=False, server_default=sa.text("false")) _custom_disclaimer: Mapped[str] = mapped_column("custom_disclaimer", sa.TEXT, default="") customize_domain = mapped_column(String(255)) customize_token_strategy: Mapped[str] = mapped_column(String(255), nullable=False) - prompt_public: Mapped[bool] = mapped_column(db.Boolean, nullable=False, server_default=db.text("false")) - status = mapped_column(String(255), nullable=False, server_default=db.text("'normal'::character varying")) + prompt_public: Mapped[bool] = mapped_column(sa.Boolean, nullable=False, server_default=sa.text("false")) + status = mapped_column(String(255), nullable=False, server_default=sa.text("'normal'::character varying")) created_by = mapped_column(StringUUID, nullable=True) - created_at = mapped_column(db.DateTime, nullable=False, server_default=func.current_timestamp()) + created_at = mapped_column(sa.DateTime, nullable=False, server_default=func.current_timestamp()) updated_by = mapped_column(StringUUID, nullable=True) - updated_at = mapped_column(db.DateTime, nullable=False, server_default=func.current_timestamp()) + updated_at = mapped_column(sa.DateTime, nullable=False, server_default=func.current_timestamp()) code = mapped_column(String(255)) @property @@ -1535,19 +1535,19 @@ class Site(Base): class ApiToken(Base): __tablename__ = "api_tokens" __table_args__ = ( - db.PrimaryKeyConstraint("id", name="api_token_pkey"), - db.Index("api_token_app_id_type_idx", "app_id", "type"), - db.Index("api_token_token_idx", "token", "type"), - db.Index("api_token_tenant_idx", "tenant_id", "type"), + sa.PrimaryKeyConstraint("id", name="api_token_pkey"), + sa.Index("api_token_app_id_type_idx", "app_id", "type"), + sa.Index("api_token_token_idx", "token", "type"), + sa.Index("api_token_tenant_idx", "tenant_id", "type"), ) - id = mapped_column(StringUUID, server_default=db.text("uuid_generate_v4()")) + id = mapped_column(StringUUID, server_default=sa.text("uuid_generate_v4()")) app_id = mapped_column(StringUUID, nullable=True) tenant_id = mapped_column(StringUUID, nullable=True) type = mapped_column(String(16), nullable=False) token: Mapped[str] = mapped_column(String(255), nullable=False) - last_used_at = mapped_column(db.DateTime, nullable=True) - created_at = mapped_column(db.DateTime, nullable=False, server_default=func.current_timestamp()) + last_used_at = mapped_column(sa.DateTime, nullable=True) + created_at = mapped_column(sa.DateTime, nullable=False, server_default=func.current_timestamp()) @staticmethod def generate_api_key(prefix, n): @@ -1561,26 +1561,26 @@ class ApiToken(Base): class UploadFile(Base): __tablename__ = "upload_files" __table_args__ = ( - db.PrimaryKeyConstraint("id", name="upload_file_pkey"), - db.Index("upload_file_tenant_idx", "tenant_id"), + sa.PrimaryKeyConstraint("id", name="upload_file_pkey"), + sa.Index("upload_file_tenant_idx", "tenant_id"), ) - id: Mapped[str] = mapped_column(StringUUID, server_default=db.text("uuid_generate_v4()")) + id: Mapped[str] = mapped_column(StringUUID, server_default=sa.text("uuid_generate_v4()")) tenant_id: Mapped[str] = mapped_column(StringUUID, nullable=False) storage_type: Mapped[str] = mapped_column(String(255), nullable=False) key: Mapped[str] = mapped_column(String(255), nullable=False) name: Mapped[str] = mapped_column(String(255), nullable=False) - size: Mapped[int] = mapped_column(db.Integer, nullable=False) + size: Mapped[int] = mapped_column(sa.Integer, nullable=False) extension: Mapped[str] = mapped_column(String(255), nullable=False) mime_type: Mapped[str] = mapped_column(String(255), nullable=True) created_by_role: Mapped[str] = mapped_column( - String(255), nullable=False, server_default=db.text("'account'::character varying") + String(255), nullable=False, server_default=sa.text("'account'::character varying") ) created_by: Mapped[str] = mapped_column(StringUUID, nullable=False) - created_at: Mapped[datetime] = mapped_column(db.DateTime, nullable=False, server_default=func.current_timestamp()) - used: Mapped[bool] = mapped_column(db.Boolean, nullable=False, server_default=db.text("false")) + created_at: Mapped[datetime] = mapped_column(sa.DateTime, nullable=False, server_default=func.current_timestamp()) + used: Mapped[bool] = mapped_column(sa.Boolean, nullable=False, server_default=sa.text("false")) used_by: Mapped[str | None] = mapped_column(StringUUID, nullable=True) - used_at: Mapped[datetime | None] = mapped_column(db.DateTime, nullable=True) + used_at: Mapped[datetime | None] = mapped_column(sa.DateTime, nullable=True) hash: Mapped[str | None] = mapped_column(String(255), nullable=True) source_url: Mapped[str] = mapped_column(sa.TEXT, default="") @@ -1623,71 +1623,71 @@ class UploadFile(Base): class ApiRequest(Base): __tablename__ = "api_requests" __table_args__ = ( - db.PrimaryKeyConstraint("id", name="api_request_pkey"), - db.Index("api_request_token_idx", "tenant_id", "api_token_id"), + sa.PrimaryKeyConstraint("id", name="api_request_pkey"), + sa.Index("api_request_token_idx", "tenant_id", "api_token_id"), ) - id = mapped_column(StringUUID, nullable=False, server_default=db.text("uuid_generate_v4()")) + id = mapped_column(StringUUID, nullable=False, server_default=sa.text("uuid_generate_v4()")) tenant_id = mapped_column(StringUUID, nullable=False) api_token_id = mapped_column(StringUUID, nullable=False) path: Mapped[str] = mapped_column(String(255), nullable=False) - request = mapped_column(db.Text, nullable=True) - response = mapped_column(db.Text, nullable=True) + request = mapped_column(sa.Text, nullable=True) + response = mapped_column(sa.Text, nullable=True) ip: Mapped[str] = mapped_column(String(255), nullable=False) - created_at = mapped_column(db.DateTime, nullable=False, server_default=func.current_timestamp()) + created_at = mapped_column(sa.DateTime, nullable=False, server_default=func.current_timestamp()) class MessageChain(Base): __tablename__ = "message_chains" __table_args__ = ( - db.PrimaryKeyConstraint("id", name="message_chain_pkey"), - db.Index("message_chain_message_id_idx", "message_id"), + sa.PrimaryKeyConstraint("id", name="message_chain_pkey"), + sa.Index("message_chain_message_id_idx", "message_id"), ) - id = mapped_column(StringUUID, nullable=False, server_default=db.text("uuid_generate_v4()")) + id = mapped_column(StringUUID, nullable=False, server_default=sa.text("uuid_generate_v4()")) message_id = mapped_column(StringUUID, nullable=False) type: Mapped[str] = mapped_column(String(255), nullable=False) - input = mapped_column(db.Text, nullable=True) - output = mapped_column(db.Text, nullable=True) - created_at = mapped_column(db.DateTime, nullable=False, server_default=db.func.current_timestamp()) + input = mapped_column(sa.Text, nullable=True) + output = mapped_column(sa.Text, nullable=True) + created_at = mapped_column(sa.DateTime, nullable=False, server_default=db.func.current_timestamp()) class MessageAgentThought(Base): __tablename__ = "message_agent_thoughts" __table_args__ = ( - db.PrimaryKeyConstraint("id", name="message_agent_thought_pkey"), - db.Index("message_agent_thought_message_id_idx", "message_id"), - db.Index("message_agent_thought_message_chain_id_idx", "message_chain_id"), + sa.PrimaryKeyConstraint("id", name="message_agent_thought_pkey"), + sa.Index("message_agent_thought_message_id_idx", "message_id"), + sa.Index("message_agent_thought_message_chain_id_idx", "message_chain_id"), ) - id = mapped_column(StringUUID, nullable=False, server_default=db.text("uuid_generate_v4()")) + id = mapped_column(StringUUID, nullable=False, server_default=sa.text("uuid_generate_v4()")) message_id = mapped_column(StringUUID, nullable=False) message_chain_id = mapped_column(StringUUID, nullable=True) - position: Mapped[int] = mapped_column(db.Integer, nullable=False) - thought = mapped_column(db.Text, nullable=True) - tool = mapped_column(db.Text, nullable=True) - tool_labels_str = mapped_column(db.Text, nullable=False, server_default=db.text("'{}'::text")) - tool_meta_str = mapped_column(db.Text, nullable=False, server_default=db.text("'{}'::text")) - tool_input = mapped_column(db.Text, nullable=True) - observation = mapped_column(db.Text, nullable=True) + position: Mapped[int] = mapped_column(sa.Integer, nullable=False) + thought = mapped_column(sa.Text, nullable=True) + tool = mapped_column(sa.Text, nullable=True) + tool_labels_str = mapped_column(sa.Text, nullable=False, server_default=sa.text("'{}'::text")) + tool_meta_str = mapped_column(sa.Text, nullable=False, server_default=sa.text("'{}'::text")) + tool_input = mapped_column(sa.Text, nullable=True) + observation = mapped_column(sa.Text, nullable=True) # plugin_id = mapped_column(StringUUID, nullable=True) ## for future design - tool_process_data = mapped_column(db.Text, nullable=True) - message = mapped_column(db.Text, nullable=True) - message_token: Mapped[Optional[int]] = mapped_column(db.Integer, nullable=True) - message_unit_price = mapped_column(db.Numeric, nullable=True) - message_price_unit = mapped_column(db.Numeric(10, 7), nullable=False, server_default=db.text("0.001")) - message_files = mapped_column(db.Text, nullable=True) - answer = db.Column(db.Text, nullable=True) - answer_token: Mapped[Optional[int]] = mapped_column(db.Integer, nullable=True) - answer_unit_price = mapped_column(db.Numeric, nullable=True) - answer_price_unit = mapped_column(db.Numeric(10, 7), nullable=False, server_default=db.text("0.001")) - tokens: Mapped[Optional[int]] = mapped_column(db.Integer, nullable=True) - total_price = mapped_column(db.Numeric, nullable=True) + tool_process_data = mapped_column(sa.Text, nullable=True) + message = mapped_column(sa.Text, nullable=True) + message_token: Mapped[Optional[int]] = mapped_column(sa.Integer, nullable=True) + message_unit_price = mapped_column(sa.Numeric, nullable=True) + message_price_unit = mapped_column(sa.Numeric(10, 7), nullable=False, server_default=sa.text("0.001")) + message_files = mapped_column(sa.Text, nullable=True) + answer = db.Column(sa.Text, nullable=True) + answer_token: Mapped[Optional[int]] = mapped_column(sa.Integer, nullable=True) + answer_unit_price = mapped_column(sa.Numeric, nullable=True) + answer_price_unit = mapped_column(sa.Numeric(10, 7), nullable=False, server_default=sa.text("0.001")) + tokens: Mapped[Optional[int]] = mapped_column(sa.Integer, nullable=True) + total_price = mapped_column(sa.Numeric, nullable=True) currency = mapped_column(String, nullable=True) - latency: Mapped[Optional[float]] = mapped_column(db.Float, nullable=True) + latency: Mapped[Optional[float]] = mapped_column(sa.Float, nullable=True) created_by_role = mapped_column(String, nullable=False) created_by = mapped_column(StringUUID, nullable=False) - created_at = mapped_column(db.DateTime, nullable=False, server_default=db.func.current_timestamp()) + created_at = mapped_column(sa.DateTime, nullable=False, server_default=db.func.current_timestamp()) @property def files(self) -> list: @@ -1769,80 +1769,80 @@ class MessageAgentThought(Base): class DatasetRetrieverResource(Base): __tablename__ = "dataset_retriever_resources" __table_args__ = ( - db.PrimaryKeyConstraint("id", name="dataset_retriever_resource_pkey"), - db.Index("dataset_retriever_resource_message_id_idx", "message_id"), + sa.PrimaryKeyConstraint("id", name="dataset_retriever_resource_pkey"), + sa.Index("dataset_retriever_resource_message_id_idx", "message_id"), ) - id = mapped_column(StringUUID, nullable=False, server_default=db.text("uuid_generate_v4()")) + id = mapped_column(StringUUID, nullable=False, server_default=sa.text("uuid_generate_v4()")) message_id = mapped_column(StringUUID, nullable=False) - position: Mapped[int] = mapped_column(db.Integer, nullable=False) + position: Mapped[int] = mapped_column(sa.Integer, nullable=False) dataset_id = mapped_column(StringUUID, nullable=False) - dataset_name = mapped_column(db.Text, nullable=False) + dataset_name = mapped_column(sa.Text, nullable=False) document_id = mapped_column(StringUUID, nullable=True) - document_name = mapped_column(db.Text, nullable=False) - data_source_type = mapped_column(db.Text, nullable=True) + document_name = mapped_column(sa.Text, nullable=False) + data_source_type = mapped_column(sa.Text, nullable=True) segment_id = mapped_column(StringUUID, nullable=True) - score: Mapped[Optional[float]] = mapped_column(db.Float, nullable=True) - content = mapped_column(db.Text, nullable=False) - hit_count: Mapped[Optional[int]] = mapped_column(db.Integer, nullable=True) - word_count: Mapped[Optional[int]] = mapped_column(db.Integer, nullable=True) - segment_position: Mapped[Optional[int]] = mapped_column(db.Integer, nullable=True) - index_node_hash = mapped_column(db.Text, nullable=True) - retriever_from = mapped_column(db.Text, nullable=False) + score: Mapped[Optional[float]] = mapped_column(sa.Float, nullable=True) + content = mapped_column(sa.Text, nullable=False) + hit_count: Mapped[Optional[int]] = mapped_column(sa.Integer, nullable=True) + word_count: Mapped[Optional[int]] = mapped_column(sa.Integer, nullable=True) + segment_position: Mapped[Optional[int]] = mapped_column(sa.Integer, nullable=True) + index_node_hash = mapped_column(sa.Text, nullable=True) + retriever_from = mapped_column(sa.Text, nullable=False) created_by = mapped_column(StringUUID, nullable=False) - created_at = mapped_column(db.DateTime, nullable=False, server_default=db.func.current_timestamp()) + created_at = mapped_column(sa.DateTime, nullable=False, server_default=db.func.current_timestamp()) class Tag(Base): __tablename__ = "tags" __table_args__ = ( - db.PrimaryKeyConstraint("id", name="tag_pkey"), - db.Index("tag_type_idx", "type"), - db.Index("tag_name_idx", "name"), + sa.PrimaryKeyConstraint("id", name="tag_pkey"), + sa.Index("tag_type_idx", "type"), + sa.Index("tag_name_idx", "name"), ) TAG_TYPE_LIST = ["knowledge", "app"] - id = mapped_column(StringUUID, server_default=db.text("uuid_generate_v4()")) + id = mapped_column(StringUUID, server_default=sa.text("uuid_generate_v4()")) tenant_id = mapped_column(StringUUID, nullable=True) type = mapped_column(String(16), nullable=False) name: Mapped[str] = mapped_column(String(255), nullable=False) created_by = mapped_column(StringUUID, nullable=False) - created_at = mapped_column(db.DateTime, nullable=False, server_default=func.current_timestamp()) + created_at = mapped_column(sa.DateTime, nullable=False, server_default=func.current_timestamp()) class TagBinding(Base): __tablename__ = "tag_bindings" __table_args__ = ( - db.PrimaryKeyConstraint("id", name="tag_binding_pkey"), - db.Index("tag_bind_target_id_idx", "target_id"), - db.Index("tag_bind_tag_id_idx", "tag_id"), + sa.PrimaryKeyConstraint("id", name="tag_binding_pkey"), + sa.Index("tag_bind_target_id_idx", "target_id"), + sa.Index("tag_bind_tag_id_idx", "tag_id"), ) - id = mapped_column(StringUUID, server_default=db.text("uuid_generate_v4()")) + id = mapped_column(StringUUID, server_default=sa.text("uuid_generate_v4()")) tenant_id = mapped_column(StringUUID, nullable=True) tag_id = mapped_column(StringUUID, nullable=True) target_id = mapped_column(StringUUID, nullable=True) created_by = mapped_column(StringUUID, nullable=False) - created_at = mapped_column(db.DateTime, nullable=False, server_default=func.current_timestamp()) + created_at = mapped_column(sa.DateTime, nullable=False, server_default=func.current_timestamp()) class TraceAppConfig(Base): __tablename__ = "trace_app_config" __table_args__ = ( - db.PrimaryKeyConstraint("id", name="tracing_app_config_pkey"), - db.Index("trace_app_config_app_id_idx", "app_id"), + sa.PrimaryKeyConstraint("id", name="tracing_app_config_pkey"), + sa.Index("trace_app_config_app_id_idx", "app_id"), ) - id = mapped_column(StringUUID, server_default=db.text("uuid_generate_v4()")) + id = mapped_column(StringUUID, server_default=sa.text("uuid_generate_v4()")) app_id = mapped_column(StringUUID, nullable=False) tracing_provider = mapped_column(String(255), nullable=True) - tracing_config = mapped_column(db.JSON, nullable=True) - created_at = mapped_column(db.DateTime, nullable=False, server_default=func.current_timestamp()) + tracing_config = mapped_column(sa.JSON, nullable=True) + created_at = mapped_column(sa.DateTime, nullable=False, server_default=func.current_timestamp()) updated_at = mapped_column( - db.DateTime, nullable=False, server_default=func.current_timestamp(), onupdate=func.current_timestamp() + sa.DateTime, nullable=False, server_default=func.current_timestamp(), onupdate=func.current_timestamp() ) - is_active: Mapped[bool] = mapped_column(db.Boolean, nullable=False, server_default=db.text("true")) + is_active: Mapped[bool] = mapped_column(sa.Boolean, nullable=False, server_default=sa.text("true")) @property def tracing_config_dict(self): diff --git a/api/models/provider.py b/api/models/provider.py index 7bfc249b0b..4ea2c59fdb 100644 --- a/api/models/provider.py +++ b/api/models/provider.py @@ -2,11 +2,11 @@ from datetime import datetime from enum import Enum from typing import Optional +import sqlalchemy as sa from sqlalchemy import DateTime, String, func, text from sqlalchemy.orm import Mapped, mapped_column from .base import Base -from .engine import db from .types import StringUUID @@ -47,9 +47,9 @@ class Provider(Base): __tablename__ = "providers" __table_args__ = ( - db.PrimaryKeyConstraint("id", name="provider_pkey"), - db.Index("provider_tenant_id_provider_idx", "tenant_id", "provider_name"), - db.UniqueConstraint( + sa.PrimaryKeyConstraint("id", name="provider_pkey"), + sa.Index("provider_tenant_id_provider_idx", "tenant_id", "provider_name"), + sa.UniqueConstraint( "tenant_id", "provider_name", "provider_type", "quota_type", name="unique_provider_name_type_quota" ), ) @@ -60,15 +60,15 @@ class Provider(Base): provider_type: Mapped[str] = mapped_column( String(40), nullable=False, server_default=text("'custom'::character varying") ) - encrypted_config: Mapped[Optional[str]] = mapped_column(db.Text, nullable=True) - is_valid: Mapped[bool] = mapped_column(db.Boolean, nullable=False, server_default=text("false")) + encrypted_config: Mapped[Optional[str]] = mapped_column(sa.Text, nullable=True) + is_valid: Mapped[bool] = mapped_column(sa.Boolean, nullable=False, server_default=text("false")) last_used: Mapped[Optional[datetime]] = mapped_column(DateTime, nullable=True) quota_type: Mapped[Optional[str]] = mapped_column( String(40), nullable=True, server_default=text("''::character varying") ) - quota_limit: Mapped[Optional[int]] = mapped_column(db.BigInteger, nullable=True) - quota_used: Mapped[Optional[int]] = mapped_column(db.BigInteger, default=0) + quota_limit: Mapped[Optional[int]] = mapped_column(sa.BigInteger, nullable=True) + quota_used: Mapped[Optional[int]] = mapped_column(sa.BigInteger, default=0) created_at: Mapped[datetime] = mapped_column(DateTime, nullable=False, server_default=func.current_timestamp()) updated_at: Mapped[datetime] = mapped_column(DateTime, nullable=False, server_default=func.current_timestamp()) @@ -104,9 +104,9 @@ class ProviderModel(Base): __tablename__ = "provider_models" __table_args__ = ( - db.PrimaryKeyConstraint("id", name="provider_model_pkey"), - db.Index("provider_model_tenant_id_provider_idx", "tenant_id", "provider_name"), - db.UniqueConstraint( + sa.PrimaryKeyConstraint("id", name="provider_model_pkey"), + sa.Index("provider_model_tenant_id_provider_idx", "tenant_id", "provider_name"), + sa.UniqueConstraint( "tenant_id", "provider_name", "model_name", "model_type", name="unique_provider_model_name" ), ) @@ -116,8 +116,8 @@ class ProviderModel(Base): provider_name: Mapped[str] = mapped_column(String(255), nullable=False) model_name: Mapped[str] = mapped_column(String(255), nullable=False) model_type: Mapped[str] = mapped_column(String(40), nullable=False) - encrypted_config: Mapped[Optional[str]] = mapped_column(db.Text, nullable=True) - is_valid: Mapped[bool] = mapped_column(db.Boolean, nullable=False, server_default=text("false")) + encrypted_config: Mapped[Optional[str]] = mapped_column(sa.Text, nullable=True) + is_valid: Mapped[bool] = mapped_column(sa.Boolean, nullable=False, server_default=text("false")) created_at: Mapped[datetime] = mapped_column(DateTime, nullable=False, server_default=func.current_timestamp()) updated_at: Mapped[datetime] = mapped_column(DateTime, nullable=False, server_default=func.current_timestamp()) @@ -125,8 +125,8 @@ class ProviderModel(Base): class TenantDefaultModel(Base): __tablename__ = "tenant_default_models" __table_args__ = ( - db.PrimaryKeyConstraint("id", name="tenant_default_model_pkey"), - db.Index("tenant_default_model_tenant_id_provider_type_idx", "tenant_id", "provider_name", "model_type"), + sa.PrimaryKeyConstraint("id", name="tenant_default_model_pkey"), + sa.Index("tenant_default_model_tenant_id_provider_type_idx", "tenant_id", "provider_name", "model_type"), ) id: Mapped[str] = mapped_column(StringUUID, server_default=text("uuid_generate_v4()")) @@ -141,8 +141,8 @@ class TenantDefaultModel(Base): class TenantPreferredModelProvider(Base): __tablename__ = "tenant_preferred_model_providers" __table_args__ = ( - db.PrimaryKeyConstraint("id", name="tenant_preferred_model_provider_pkey"), - db.Index("tenant_preferred_model_provider_tenant_provider_idx", "tenant_id", "provider_name"), + sa.PrimaryKeyConstraint("id", name="tenant_preferred_model_provider_pkey"), + sa.Index("tenant_preferred_model_provider_tenant_provider_idx", "tenant_id", "provider_name"), ) id: Mapped[str] = mapped_column(StringUUID, server_default=text("uuid_generate_v4()")) @@ -156,8 +156,8 @@ class TenantPreferredModelProvider(Base): class ProviderOrder(Base): __tablename__ = "provider_orders" __table_args__ = ( - db.PrimaryKeyConstraint("id", name="provider_order_pkey"), - db.Index("provider_order_tenant_provider_idx", "tenant_id", "provider_name"), + sa.PrimaryKeyConstraint("id", name="provider_order_pkey"), + sa.Index("provider_order_tenant_provider_idx", "tenant_id", "provider_name"), ) id: Mapped[str] = mapped_column(StringUUID, server_default=text("uuid_generate_v4()")) @@ -167,9 +167,9 @@ class ProviderOrder(Base): payment_product_id: Mapped[str] = mapped_column(String(191), nullable=False) payment_id: Mapped[Optional[str]] = mapped_column(String(191)) transaction_id: Mapped[Optional[str]] = mapped_column(String(191)) - quantity: Mapped[int] = mapped_column(db.Integer, nullable=False, server_default=text("1")) + quantity: Mapped[int] = mapped_column(sa.Integer, nullable=False, server_default=text("1")) currency: Mapped[Optional[str]] = mapped_column(String(40)) - total_amount: Mapped[Optional[int]] = mapped_column(db.Integer) + total_amount: Mapped[Optional[int]] = mapped_column(sa.Integer) payment_status: Mapped[str] = mapped_column( String(40), nullable=False, server_default=text("'wait_pay'::character varying") ) @@ -187,8 +187,8 @@ class ProviderModelSetting(Base): __tablename__ = "provider_model_settings" __table_args__ = ( - db.PrimaryKeyConstraint("id", name="provider_model_setting_pkey"), - db.Index("provider_model_setting_tenant_provider_model_idx", "tenant_id", "provider_name", "model_type"), + sa.PrimaryKeyConstraint("id", name="provider_model_setting_pkey"), + sa.Index("provider_model_setting_tenant_provider_model_idx", "tenant_id", "provider_name", "model_type"), ) id: Mapped[str] = mapped_column(StringUUID, server_default=text("uuid_generate_v4()")) @@ -196,8 +196,8 @@ class ProviderModelSetting(Base): provider_name: Mapped[str] = mapped_column(String(255), nullable=False) model_name: Mapped[str] = mapped_column(String(255), nullable=False) model_type: Mapped[str] = mapped_column(String(40), nullable=False) - enabled: Mapped[bool] = mapped_column(db.Boolean, nullable=False, server_default=text("true")) - load_balancing_enabled: Mapped[bool] = mapped_column(db.Boolean, nullable=False, server_default=text("false")) + enabled: Mapped[bool] = mapped_column(sa.Boolean, nullable=False, server_default=text("true")) + load_balancing_enabled: Mapped[bool] = mapped_column(sa.Boolean, nullable=False, server_default=text("false")) created_at: Mapped[datetime] = mapped_column(DateTime, nullable=False, server_default=func.current_timestamp()) updated_at: Mapped[datetime] = mapped_column(DateTime, nullable=False, server_default=func.current_timestamp()) @@ -209,8 +209,8 @@ class LoadBalancingModelConfig(Base): __tablename__ = "load_balancing_model_configs" __table_args__ = ( - db.PrimaryKeyConstraint("id", name="load_balancing_model_config_pkey"), - db.Index("load_balancing_model_config_tenant_provider_model_idx", "tenant_id", "provider_name", "model_type"), + sa.PrimaryKeyConstraint("id", name="load_balancing_model_config_pkey"), + sa.Index("load_balancing_model_config_tenant_provider_model_idx", "tenant_id", "provider_name", "model_type"), ) id: Mapped[str] = mapped_column(StringUUID, server_default=text("uuid_generate_v4()")) @@ -219,7 +219,7 @@ class LoadBalancingModelConfig(Base): model_name: Mapped[str] = mapped_column(String(255), nullable=False) model_type: Mapped[str] = mapped_column(String(40), nullable=False) name: Mapped[str] = mapped_column(String(255), nullable=False) - encrypted_config: Mapped[Optional[str]] = mapped_column(db.Text, nullable=True) - enabled: Mapped[bool] = mapped_column(db.Boolean, nullable=False, server_default=text("true")) + encrypted_config: Mapped[Optional[str]] = mapped_column(sa.Text, nullable=True) + enabled: Mapped[bool] = mapped_column(sa.Boolean, nullable=False, server_default=text("true")) created_at: Mapped[datetime] = mapped_column(DateTime, nullable=False, server_default=func.current_timestamp()) updated_at: Mapped[datetime] = mapped_column(DateTime, nullable=False, server_default=func.current_timestamp()) diff --git a/api/models/source.py b/api/models/source.py index 8191c874a4..8456d65a87 100644 --- a/api/models/source.py +++ b/api/models/source.py @@ -2,50 +2,50 @@ import json from datetime import datetime from typing import Optional +import sqlalchemy as sa from sqlalchemy import DateTime, String, func from sqlalchemy.dialects.postgresql import JSONB from sqlalchemy.orm import Mapped, mapped_column from models.base import Base -from .engine import db from .types import StringUUID class DataSourceOauthBinding(Base): __tablename__ = "data_source_oauth_bindings" __table_args__ = ( - db.PrimaryKeyConstraint("id", name="source_binding_pkey"), - db.Index("source_binding_tenant_id_idx", "tenant_id"), - db.Index("source_info_idx", "source_info", postgresql_using="gin"), + sa.PrimaryKeyConstraint("id", name="source_binding_pkey"), + sa.Index("source_binding_tenant_id_idx", "tenant_id"), + sa.Index("source_info_idx", "source_info", postgresql_using="gin"), ) - id = mapped_column(StringUUID, server_default=db.text("uuid_generate_v4()")) + id = mapped_column(StringUUID, server_default=sa.text("uuid_generate_v4()")) tenant_id = mapped_column(StringUUID, nullable=False) access_token: Mapped[str] = mapped_column(String(255), nullable=False) provider: Mapped[str] = mapped_column(String(255), nullable=False) source_info = mapped_column(JSONB, nullable=False) created_at: Mapped[datetime] = mapped_column(DateTime, nullable=False, server_default=func.current_timestamp()) updated_at: Mapped[datetime] = mapped_column(DateTime, nullable=False, server_default=func.current_timestamp()) - disabled: Mapped[Optional[bool]] = mapped_column(db.Boolean, nullable=True, server_default=db.text("false")) + disabled: Mapped[Optional[bool]] = mapped_column(sa.Boolean, nullable=True, server_default=sa.text("false")) class DataSourceApiKeyAuthBinding(Base): __tablename__ = "data_source_api_key_auth_bindings" __table_args__ = ( - db.PrimaryKeyConstraint("id", name="data_source_api_key_auth_binding_pkey"), - db.Index("data_source_api_key_auth_binding_tenant_id_idx", "tenant_id"), - db.Index("data_source_api_key_auth_binding_provider_idx", "provider"), + sa.PrimaryKeyConstraint("id", name="data_source_api_key_auth_binding_pkey"), + sa.Index("data_source_api_key_auth_binding_tenant_id_idx", "tenant_id"), + sa.Index("data_source_api_key_auth_binding_provider_idx", "provider"), ) - id = mapped_column(StringUUID, server_default=db.text("uuid_generate_v4()")) + id = mapped_column(StringUUID, server_default=sa.text("uuid_generate_v4()")) tenant_id = mapped_column(StringUUID, nullable=False) category: Mapped[str] = mapped_column(String(255), nullable=False) provider: Mapped[str] = mapped_column(String(255), nullable=False) - credentials = mapped_column(db.Text, nullable=True) # JSON + credentials = mapped_column(sa.Text, nullable=True) # JSON created_at: Mapped[datetime] = mapped_column(DateTime, nullable=False, server_default=func.current_timestamp()) updated_at: Mapped[datetime] = mapped_column(DateTime, nullable=False, server_default=func.current_timestamp()) - disabled: Mapped[Optional[bool]] = mapped_column(db.Boolean, nullable=True, server_default=db.text("false")) + disabled: Mapped[Optional[bool]] = mapped_column(sa.Boolean, nullable=True, server_default=sa.text("false")) def to_dict(self): return { diff --git a/api/models/task.py b/api/models/task.py index 66a47ea4df..ab700c553c 100644 --- a/api/models/task.py +++ b/api/models/task.py @@ -1,6 +1,7 @@ from datetime import datetime from typing import Optional +import sqlalchemy as sa from celery import states # type: ignore from sqlalchemy import DateTime, String from sqlalchemy.orm import Mapped, mapped_column @@ -16,7 +17,7 @@ class CeleryTask(Base): __tablename__ = "celery_taskmeta" - id = mapped_column(db.Integer, db.Sequence("task_id_sequence"), primary_key=True, autoincrement=True) + id = mapped_column(sa.Integer, sa.Sequence("task_id_sequence"), primary_key=True, autoincrement=True) task_id = mapped_column(String(155), unique=True) status = mapped_column(String(50), default=states.PENDING) result = mapped_column(db.PickleType, nullable=True) @@ -26,12 +27,12 @@ class CeleryTask(Base): onupdate=lambda: naive_utc_now(), nullable=True, ) - traceback = mapped_column(db.Text, nullable=True) + traceback = mapped_column(sa.Text, nullable=True) name = mapped_column(String(155), nullable=True) - args = mapped_column(db.LargeBinary, nullable=True) - kwargs = mapped_column(db.LargeBinary, nullable=True) + args = mapped_column(sa.LargeBinary, nullable=True) + kwargs = mapped_column(sa.LargeBinary, nullable=True) worker = mapped_column(String(155), nullable=True) - retries: Mapped[Optional[int]] = mapped_column(db.Integer, nullable=True) + retries: Mapped[Optional[int]] = mapped_column(sa.Integer, nullable=True) queue = mapped_column(String(155), nullable=True) @@ -41,7 +42,7 @@ class CeleryTaskSet(Base): __tablename__ = "celery_tasksetmeta" id: Mapped[int] = mapped_column( - db.Integer, db.Sequence("taskset_id_sequence"), autoincrement=True, primary_key=True + sa.Integer, sa.Sequence("taskset_id_sequence"), autoincrement=True, primary_key=True ) taskset_id = mapped_column(String(155), unique=True) result = mapped_column(db.PickleType, nullable=True) diff --git a/api/models/tools.py b/api/models/tools.py index 1491cd90ce..408c1371c2 100644 --- a/api/models/tools.py +++ b/api/models/tools.py @@ -25,33 +25,33 @@ from .types import StringUUID class ToolOAuthSystemClient(Base): __tablename__ = "tool_oauth_system_clients" __table_args__ = ( - db.PrimaryKeyConstraint("id", name="tool_oauth_system_client_pkey"), - db.UniqueConstraint("plugin_id", "provider", name="tool_oauth_system_client_plugin_id_provider_idx"), + sa.PrimaryKeyConstraint("id", name="tool_oauth_system_client_pkey"), + sa.UniqueConstraint("plugin_id", "provider", name="tool_oauth_system_client_plugin_id_provider_idx"), ) - id: Mapped[str] = mapped_column(StringUUID, server_default=db.text("uuid_generate_v4()")) + id: Mapped[str] = mapped_column(StringUUID, server_default=sa.text("uuid_generate_v4()")) plugin_id = mapped_column(String(512), nullable=False) provider: Mapped[str] = mapped_column(String(255), nullable=False) # oauth params of the tool provider - encrypted_oauth_params: Mapped[str] = mapped_column(db.Text, nullable=False) + encrypted_oauth_params: Mapped[str] = mapped_column(sa.Text, nullable=False) # tenant level tool oauth client params (client_id, client_secret, etc.) class ToolOAuthTenantClient(Base): __tablename__ = "tool_oauth_tenant_clients" __table_args__ = ( - db.PrimaryKeyConstraint("id", name="tool_oauth_tenant_client_pkey"), - db.UniqueConstraint("tenant_id", "plugin_id", "provider", name="unique_tool_oauth_tenant_client"), + sa.PrimaryKeyConstraint("id", name="tool_oauth_tenant_client_pkey"), + sa.UniqueConstraint("tenant_id", "plugin_id", "provider", name="unique_tool_oauth_tenant_client"), ) - id: Mapped[str] = mapped_column(StringUUID, server_default=db.text("uuid_generate_v4()")) + id: Mapped[str] = mapped_column(StringUUID, server_default=sa.text("uuid_generate_v4()")) # tenant id tenant_id: Mapped[str] = mapped_column(StringUUID, nullable=False) plugin_id: Mapped[str] = mapped_column(String(512), nullable=False) provider: Mapped[str] = mapped_column(String(255), nullable=False) - enabled: Mapped[bool] = mapped_column(db.Boolean, nullable=False, server_default=db.text("true")) + enabled: Mapped[bool] = mapped_column(sa.Boolean, nullable=False, server_default=sa.text("true")) # oauth params of the tool provider - encrypted_oauth_params: Mapped[str] = mapped_column(db.Text, nullable=False) + encrypted_oauth_params: Mapped[str] = mapped_column(sa.Text, nullable=False) @property def oauth_params(self) -> dict: @@ -65,14 +65,14 @@ class BuiltinToolProvider(Base): __tablename__ = "tool_builtin_providers" __table_args__ = ( - db.PrimaryKeyConstraint("id", name="tool_builtin_provider_pkey"), - db.UniqueConstraint("tenant_id", "provider", "name", name="unique_builtin_tool_provider"), + sa.PrimaryKeyConstraint("id", name="tool_builtin_provider_pkey"), + sa.UniqueConstraint("tenant_id", "provider", "name", name="unique_builtin_tool_provider"), ) # id of the tool provider - id: Mapped[str] = mapped_column(StringUUID, server_default=db.text("uuid_generate_v4()")) + id: Mapped[str] = mapped_column(StringUUID, server_default=sa.text("uuid_generate_v4()")) name: Mapped[str] = mapped_column( - String(256), nullable=False, server_default=db.text("'API KEY 1'::character varying") + String(256), nullable=False, server_default=sa.text("'API KEY 1'::character varying") ) # id of the tenant tenant_id: Mapped[str] = mapped_column(StringUUID, nullable=True) @@ -81,19 +81,19 @@ class BuiltinToolProvider(Base): # name of the tool provider provider: Mapped[str] = mapped_column(String(256), nullable=False) # credential of the tool provider - encrypted_credentials: Mapped[str] = mapped_column(db.Text, nullable=True) + encrypted_credentials: Mapped[str] = mapped_column(sa.Text, nullable=True) created_at: Mapped[datetime] = mapped_column( - sa.DateTime, nullable=False, server_default=db.text("CURRENT_TIMESTAMP(0)") + sa.DateTime, nullable=False, server_default=sa.text("CURRENT_TIMESTAMP(0)") ) updated_at: Mapped[datetime] = mapped_column( - sa.DateTime, nullable=False, server_default=db.text("CURRENT_TIMESTAMP(0)") + sa.DateTime, nullable=False, server_default=sa.text("CURRENT_TIMESTAMP(0)") ) - is_default: Mapped[bool] = mapped_column(db.Boolean, nullable=False, server_default=db.text("false")) + is_default: Mapped[bool] = mapped_column(sa.Boolean, nullable=False, server_default=sa.text("false")) # credential type, e.g., "api-key", "oauth2" credential_type: Mapped[str] = mapped_column( - String(32), nullable=False, server_default=db.text("'api-key'::character varying") + String(32), nullable=False, server_default=sa.text("'api-key'::character varying") ) - expires_at: Mapped[int] = mapped_column(db.BigInteger, nullable=False, server_default=db.text("-1")) + expires_at: Mapped[int] = mapped_column(sa.BigInteger, nullable=False, server_default=sa.text("-1")) @property def credentials(self) -> dict: @@ -107,28 +107,28 @@ class ApiToolProvider(Base): __tablename__ = "tool_api_providers" __table_args__ = ( - db.PrimaryKeyConstraint("id", name="tool_api_provider_pkey"), - db.UniqueConstraint("name", "tenant_id", name="unique_api_tool_provider"), + sa.PrimaryKeyConstraint("id", name="tool_api_provider_pkey"), + sa.UniqueConstraint("name", "tenant_id", name="unique_api_tool_provider"), ) - id = mapped_column(StringUUID, server_default=db.text("uuid_generate_v4()")) + id = mapped_column(StringUUID, server_default=sa.text("uuid_generate_v4()")) # name of the api provider - name = mapped_column(String(255), nullable=False, server_default=db.text("'API KEY 1'::character varying")) + name = mapped_column(String(255), nullable=False, server_default=sa.text("'API KEY 1'::character varying")) # icon icon: Mapped[str] = mapped_column(String(255), nullable=False) # original schema - schema = mapped_column(db.Text, nullable=False) + schema = mapped_column(sa.Text, nullable=False) schema_type_str: Mapped[str] = mapped_column(String(40), nullable=False) # who created this tool user_id = mapped_column(StringUUID, nullable=False) # tenant id tenant_id = mapped_column(StringUUID, nullable=False) # description of the provider - description = mapped_column(db.Text, nullable=False) + description = mapped_column(sa.Text, nullable=False) # json format tools - tools_str = mapped_column(db.Text, nullable=False) + tools_str = mapped_column(sa.Text, nullable=False) # json format credentials - credentials_str = mapped_column(db.Text, nullable=False) + credentials_str = mapped_column(sa.Text, nullable=False) # privacy policy privacy_policy = mapped_column(String(255), nullable=True) # custom_disclaimer @@ -167,11 +167,11 @@ class ToolLabelBinding(Base): __tablename__ = "tool_label_bindings" __table_args__ = ( - db.PrimaryKeyConstraint("id", name="tool_label_bind_pkey"), - db.UniqueConstraint("tool_id", "label_name", name="unique_tool_label_bind"), + sa.PrimaryKeyConstraint("id", name="tool_label_bind_pkey"), + sa.UniqueConstraint("tool_id", "label_name", name="unique_tool_label_bind"), ) - id: Mapped[str] = mapped_column(StringUUID, server_default=db.text("uuid_generate_v4()")) + id: Mapped[str] = mapped_column(StringUUID, server_default=sa.text("uuid_generate_v4()")) # tool id tool_id: Mapped[str] = mapped_column(String(64), nullable=False) # tool type @@ -187,12 +187,12 @@ class WorkflowToolProvider(Base): __tablename__ = "tool_workflow_providers" __table_args__ = ( - db.PrimaryKeyConstraint("id", name="tool_workflow_provider_pkey"), - db.UniqueConstraint("name", "tenant_id", name="unique_workflow_tool_provider"), - db.UniqueConstraint("tenant_id", "app_id", name="unique_workflow_tool_provider_app_id"), + sa.PrimaryKeyConstraint("id", name="tool_workflow_provider_pkey"), + sa.UniqueConstraint("name", "tenant_id", name="unique_workflow_tool_provider"), + sa.UniqueConstraint("tenant_id", "app_id", name="unique_workflow_tool_provider_app_id"), ) - id: Mapped[str] = mapped_column(StringUUID, server_default=db.text("uuid_generate_v4()")) + id: Mapped[str] = mapped_column(StringUUID, server_default=sa.text("uuid_generate_v4()")) # name of the workflow provider name: Mapped[str] = mapped_column(String(255), nullable=False) # label of the workflow provider @@ -208,17 +208,17 @@ class WorkflowToolProvider(Base): # tenant id tenant_id: Mapped[str] = mapped_column(StringUUID, nullable=False) # description of the provider - description: Mapped[str] = mapped_column(db.Text, nullable=False) + description: Mapped[str] = mapped_column(sa.Text, nullable=False) # parameter configuration - parameter_configuration: Mapped[str] = mapped_column(db.Text, nullable=False, server_default="[]") + parameter_configuration: Mapped[str] = mapped_column(sa.Text, nullable=False, server_default="[]") # privacy policy privacy_policy: Mapped[str] = mapped_column(String(255), nullable=True, server_default="") created_at: Mapped[datetime] = mapped_column( - sa.DateTime, nullable=False, server_default=db.text("CURRENT_TIMESTAMP(0)") + sa.DateTime, nullable=False, server_default=sa.text("CURRENT_TIMESTAMP(0)") ) updated_at: Mapped[datetime] = mapped_column( - sa.DateTime, nullable=False, server_default=db.text("CURRENT_TIMESTAMP(0)") + sa.DateTime, nullable=False, server_default=sa.text("CURRENT_TIMESTAMP(0)") ) @property @@ -245,19 +245,19 @@ class MCPToolProvider(Base): __tablename__ = "tool_mcp_providers" __table_args__ = ( - db.PrimaryKeyConstraint("id", name="tool_mcp_provider_pkey"), - db.UniqueConstraint("tenant_id", "server_url_hash", name="unique_mcp_provider_server_url"), - db.UniqueConstraint("tenant_id", "name", name="unique_mcp_provider_name"), - db.UniqueConstraint("tenant_id", "server_identifier", name="unique_mcp_provider_server_identifier"), + sa.PrimaryKeyConstraint("id", name="tool_mcp_provider_pkey"), + sa.UniqueConstraint("tenant_id", "server_url_hash", name="unique_mcp_provider_server_url"), + sa.UniqueConstraint("tenant_id", "name", name="unique_mcp_provider_name"), + sa.UniqueConstraint("tenant_id", "server_identifier", name="unique_mcp_provider_server_identifier"), ) - id: Mapped[str] = mapped_column(StringUUID, server_default=db.text("uuid_generate_v4()")) + id: Mapped[str] = mapped_column(StringUUID, server_default=sa.text("uuid_generate_v4()")) # name of the mcp provider name: Mapped[str] = mapped_column(String(40), nullable=False) # server identifier of the mcp provider server_identifier: Mapped[str] = mapped_column(String(64), nullable=False) # encrypted url of the mcp provider - server_url: Mapped[str] = mapped_column(db.Text, nullable=False) + server_url: Mapped[str] = mapped_column(sa.Text, nullable=False) # hash of server_url for uniqueness check server_url_hash: Mapped[str] = mapped_column(String(64), nullable=False) # icon of the mcp provider @@ -267,16 +267,16 @@ class MCPToolProvider(Base): # who created this tool user_id: Mapped[str] = mapped_column(StringUUID, nullable=False) # encrypted credentials - encrypted_credentials: Mapped[str] = mapped_column(db.Text, nullable=True) + encrypted_credentials: Mapped[str] = mapped_column(sa.Text, nullable=True) # authed - authed: Mapped[bool] = mapped_column(db.Boolean, nullable=False, default=False) + authed: Mapped[bool] = mapped_column(sa.Boolean, nullable=False, default=False) # tools - tools: Mapped[str] = mapped_column(db.Text, nullable=False, default="[]") + tools: Mapped[str] = mapped_column(sa.Text, nullable=False, default="[]") created_at: Mapped[datetime] = mapped_column( - sa.DateTime, nullable=False, server_default=db.text("CURRENT_TIMESTAMP(0)") + sa.DateTime, nullable=False, server_default=sa.text("CURRENT_TIMESTAMP(0)") ) updated_at: Mapped[datetime] = mapped_column( - sa.DateTime, nullable=False, server_default=db.text("CURRENT_TIMESTAMP(0)") + sa.DateTime, nullable=False, server_default=sa.text("CURRENT_TIMESTAMP(0)") ) def load_user(self) -> Account | None: @@ -347,9 +347,9 @@ class ToolModelInvoke(Base): """ __tablename__ = "tool_model_invokes" - __table_args__ = (db.PrimaryKeyConstraint("id", name="tool_model_invoke_pkey"),) + __table_args__ = (sa.PrimaryKeyConstraint("id", name="tool_model_invoke_pkey"),) - id = mapped_column(StringUUID, server_default=db.text("uuid_generate_v4()")) + id = mapped_column(StringUUID, server_default=sa.text("uuid_generate_v4()")) # who invoke this tool user_id = mapped_column(StringUUID, nullable=False) # tenant id @@ -361,18 +361,18 @@ class ToolModelInvoke(Base): # tool name tool_name = mapped_column(String(128), nullable=False) # invoke parameters - model_parameters = mapped_column(db.Text, nullable=False) + model_parameters = mapped_column(sa.Text, nullable=False) # prompt messages - prompt_messages = mapped_column(db.Text, nullable=False) + prompt_messages = mapped_column(sa.Text, nullable=False) # invoke response - model_response = mapped_column(db.Text, nullable=False) + model_response = mapped_column(sa.Text, nullable=False) - prompt_tokens: Mapped[int] = mapped_column(db.Integer, nullable=False, server_default=db.text("0")) - answer_tokens: Mapped[int] = mapped_column(db.Integer, nullable=False, server_default=db.text("0")) - answer_unit_price = mapped_column(db.Numeric(10, 4), nullable=False) - answer_price_unit = mapped_column(db.Numeric(10, 7), nullable=False, server_default=db.text("0.001")) - provider_response_latency = mapped_column(db.Float, nullable=False, server_default=db.text("0")) - total_price = mapped_column(db.Numeric(10, 7)) + prompt_tokens: Mapped[int] = mapped_column(sa.Integer, nullable=False, server_default=sa.text("0")) + answer_tokens: Mapped[int] = mapped_column(sa.Integer, nullable=False, server_default=sa.text("0")) + answer_unit_price = mapped_column(sa.Numeric(10, 4), nullable=False) + answer_price_unit = mapped_column(sa.Numeric(10, 7), nullable=False, server_default=sa.text("0.001")) + provider_response_latency = mapped_column(sa.Float, nullable=False, server_default=sa.text("0")) + total_price = mapped_column(sa.Numeric(10, 7)) currency: Mapped[str] = mapped_column(String(255), nullable=False) created_at = mapped_column(sa.DateTime, nullable=False, server_default=func.current_timestamp()) updated_at = mapped_column(sa.DateTime, nullable=False, server_default=func.current_timestamp()) @@ -386,13 +386,13 @@ class ToolConversationVariables(Base): __tablename__ = "tool_conversation_variables" __table_args__ = ( - db.PrimaryKeyConstraint("id", name="tool_conversation_variables_pkey"), + sa.PrimaryKeyConstraint("id", name="tool_conversation_variables_pkey"), # add index for user_id and conversation_id - db.Index("user_id_idx", "user_id"), - db.Index("conversation_id_idx", "conversation_id"), + sa.Index("user_id_idx", "user_id"), + sa.Index("conversation_id_idx", "conversation_id"), ) - id = mapped_column(StringUUID, server_default=db.text("uuid_generate_v4()")) + id = mapped_column(StringUUID, server_default=sa.text("uuid_generate_v4()")) # conversation user id user_id = mapped_column(StringUUID, nullable=False) # tenant id @@ -400,7 +400,7 @@ class ToolConversationVariables(Base): # conversation id conversation_id = mapped_column(StringUUID, nullable=False) # variables pool - variables_str = mapped_column(db.Text, nullable=False) + variables_str = mapped_column(sa.Text, nullable=False) created_at = mapped_column(sa.DateTime, nullable=False, server_default=func.current_timestamp()) updated_at = mapped_column(sa.DateTime, nullable=False, server_default=func.current_timestamp()) @@ -417,11 +417,11 @@ class ToolFile(Base): __tablename__ = "tool_files" __table_args__ = ( - db.PrimaryKeyConstraint("id", name="tool_file_pkey"), - db.Index("tool_file_conversation_id_idx", "conversation_id"), + sa.PrimaryKeyConstraint("id", name="tool_file_pkey"), + sa.Index("tool_file_conversation_id_idx", "conversation_id"), ) - id: Mapped[str] = mapped_column(StringUUID, server_default=db.text("uuid_generate_v4()")) + id: Mapped[str] = mapped_column(StringUUID, server_default=sa.text("uuid_generate_v4()")) # conversation user id user_id: Mapped[str] = mapped_column(StringUUID) # tenant id @@ -448,30 +448,30 @@ class DeprecatedPublishedAppTool(Base): __tablename__ = "tool_published_apps" __table_args__ = ( - db.PrimaryKeyConstraint("id", name="published_app_tool_pkey"), - db.UniqueConstraint("app_id", "user_id", name="unique_published_app_tool"), + sa.PrimaryKeyConstraint("id", name="published_app_tool_pkey"), + sa.UniqueConstraint("app_id", "user_id", name="unique_published_app_tool"), ) - id = mapped_column(StringUUID, server_default=db.text("uuid_generate_v4()")) + id = mapped_column(StringUUID, server_default=sa.text("uuid_generate_v4()")) # id of the app app_id = mapped_column(StringUUID, ForeignKey("apps.id"), nullable=False) user_id: Mapped[str] = mapped_column(StringUUID, nullable=False) # who published this tool - description = mapped_column(db.Text, nullable=False) + description = mapped_column(sa.Text, nullable=False) # llm_description of the tool, for LLM - llm_description = mapped_column(db.Text, nullable=False) + llm_description = mapped_column(sa.Text, nullable=False) # query description, query will be seem as a parameter of the tool, # to describe this parameter to llm, we need this field - query_description = mapped_column(db.Text, nullable=False) + query_description = mapped_column(sa.Text, nullable=False) # query name, the name of the query parameter query_name = mapped_column(String(40), nullable=False) # name of the tool provider tool_name = mapped_column(String(40), nullable=False) # author author = mapped_column(String(40), nullable=False) - created_at = mapped_column(sa.DateTime, nullable=False, server_default=db.text("CURRENT_TIMESTAMP(0)")) - updated_at = mapped_column(sa.DateTime, nullable=False, server_default=db.text("CURRENT_TIMESTAMP(0)")) + created_at = mapped_column(sa.DateTime, nullable=False, server_default=sa.text("CURRENT_TIMESTAMP(0)")) + updated_at = mapped_column(sa.DateTime, nullable=False, server_default=sa.text("CURRENT_TIMESTAMP(0)")) @property def description_i18n(self) -> I18nObject: diff --git a/api/models/web.py b/api/models/web.py index 1bf9b5c761..74f99e187b 100644 --- a/api/models/web.py +++ b/api/models/web.py @@ -1,5 +1,6 @@ from datetime import datetime +import sqlalchemy as sa from sqlalchemy import DateTime, String, func from sqlalchemy.orm import Mapped, mapped_column @@ -13,15 +14,15 @@ from .types import StringUUID class SavedMessage(Base): __tablename__ = "saved_messages" __table_args__ = ( - db.PrimaryKeyConstraint("id", name="saved_message_pkey"), - db.Index("saved_message_message_idx", "app_id", "message_id", "created_by_role", "created_by"), + sa.PrimaryKeyConstraint("id", name="saved_message_pkey"), + sa.Index("saved_message_message_idx", "app_id", "message_id", "created_by_role", "created_by"), ) - id = mapped_column(StringUUID, server_default=db.text("uuid_generate_v4()")) + id = mapped_column(StringUUID, server_default=sa.text("uuid_generate_v4()")) app_id = mapped_column(StringUUID, nullable=False) message_id = mapped_column(StringUUID, nullable=False) created_by_role = mapped_column( - String(255), nullable=False, server_default=db.text("'end_user'::character varying") + String(255), nullable=False, server_default=sa.text("'end_user'::character varying") ) created_by = mapped_column(StringUUID, nullable=False) created_at: Mapped[datetime] = mapped_column(DateTime, nullable=False, server_default=func.current_timestamp()) @@ -34,15 +35,15 @@ class SavedMessage(Base): class PinnedConversation(Base): __tablename__ = "pinned_conversations" __table_args__ = ( - db.PrimaryKeyConstraint("id", name="pinned_conversation_pkey"), - db.Index("pinned_conversation_conversation_idx", "app_id", "conversation_id", "created_by_role", "created_by"), + sa.PrimaryKeyConstraint("id", name="pinned_conversation_pkey"), + sa.Index("pinned_conversation_conversation_idx", "app_id", "conversation_id", "created_by_role", "created_by"), ) - id = mapped_column(StringUUID, server_default=db.text("uuid_generate_v4()")) + id = mapped_column(StringUUID, server_default=sa.text("uuid_generate_v4()")) app_id = mapped_column(StringUUID, nullable=False) conversation_id: Mapped[str] = mapped_column(StringUUID) created_by_role = mapped_column( - String(255), nullable=False, server_default=db.text("'end_user'::character varying") + String(255), nullable=False, server_default=sa.text("'end_user'::character varying") ) created_by = mapped_column(StringUUID, nullable=False) created_at: Mapped[datetime] = mapped_column(DateTime, nullable=False, server_default=func.current_timestamp()) diff --git a/api/models/workflow.py b/api/models/workflow.py index 6c7d061bb4..9cf6a00456 100644 --- a/api/models/workflow.py +++ b/api/models/workflow.py @@ -6,6 +6,7 @@ from enum import Enum, StrEnum from typing import TYPE_CHECKING, Any, Optional, Union from uuid import uuid4 +import sqlalchemy as sa from flask_login import current_user from sqlalchemy import DateTime, orm @@ -24,7 +25,6 @@ from ._workflow_exc import NodeNotFoundError, WorkflowDataError if TYPE_CHECKING: from models.model import AppMode -import sqlalchemy as sa from sqlalchemy import Index, PrimaryKeyConstraint, String, UniqueConstraint, func from sqlalchemy.orm import Mapped, declared_attr, mapped_column @@ -117,11 +117,11 @@ class Workflow(Base): __tablename__ = "workflows" __table_args__ = ( - db.PrimaryKeyConstraint("id", name="workflow_pkey"), - db.Index("workflow_version_idx", "tenant_id", "app_id", "version"), + sa.PrimaryKeyConstraint("id", name="workflow_pkey"), + sa.Index("workflow_version_idx", "tenant_id", "app_id", "version"), ) - id: Mapped[str] = mapped_column(StringUUID, server_default=db.text("uuid_generate_v4()")) + id: Mapped[str] = mapped_column(StringUUID, server_default=sa.text("uuid_generate_v4()")) tenant_id: Mapped[str] = mapped_column(StringUUID, nullable=False) app_id: Mapped[str] = mapped_column(StringUUID, nullable=False) type: Mapped[str] = mapped_column(String(255), nullable=False) @@ -140,10 +140,10 @@ class Workflow(Base): server_onupdate=func.current_timestamp(), ) _environment_variables: Mapped[str] = mapped_column( - "environment_variables", db.Text, nullable=False, server_default="{}" + "environment_variables", sa.Text, nullable=False, server_default="{}" ) _conversation_variables: Mapped[str] = mapped_column( - "conversation_variables", db.Text, nullable=False, server_default="{}" + "conversation_variables", sa.Text, nullable=False, server_default="{}" ) VERSION_DRAFT = "draft" @@ -491,11 +491,11 @@ class WorkflowRun(Base): __tablename__ = "workflow_runs" __table_args__ = ( - db.PrimaryKeyConstraint("id", name="workflow_run_pkey"), - db.Index("workflow_run_triggerd_from_idx", "tenant_id", "app_id", "triggered_from"), + sa.PrimaryKeyConstraint("id", name="workflow_run_pkey"), + sa.Index("workflow_run_triggerd_from_idx", "tenant_id", "app_id", "triggered_from"), ) - id: Mapped[str] = mapped_column(StringUUID, server_default=db.text("uuid_generate_v4()")) + id: Mapped[str] = mapped_column(StringUUID, server_default=sa.text("uuid_generate_v4()")) tenant_id: Mapped[str] = mapped_column(StringUUID) app_id: Mapped[str] = mapped_column(StringUUID) @@ -503,19 +503,19 @@ class WorkflowRun(Base): type: Mapped[str] = mapped_column(String(255)) triggered_from: Mapped[str] = mapped_column(String(255)) version: Mapped[str] = mapped_column(String(255)) - graph: Mapped[Optional[str]] = mapped_column(db.Text) - inputs: Mapped[Optional[str]] = mapped_column(db.Text) + graph: Mapped[Optional[str]] = mapped_column(sa.Text) + inputs: Mapped[Optional[str]] = mapped_column(sa.Text) status: Mapped[str] = mapped_column(String(255)) # running, succeeded, failed, stopped, partial-succeeded outputs: Mapped[Optional[str]] = mapped_column(sa.Text, default="{}") - error: Mapped[Optional[str]] = mapped_column(db.Text) - elapsed_time: Mapped[float] = mapped_column(db.Float, nullable=False, server_default=sa.text("0")) + error: Mapped[Optional[str]] = mapped_column(sa.Text) + elapsed_time: Mapped[float] = mapped_column(sa.Float, nullable=False, server_default=sa.text("0")) total_tokens: Mapped[int] = mapped_column(sa.BigInteger, server_default=sa.text("0")) - total_steps: Mapped[int] = mapped_column(db.Integer, server_default=db.text("0"), nullable=True) + total_steps: Mapped[int] = mapped_column(sa.Integer, server_default=sa.text("0"), nullable=True) created_by_role: Mapped[str] = mapped_column(String(255)) # account, end_user created_by: Mapped[str] = mapped_column(StringUUID, nullable=False) created_at: Mapped[datetime] = mapped_column(DateTime, nullable=False, server_default=func.current_timestamp()) finished_at: Mapped[Optional[datetime]] = mapped_column(DateTime) - exceptions_count: Mapped[int] = mapped_column(db.Integer, server_default=db.text("0"), nullable=True) + exceptions_count: Mapped[int] = mapped_column(sa.Integer, server_default=sa.text("0"), nullable=True) @property def created_by_account(self): @@ -704,25 +704,25 @@ class WorkflowNodeExecutionModel(Base): ), ) - id: Mapped[str] = mapped_column(StringUUID, server_default=db.text("uuid_generate_v4()")) + id: Mapped[str] = mapped_column(StringUUID, server_default=sa.text("uuid_generate_v4()")) tenant_id: Mapped[str] = mapped_column(StringUUID) app_id: Mapped[str] = mapped_column(StringUUID) workflow_id: Mapped[str] = mapped_column(StringUUID) triggered_from: Mapped[str] = mapped_column(String(255)) workflow_run_id: Mapped[Optional[str]] = mapped_column(StringUUID) - index: Mapped[int] = mapped_column(db.Integer) + index: Mapped[int] = mapped_column(sa.Integer) predecessor_node_id: Mapped[Optional[str]] = mapped_column(String(255)) node_execution_id: Mapped[Optional[str]] = mapped_column(String(255)) node_id: Mapped[str] = mapped_column(String(255)) node_type: Mapped[str] = mapped_column(String(255)) title: Mapped[str] = mapped_column(String(255)) - inputs: Mapped[Optional[str]] = mapped_column(db.Text) - process_data: Mapped[Optional[str]] = mapped_column(db.Text) - outputs: Mapped[Optional[str]] = mapped_column(db.Text) + inputs: Mapped[Optional[str]] = mapped_column(sa.Text) + process_data: Mapped[Optional[str]] = mapped_column(sa.Text) + outputs: Mapped[Optional[str]] = mapped_column(sa.Text) status: Mapped[str] = mapped_column(String(255)) - error: Mapped[Optional[str]] = mapped_column(db.Text) - elapsed_time: Mapped[float] = mapped_column(db.Float, server_default=db.text("0")) - execution_metadata: Mapped[Optional[str]] = mapped_column(db.Text) + error: Mapped[Optional[str]] = mapped_column(sa.Text) + elapsed_time: Mapped[float] = mapped_column(sa.Float, server_default=sa.text("0")) + execution_metadata: Mapped[Optional[str]] = mapped_column(sa.Text) created_at: Mapped[datetime] = mapped_column(DateTime, server_default=func.current_timestamp()) created_by_role: Mapped[str] = mapped_column(String(255)) created_by: Mapped[str] = mapped_column(StringUUID) @@ -834,11 +834,11 @@ class WorkflowAppLog(Base): __tablename__ = "workflow_app_logs" __table_args__ = ( - db.PrimaryKeyConstraint("id", name="workflow_app_log_pkey"), - db.Index("workflow_app_log_app_idx", "tenant_id", "app_id"), + sa.PrimaryKeyConstraint("id", name="workflow_app_log_pkey"), + sa.Index("workflow_app_log_app_idx", "tenant_id", "app_id"), ) - id: Mapped[str] = mapped_column(StringUUID, server_default=db.text("uuid_generate_v4()")) + id: Mapped[str] = mapped_column(StringUUID, server_default=sa.text("uuid_generate_v4()")) tenant_id: Mapped[str] = mapped_column(StringUUID) app_id: Mapped[str] = mapped_column(StringUUID) workflow_id: Mapped[str] = mapped_column(StringUUID, nullable=False) @@ -871,7 +871,7 @@ class ConversationVariable(Base): id: Mapped[str] = mapped_column(StringUUID, primary_key=True) conversation_id: Mapped[str] = mapped_column(StringUUID, nullable=False, primary_key=True, index=True) app_id: Mapped[str] = mapped_column(StringUUID, nullable=False, index=True) - data: Mapped[str] = mapped_column(db.Text, nullable=False) + data: Mapped[str] = mapped_column(sa.Text, nullable=False) created_at: Mapped[datetime] = mapped_column( DateTime, nullable=False, server_default=func.current_timestamp(), index=True ) @@ -933,7 +933,7 @@ class WorkflowDraftVariable(Base): __allow_unmapped__ = True # id is the unique identifier of a draft variable. - id: Mapped[str] = mapped_column(StringUUID, primary_key=True, server_default=db.text("uuid_generate_v4()")) + id: Mapped[str] = mapped_column(StringUUID, primary_key=True, server_default=sa.text("uuid_generate_v4()")) created_at: Mapped[datetime] = mapped_column( DateTime, diff --git a/api/services/plugin/data_migration.py b/api/services/plugin/data_migration.py index 7a4f886bf5..c5ad65ec87 100644 --- a/api/services/plugin/data_migration.py +++ b/api/services/plugin/data_migration.py @@ -2,6 +2,7 @@ import json import logging import click +import sqlalchemy as sa from core.plugin.entities.plugin import GenericProviderID, ModelProviderID, ToolProviderID from models.engine import db @@ -38,7 +39,7 @@ class PluginDataMigration: where {provider_column_name} not like '%/%' and {provider_column_name} is not null and {provider_column_name} != '' limit 1000""" with db.engine.begin() as conn: - rs = conn.execute(db.text(sql)) + rs = conn.execute(sa.text(sql)) current_iter_count = 0 for i in rs: @@ -94,7 +95,7 @@ limit 1000""" :provider_name {update_retrieval_model_sql} where id = :record_id""" - conn.execute(db.text(sql), params) + conn.execute(sa.text(sql), params) click.echo( click.style( f"[{processed_count}] Migrated [{table_name}] {record_id} ({provider_name})", @@ -148,7 +149,7 @@ limit 1000""" params = {"last_id": last_id or ""} with db.engine.begin() as conn: - rs = conn.execute(db.text(sql), params) + rs = conn.execute(sa.text(sql), params) current_iter_count = 0 batch_updates = [] @@ -193,7 +194,7 @@ limit 1000""" SET {provider_column_name} = :updated_value WHERE id = :record_id """ - conn.execute(db.text(update_sql), [{"updated_value": u, "record_id": r} for u, r in batch_updates]) + conn.execute(sa.text(update_sql), [{"updated_value": u, "record_id": r} for u, r in batch_updates]) click.echo( click.style( f"[{processed_count}] Batch migrated [{len(batch_updates)}] records from [{table_name}]", diff --git a/api/services/plugin/plugin_migration.py b/api/services/plugin/plugin_migration.py index 222d70a317..221069b2b3 100644 --- a/api/services/plugin/plugin_migration.py +++ b/api/services/plugin/plugin_migration.py @@ -9,6 +9,7 @@ from typing import Any, Optional from uuid import uuid4 import click +import sqlalchemy as sa import tqdm from flask import Flask, current_app from sqlalchemy.orm import Session @@ -197,7 +198,7 @@ class PluginMigration: """ with Session(db.engine) as session: rs = session.execute( - db.text(f"SELECT DISTINCT {column} FROM {table} WHERE tenant_id = :tenant_id"), {"tenant_id": tenant_id} + sa.text(f"SELECT DISTINCT {column} FROM {table} WHERE tenant_id = :tenant_id"), {"tenant_id": tenant_id} ) result = [] for row in rs: diff --git a/api/tasks/remove_app_and_related_data_task.py b/api/tasks/remove_app_and_related_data_task.py index b6f772dd60..929b60e529 100644 --- a/api/tasks/remove_app_and_related_data_task.py +++ b/api/tasks/remove_app_and_related_data_task.py @@ -3,6 +3,7 @@ import time from collections.abc import Callable import click +import sqlalchemy as sa from celery import shared_task # type: ignore from sqlalchemy import delete from sqlalchemy.exc import SQLAlchemyError @@ -331,7 +332,7 @@ def _delete_trace_app_configs(tenant_id: str, app_id: str): def _delete_records(query_sql: str, params: dict, delete_func: Callable, name: str) -> None: while True: with db.engine.begin() as conn: - rs = conn.execute(db.text(query_sql), params) + rs = conn.execute(sa.text(query_sql), params) if rs.rowcount == 0: break From 99a4bd82b5b529135b2ce54fe883ac4b6e9d354c Mon Sep 17 00:00:00 2001 From: Matri Qi Date: Sun, 3 Aug 2025 10:09:26 +0800 Subject: [PATCH 115/415] chore: fix function name typo (#23306) --- .../app/configuration/config-var/config-modal/index.tsx | 4 ++-- .../nodes/_base/components/variable/output-var-list.tsx | 4 ++-- .../workflow/nodes/_base/components/variable/var-list.tsx | 4 ++-- .../workflow/nodes/loop/components/loop-variables/item.tsx | 4 ++-- .../nodes/variable-assigner/components/var-group-item.tsx | 4 ++-- .../panel/chat-variable-panel/components/variable-modal.tsx | 4 ++-- .../components/workflow/panel/env-panel/variable-modal.tsx | 4 ++-- web/utils/var.ts | 2 +- 8 files changed, 15 insertions(+), 15 deletions(-) diff --git a/web/app/components/app/configuration/config-var/config-modal/index.tsx b/web/app/components/app/configuration/config-var/config-modal/index.tsx index 27072f5208..861020545d 100644 --- a/web/app/components/app/configuration/config-var/config-modal/index.tsx +++ b/web/app/components/app/configuration/config-var/config-modal/index.tsx @@ -11,7 +11,7 @@ import SelectTypeItem from '../select-type-item' import Field from './field' import Input from '@/app/components/base/input' import Toast from '@/app/components/base/toast' -import { checkKeys, getNewVarInWorkflow, replaceSpaceWithUnderscreInVarNameInput } from '@/utils/var' +import { checkKeys, getNewVarInWorkflow, replaceSpaceWithUnderscoreInVarNameInput } from '@/utils/var' import ConfigContext from '@/context/debug-configuration' import type { InputVar, MoreInfo, UploadFileSetting } from '@/app/components/workflow/types' import Modal from '@/app/components/base/modal' @@ -111,7 +111,7 @@ const ConfigModal: FC = ({ }, [checkVariableName, tempPayload.label]) const handleVarNameChange = useCallback((e: ChangeEvent) => { - replaceSpaceWithUnderscreInVarNameInput(e.target) + replaceSpaceWithUnderscoreInVarNameInput(e.target) const value = e.target.value const { isValid, errorKey, errorMessageKey } = checkKeys([value], true) if (!isValid) { diff --git a/web/app/components/workflow/nodes/_base/components/variable/output-var-list.tsx b/web/app/components/workflow/nodes/_base/components/variable/output-var-list.tsx index 9fef1fe7b3..7365e91c21 100644 --- a/web/app/components/workflow/nodes/_base/components/variable/output-var-list.tsx +++ b/web/app/components/workflow/nodes/_base/components/variable/output-var-list.tsx @@ -8,7 +8,7 @@ import RemoveButton from '../remove-button' import VarTypePicker from './var-type-picker' import Input from '@/app/components/base/input' import type { VarType } from '@/app/components/workflow/types' -import { checkKeys, replaceSpaceWithUnderscreInVarNameInput } from '@/utils/var' +import { checkKeys, replaceSpaceWithUnderscoreInVarNameInput } from '@/utils/var' import type { ToastHandle } from '@/app/components/base/toast' import Toast from '@/app/components/base/toast' import { useDebounceFn } from 'ahooks' @@ -62,7 +62,7 @@ const OutputVarList: FC = ({ return (e: React.ChangeEvent) => { const oldKey = list[index].variable - replaceSpaceWithUnderscreInVarNameInput(e.target) + replaceSpaceWithUnderscoreInVarNameInput(e.target) const newKey = e.target.value toastHandler?.clear?.() diff --git a/web/app/components/workflow/nodes/_base/components/variable/var-list.tsx b/web/app/components/workflow/nodes/_base/components/variable/var-list.tsx index 2972b33511..e9e45d3aad 100644 --- a/web/app/components/workflow/nodes/_base/components/variable/var-list.tsx +++ b/web/app/components/workflow/nodes/_base/components/variable/var-list.tsx @@ -8,7 +8,7 @@ import VarReferencePicker from './var-reference-picker' import Input from '@/app/components/base/input' import type { ValueSelector, Var, Variable } from '@/app/components/workflow/types' import { VarType as VarKindType } from '@/app/components/workflow/nodes/tool/types' -import { checkKeys, replaceSpaceWithUnderscreInVarNameInput } from '@/utils/var' +import { checkKeys, replaceSpaceWithUnderscoreInVarNameInput } from '@/utils/var' import type { ToastHandle } from '@/app/components/base/toast' import Toast from '@/app/components/base/toast' import { ReactSortable } from 'react-sortablejs' @@ -74,7 +74,7 @@ const VarList: FC = ({ const handleVarNameChange = useCallback((index: number) => { return (e: React.ChangeEvent) => { - replaceSpaceWithUnderscreInVarNameInput(e.target) + replaceSpaceWithUnderscoreInVarNameInput(e.target) const newKey = e.target.value diff --git a/web/app/components/workflow/nodes/loop/components/loop-variables/item.tsx b/web/app/components/workflow/nodes/loop/components/loop-variables/item.tsx index 0e8650d743..42dc34b399 100644 --- a/web/app/components/workflow/nodes/loop/components/loop-variables/item.tsx +++ b/web/app/components/workflow/nodes/loop/components/loop-variables/item.tsx @@ -10,7 +10,7 @@ import type { LoopVariable, LoopVariablesComponentShape, } from '@/app/components/workflow/nodes/loop/types' -import { checkKeys, replaceSpaceWithUnderscreInVarNameInput } from '@/utils/var' +import { checkKeys, replaceSpaceWithUnderscoreInVarNameInput } from '@/utils/var' import Toast from '@/app/components/base/toast' type ItemProps = { @@ -36,7 +36,7 @@ const Item = ({ return true } const handleUpdateItemLabel = useCallback((e: any) => { - replaceSpaceWithUnderscreInVarNameInput(e.target) + replaceSpaceWithUnderscoreInVarNameInput(e.target) if (!!e.target.value && !checkVariableName(e.target.value)) return handleUpdateLoopVariable(item.id, { label: e.target.value }) diff --git a/web/app/components/workflow/nodes/variable-assigner/components/var-group-item.tsx b/web/app/components/workflow/nodes/variable-assigner/components/var-group-item.tsx index 60be8a0842..a6b02931d9 100644 --- a/web/app/components/workflow/nodes/variable-assigner/components/var-group-item.tsx +++ b/web/app/components/workflow/nodes/variable-assigner/components/var-group-item.tsx @@ -15,7 +15,7 @@ import { VarType } from '@/app/components/workflow/types' import type { NodeOutPutVar, ValueSelector, Var } from '@/app/components/workflow/types' import { VarType as VarKindType } from '@/app/components/workflow/nodes/tool/types' import { Folder } from '@/app/components/base/icons/src/vender/line/files' -import { checkKeys, replaceSpaceWithUnderscreInVarNameInput } from '@/utils/var' +import { checkKeys, replaceSpaceWithUnderscoreInVarNameInput } from '@/utils/var' import Toast from '@/app/components/base/toast' const i18nPrefix = 'workflow.nodes.variableAssigner' @@ -89,7 +89,7 @@ const VarGroupItem: FC = ({ }] = useBoolean(false) const handleGroupNameChange = useCallback((e: ChangeEvent) => { - replaceSpaceWithUnderscreInVarNameInput(e.target) + replaceSpaceWithUnderscoreInVarNameInput(e.target) const value = e.target.value const { isValid, errorKey, errorMessageKey } = checkKeys([value], false) if (!isValid) { diff --git a/web/app/components/workflow/panel/chat-variable-panel/components/variable-modal.tsx b/web/app/components/workflow/panel/chat-variable-panel/components/variable-modal.tsx index 869317ca6a..15292b928d 100644 --- a/web/app/components/workflow/panel/chat-variable-panel/components/variable-modal.tsx +++ b/web/app/components/workflow/panel/chat-variable-panel/components/variable-modal.tsx @@ -16,7 +16,7 @@ import type { ConversationVariable } from '@/app/components/workflow/types' import { CodeLanguage } from '@/app/components/workflow/nodes/code/types' import { ChatVarType } from '@/app/components/workflow/panel/chat-variable-panel/type' import cn from '@/utils/classnames' -import { checkKeys, replaceSpaceWithUnderscreInVarNameInput } from '@/utils/var' +import { checkKeys, replaceSpaceWithUnderscoreInVarNameInput } from '@/utils/var' export type ModalPropsType = { chatVar?: ConversationVariable @@ -144,7 +144,7 @@ const ChatVariableModal = ({ } const handleVarNameChange = (e: React.ChangeEvent) => { - replaceSpaceWithUnderscreInVarNameInput(e.target) + replaceSpaceWithUnderscoreInVarNameInput(e.target) if (!!e.target.value && !checkVariableName(e.target.value)) return setName(e.target.value || '') diff --git a/web/app/components/workflow/panel/env-panel/variable-modal.tsx b/web/app/components/workflow/panel/env-panel/variable-modal.tsx index 4877575d7e..1c780f7341 100644 --- a/web/app/components/workflow/panel/env-panel/variable-modal.tsx +++ b/web/app/components/workflow/panel/env-panel/variable-modal.tsx @@ -10,7 +10,7 @@ import { ToastContext } from '@/app/components/base/toast' import { useStore } from '@/app/components/workflow/store' import type { EnvironmentVariable } from '@/app/components/workflow/types' import cn from '@/utils/classnames' -import { checkKeys, replaceSpaceWithUnderscreInVarNameInput } from '@/utils/var' +import { checkKeys, replaceSpaceWithUnderscoreInVarNameInput } from '@/utils/var' export type ModalPropsType = { env?: EnvironmentVariable @@ -44,7 +44,7 @@ const VariableModal = ({ } const handleVarNameChange = (e: React.ChangeEvent) => { - replaceSpaceWithUnderscreInVarNameInput(e.target) + replaceSpaceWithUnderscoreInVarNameInput(e.target) if (!!e.target.value && !checkVariableName(e.target.value)) return setName(e.target.value || '') diff --git a/web/utils/var.ts b/web/utils/var.ts index ce0ca030e1..bdc2fbdd42 100644 --- a/web/utils/var.ts +++ b/web/utils/var.ts @@ -121,7 +121,7 @@ export function getMarketplaceUrl(path: string, params?: Record { +export const replaceSpaceWithUnderscoreInVarNameInput = (input: HTMLInputElement) => { const start = input.selectionStart const end = input.selectionEnd From fbf844efd549b6e64d3e383b2aaeb7392992229b Mon Sep 17 00:00:00 2001 From: Yongtao Huang Date: Sun, 3 Aug 2025 10:11:47 +0800 Subject: [PATCH 116/415] Chore: replace deprecated `datetime.utcnow()` with `naive_utc_now()` (#23312) Signed-off-by: Yongtao Huang --- api/services/dataset_service.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/api/services/dataset_service.py b/api/services/dataset_service.py index da475a18f8..1bcb597845 100644 --- a/api/services/dataset_service.py +++ b/api/services/dataset_service.py @@ -266,7 +266,7 @@ class DatasetService: "No Embedding Model available. Please configure a valid provider in the Settings -> Model Provider." ) except ProviderTokenNotInitError as ex: - raise ValueError(f"The dataset in unavailable, due to: {ex.description}") + raise ValueError(f"The dataset is unavailable, due to: {ex.description}") @staticmethod def check_embedding_model_setting(tenant_id: str, embedding_model_provider: str, embedding_model: str): @@ -370,7 +370,7 @@ class DatasetService: raise ValueError("External knowledge api id is required.") # Update metadata fields dataset.updated_by = user.id if user else None - dataset.updated_at = datetime.datetime.utcnow() + dataset.updated_at = naive_utc_now() db.session.add(dataset) # Update external knowledge binding From 7831d44099d9142b76e15585ba456fc05f1c8965 Mon Sep 17 00:00:00 2001 From: znn Date: Sun, 3 Aug 2025 07:44:17 +0530 Subject: [PATCH 117/415] fixing chat window api recall fix on changing browser tab or window (#23301) --- .../base/chat/chat-with-history/hooks.tsx | 18 +++++++++++++++--- 1 file changed, 15 insertions(+), 3 deletions(-) diff --git a/web/app/components/base/chat/chat-with-history/hooks.tsx b/web/app/components/base/chat/chat-with-history/hooks.tsx index 382ded3201..e88d28879b 100644 --- a/web/app/components/base/chat/chat-with-history/hooks.tsx +++ b/web/app/components/base/chat/chat-with-history/hooks.tsx @@ -159,9 +159,21 @@ export const useChatWithHistory = (installedAppInfo?: InstalledApp) => { return currentConversationId }, [currentConversationId, newConversationId]) - const { data: appPinnedConversationData, mutate: mutateAppPinnedConversationData } = useSWR(['appConversationData', isInstalledApp, appId, true], () => fetchConversations(isInstalledApp, appId, undefined, true, 100)) - const { data: appConversationData, isLoading: appConversationDataLoading, mutate: mutateAppConversationData } = useSWR(['appConversationData', isInstalledApp, appId, false], () => fetchConversations(isInstalledApp, appId, undefined, false, 100)) - const { data: appChatListData, isLoading: appChatListDataLoading } = useSWR(chatShouldReloadKey ? ['appChatList', chatShouldReloadKey, isInstalledApp, appId] : null, () => fetchChatList(chatShouldReloadKey, isInstalledApp, appId)) + const { data: appPinnedConversationData, mutate: mutateAppPinnedConversationData } = useSWR( + appId ? ['appConversationData', isInstalledApp, appId, true] : null, + () => fetchConversations(isInstalledApp, appId, undefined, true, 100), + { revalidateOnFocus: false, revalidateOnReconnect: false }, + ) + const { data: appConversationData, isLoading: appConversationDataLoading, mutate: mutateAppConversationData } = useSWR( + appId ? ['appConversationData', isInstalledApp, appId, false] : null, + () => fetchConversations(isInstalledApp, appId, undefined, false, 100), + { revalidateOnFocus: false, revalidateOnReconnect: false }, + ) + const { data: appChatListData, isLoading: appChatListDataLoading } = useSWR( + chatShouldReloadKey ? ['appChatList', chatShouldReloadKey, isInstalledApp, appId] : null, + () => fetchChatList(chatShouldReloadKey, isInstalledApp, appId), + { revalidateOnFocus: false, revalidateOnReconnect: false }, + ) const [clearChatList, setClearChatList] = useState(false) const [isResponding, setIsResponding] = useState(false) From 6d5a7684b4f04c421c9432c755388e8176007d92 Mon Sep 17 00:00:00 2001 From: Warren Wong Date: Sun, 3 Aug 2025 16:26:11 +0800 Subject: [PATCH 118/415] feat: Add Download Button to UI for Knowledge Resource Source Files (#23320) --- api/controllers/console/__init__.py | 1 + .../console/datasets/upload_file.py | 62 +++++++++++++++++++ .../components/datasets/documents/list.tsx | 29 +++++++++ web/i18n/en-US/dataset-documents.ts | 1 + web/service/knowledge/use-document.ts | 17 +++++ 5 files changed, 110 insertions(+) create mode 100644 api/controllers/console/datasets/upload_file.py diff --git a/api/controllers/console/__init__.py b/api/controllers/console/__init__.py index e25f92399c..57dbc8da64 100644 --- a/api/controllers/console/__init__.py +++ b/api/controllers/console/__init__.py @@ -84,6 +84,7 @@ from .datasets import ( external, hit_testing, metadata, + upload_file, website, ) diff --git a/api/controllers/console/datasets/upload_file.py b/api/controllers/console/datasets/upload_file.py new file mode 100644 index 0000000000..9b456c771d --- /dev/null +++ b/api/controllers/console/datasets/upload_file.py @@ -0,0 +1,62 @@ +from flask_login import current_user +from flask_restful import Resource +from werkzeug.exceptions import NotFound + +from controllers.console import api +from controllers.console.wraps import ( + account_initialization_required, + setup_required, +) +from core.file import helpers as file_helpers +from extensions.ext_database import db +from models.dataset import Dataset +from models.model import UploadFile +from services.dataset_service import DocumentService + + +class UploadFileApi(Resource): + @setup_required + @account_initialization_required + def get(self, dataset_id, document_id): + """Get upload file.""" + # check dataset + dataset_id = str(dataset_id) + dataset = ( + db.session.query(Dataset) + .filter(Dataset.tenant_id == current_user.current_tenant_id, Dataset.id == dataset_id) + .first() + ) + if not dataset: + raise NotFound("Dataset not found.") + # check document + document_id = str(document_id) + document = DocumentService.get_document(dataset.id, document_id) + if not document: + raise NotFound("Document not found.") + # check upload file + if document.data_source_type != "upload_file": + raise ValueError(f"Document data source type ({document.data_source_type}) is not upload_file.") + data_source_info = document.data_source_info_dict + if data_source_info and "upload_file_id" in data_source_info: + file_id = data_source_info["upload_file_id"] + upload_file = db.session.query(UploadFile).filter(UploadFile.id == file_id).first() + if not upload_file: + raise NotFound("UploadFile not found.") + else: + raise ValueError("Upload file id not found in document data source info.") + + url = file_helpers.get_signed_file_url(upload_file_id=upload_file.id) + return { + "id": upload_file.id, + "name": upload_file.name, + "size": upload_file.size, + "extension": upload_file.extension, + "url": url, + "download_url": f"{url}&as_attachment=true", + "mime_type": upload_file.mime_type, + "created_by": upload_file.created_by, + "created_at": upload_file.created_at.timestamp(), + }, 200 + + +api.add_resource(UploadFileApi, "/datasets//documents//upload-file") diff --git a/web/app/components/datasets/documents/list.tsx b/web/app/components/datasets/documents/list.tsx index 2697580f4e..abfa578138 100644 --- a/web/app/components/datasets/documents/list.tsx +++ b/web/app/components/datasets/documents/list.tsx @@ -7,6 +7,7 @@ import { pick, uniq } from 'lodash-es' import { RiArchive2Line, RiDeleteBinLine, + RiDownloadLine, RiEditLine, RiEqualizer2Line, RiLoopLeftLine, @@ -35,6 +36,7 @@ import type { ColorMap, IndicatorProps } from '@/app/components/header/indicator import Indicator from '@/app/components/header/indicator' import { asyncRunSafe } from '@/utils' import { formatNumber } from '@/utils/format' +import { useDocumentDownload } from '@/service/knowledge/use-document' import NotionIcon from '@/app/components/base/notion-icon' import ProgressBar from '@/app/components/base/progress-bar' import { ChunkingMode, DataSourceType, DocumentActionType, type DocumentDisplayStatus, type SimpleDocumentDetail } from '@/models/datasets' @@ -97,6 +99,7 @@ export const StatusItem: FC<{ const { mutateAsync: enableDocument } = useDocumentEnable() const { mutateAsync: disableDocument } = useDocumentDisable() const { mutateAsync: deleteDocument } = useDocumentDelete() + const downloadDocument = useDocumentDownload() const onOperate = async (operationName: OperationName) => { let opApi = deleteDocument @@ -188,6 +191,7 @@ export const OperationAction: FC<{ scene?: 'list' | 'detail' className?: string }> = ({ embeddingAvailable, datasetId, detail, onUpdate, scene = 'list', className = '' }) => { + const downloadDocument = useDocumentDownload() const { id, enabled = false, archived = false, data_source_type, display_status } = detail || {} const [showModal, setShowModal] = useState(false) const [deleting, setDeleting] = useState(false) @@ -296,6 +300,31 @@ export const OperationAction: FC<{ )} {embeddingAvailable && ( <> + + + { }) } +// Download document with authentication (sends Authorization header) +export const useDocumentDownload = () => { + return useMutation({ + mutationFn: async ({ datasetId, documentId }: { datasetId: string; documentId: string }) => { + // The get helper automatically adds the Authorization header from localStorage + return get(`/datasets/${datasetId}/documents/${documentId}/upload-file`) + }, + onError: (error: any) => { + // Show a toast notification if download fails + const message = error?.message || 'Download failed.' + Toast.notify({ type: 'error', message }) + }, + }) +} + export const useSyncWebsite = () => { return useMutation({ mutationFn: ({ datasetId, documentId }: UpdateDocumentBatchParams) => { From 20f0238aabe932e69248454bf9f3540f234f1c3d Mon Sep 17 00:00:00 2001 From: qiaofenlin Date: Sun, 3 Aug 2025 16:27:12 +0800 Subject: [PATCH 119/415] feat: support workflow version specification in workflow and chat APIs (#23188) Co-authored-by: autofix-ci[bot] <114827586+autofix-ci[bot]@users.noreply.github.com> Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com> --- api/controllers/service_api/app/completion.py | 11 +- api/controllers/service_api/app/workflow.py | 57 ++++- api/services/app_generate_service.py | 25 +- api/services/errors/app.py | 8 + api/services/workflow_service.py | 5 +- .../template/template_advanced_chat.en.mdx | 6 + .../template/template_advanced_chat.ja.mdx | 6 + .../template/template_advanced_chat.zh.mdx | 6 + .../develop/template/template_chat.en.mdx | 6 + .../develop/template/template_chat.ja.mdx | 6 + .../develop/template/template_chat.zh.mdx | 6 + .../develop/template/template_workflow.en.mdx | 234 ++++++++++++++++++ .../develop/template/template_workflow.ja.mdx | 229 +++++++++++++++++ .../develop/template/template_workflow.zh.mdx | 229 +++++++++++++++++ .../context-menu/use-context-menu.ts | 7 +- .../panel/version-history-panel/index.tsx | 10 +- .../version-history-item.tsx | 1 - web/app/components/workflow/types.ts | 1 + web/i18n/en-US/workflow.ts | 4 +- web/i18n/zh-Hans/workflow.ts | 2 + 20 files changed, 846 insertions(+), 13 deletions(-) diff --git a/api/controllers/service_api/app/completion.py b/api/controllers/service_api/app/completion.py index edc66cc5e9..ea57f04850 100644 --- a/api/controllers/service_api/app/completion.py +++ b/api/controllers/service_api/app/completion.py @@ -2,7 +2,7 @@ import logging from flask import request from flask_restful import Resource, reqparse -from werkzeug.exceptions import InternalServerError, NotFound +from werkzeug.exceptions import BadRequest, InternalServerError, NotFound import services from controllers.service_api import api @@ -30,6 +30,7 @@ from libs import helper from libs.helper import uuid_value from models.model import App, AppMode, EndUser from services.app_generate_service import AppGenerateService +from services.errors.app import IsDraftWorkflowError, WorkflowIdFormatError, WorkflowNotFoundError from services.errors.llm import InvokeRateLimitError @@ -113,7 +114,7 @@ class ChatApi(Resource): parser.add_argument("conversation_id", type=uuid_value, location="json") parser.add_argument("retriever_from", type=str, required=False, default="dev", location="json") parser.add_argument("auto_generate_name", type=bool, required=False, default=True, location="json") - + parser.add_argument("workflow_id", type=str, required=False, location="json") args = parser.parse_args() external_trace_id = get_external_trace_id(request) @@ -128,6 +129,12 @@ class ChatApi(Resource): ) return helper.compact_generate_response(response) + except WorkflowNotFoundError as ex: + raise NotFound(str(ex)) + except IsDraftWorkflowError as ex: + raise BadRequest(str(ex)) + except WorkflowIdFormatError as ex: + raise BadRequest(str(ex)) except services.errors.conversation.ConversationNotExistsError: raise NotFound("Conversation Not Exists.") except services.errors.conversation.ConversationCompletedError: diff --git a/api/controllers/service_api/app/workflow.py b/api/controllers/service_api/app/workflow.py index 370ff911b4..cd8a5f03ac 100644 --- a/api/controllers/service_api/app/workflow.py +++ b/api/controllers/service_api/app/workflow.py @@ -5,7 +5,7 @@ from flask import request from flask_restful import Resource, fields, marshal_with, reqparse from flask_restful.inputs import int_range from sqlalchemy.orm import Session, sessionmaker -from werkzeug.exceptions import InternalServerError +from werkzeug.exceptions import BadRequest, InternalServerError, NotFound from controllers.service_api import api from controllers.service_api.app.error import ( @@ -34,6 +34,7 @@ from libs.helper import TimestampField from models.model import App, AppMode, EndUser from repositories.factory import DifyAPIRepositoryFactory from services.app_generate_service import AppGenerateService +from services.errors.app import IsDraftWorkflowError, WorkflowIdFormatError, WorkflowNotFoundError from services.errors.llm import InvokeRateLimitError from services.workflow_app_service import WorkflowAppService @@ -120,6 +121,59 @@ class WorkflowRunApi(Resource): raise InternalServerError() +class WorkflowRunByIdApi(Resource): + @validate_app_token(fetch_user_arg=FetchUserArg(fetch_from=WhereisUserArg.JSON, required=True)) + def post(self, app_model: App, end_user: EndUser, workflow_id: str): + """ + Run specific workflow by ID + """ + app_mode = AppMode.value_of(app_model.mode) + if app_mode != AppMode.WORKFLOW: + raise NotWorkflowAppError() + + parser = reqparse.RequestParser() + parser.add_argument("inputs", type=dict, required=True, nullable=False, location="json") + parser.add_argument("files", type=list, required=False, location="json") + parser.add_argument("response_mode", type=str, choices=["blocking", "streaming"], location="json") + args = parser.parse_args() + + # Add workflow_id to args for AppGenerateService + args["workflow_id"] = workflow_id + + external_trace_id = get_external_trace_id(request) + if external_trace_id: + args["external_trace_id"] = external_trace_id + streaming = args.get("response_mode") == "streaming" + + try: + response = AppGenerateService.generate( + app_model=app_model, user=end_user, args=args, invoke_from=InvokeFrom.SERVICE_API, streaming=streaming + ) + + return helper.compact_generate_response(response) + except WorkflowNotFoundError as ex: + raise NotFound(str(ex)) + except IsDraftWorkflowError as ex: + raise BadRequest(str(ex)) + except WorkflowIdFormatError as ex: + raise BadRequest(str(ex)) + except ProviderTokenNotInitError as ex: + raise ProviderNotInitializeError(ex.description) + except QuotaExceededError: + raise ProviderQuotaExceededError() + except ModelCurrentlyNotSupportError: + raise ProviderModelCurrentlyNotSupportError() + except InvokeRateLimitError as ex: + raise InvokeRateLimitHttpError(ex.description) + except InvokeError as e: + raise CompletionRequestError(e.description) + except ValueError as e: + raise e + except Exception: + logging.exception("internal server error.") + raise InternalServerError() + + class WorkflowTaskStopApi(Resource): @validate_app_token(fetch_user_arg=FetchUserArg(fetch_from=WhereisUserArg.JSON, required=True)) def post(self, app_model: App, end_user: EndUser, task_id: str): @@ -193,5 +247,6 @@ class WorkflowAppLogApi(Resource): api.add_resource(WorkflowRunApi, "/workflows/run") api.add_resource(WorkflowRunDetailApi, "/workflows/run/") +api.add_resource(WorkflowRunByIdApi, "/workflows//run") api.add_resource(WorkflowTaskStopApi, "/workflows/tasks//stop") api.add_resource(WorkflowAppLogApi, "/workflows/logs") diff --git a/api/services/app_generate_service.py b/api/services/app_generate_service.py index 6f7e705b52..6792324ec8 100644 --- a/api/services/app_generate_service.py +++ b/api/services/app_generate_service.py @@ -1,5 +1,6 @@ +import uuid from collections.abc import Generator, Mapping -from typing import Any, Union +from typing import Any, Optional, Union from openai._exceptions import RateLimitError @@ -15,6 +16,7 @@ from libs.helper import RateLimiter from models.model import Account, App, AppMode, EndUser from models.workflow import Workflow from services.billing_service import BillingService +from services.errors.app import WorkflowIdFormatError, WorkflowNotFoundError from services.errors.llm import InvokeRateLimitError from services.workflow_service import WorkflowService @@ -86,7 +88,8 @@ class AppGenerateService: request_id=request_id, ) elif app_model.mode == AppMode.ADVANCED_CHAT.value: - workflow = cls._get_workflow(app_model, invoke_from) + workflow_id = args.get("workflow_id") + workflow = cls._get_workflow(app_model, invoke_from, workflow_id) return rate_limit.generate( AdvancedChatAppGenerator.convert_to_event_stream( AdvancedChatAppGenerator().generate( @@ -101,7 +104,8 @@ class AppGenerateService: request_id=request_id, ) elif app_model.mode == AppMode.WORKFLOW.value: - workflow = cls._get_workflow(app_model, invoke_from) + workflow_id = args.get("workflow_id") + workflow = cls._get_workflow(app_model, invoke_from, workflow_id) return rate_limit.generate( WorkflowAppGenerator.convert_to_event_stream( WorkflowAppGenerator().generate( @@ -210,14 +214,27 @@ class AppGenerateService: ) @classmethod - def _get_workflow(cls, app_model: App, invoke_from: InvokeFrom) -> Workflow: + def _get_workflow(cls, app_model: App, invoke_from: InvokeFrom, workflow_id: Optional[str] = None) -> Workflow: """ Get workflow :param app_model: app model :param invoke_from: invoke from + :param workflow_id: optional workflow id to specify a specific version :return: """ workflow_service = WorkflowService() + + # If workflow_id is specified, get the specific workflow version + if workflow_id: + try: + workflow_uuid = uuid.UUID(workflow_id) + except ValueError: + raise WorkflowIdFormatError(f"Invalid workflow_id format: '{workflow_id}'. ") + workflow = workflow_service.get_published_workflow_by_id(app_model=app_model, workflow_id=workflow_id) + if not workflow: + raise WorkflowNotFoundError(f"Workflow not found with id: {workflow_id}") + return workflow + if invoke_from == InvokeFrom.DEBUGGER: # fetch draft workflow by app_model workflow = workflow_service.get_draft_workflow(app_model=app_model) diff --git a/api/services/errors/app.py b/api/services/errors/app.py index 5d348c61be..390716a47f 100644 --- a/api/services/errors/app.py +++ b/api/services/errors/app.py @@ -8,3 +8,11 @@ class WorkflowHashNotEqualError(Exception): class IsDraftWorkflowError(Exception): pass + + +class WorkflowNotFoundError(Exception): + pass + + +class WorkflowIdFormatError(Exception): + pass diff --git a/api/services/workflow_service.py b/api/services/workflow_service.py index 8588144980..0c5d29b78e 100644 --- a/api/services/workflow_service.py +++ b/api/services/workflow_service.py @@ -129,7 +129,10 @@ class WorkflowService: if not workflow: return None if workflow.version == Workflow.VERSION_DRAFT: - raise IsDraftWorkflowError(f"Workflow is draft version, id={workflow_id}") + raise IsDraftWorkflowError( + f"Cannot use draft workflow version. Workflow ID: {workflow_id}. " + f"Please use a published workflow version or leave workflow_id empty." + ) return workflow def get_published_workflow(self, app_model: App) -> Optional[Workflow]: diff --git a/web/app/components/develop/template/template_advanced_chat.en.mdx b/web/app/components/develop/template/template_advanced_chat.en.mdx index ba698bdfdb..66c45b6e4f 100644 --- a/web/app/components/develop/template/template_advanced_chat.en.mdx +++ b/web/app/components/develop/template/template_advanced_chat.en.mdx @@ -80,6 +80,9 @@ Chat applications support session persistence, allowing previous chat history to Auto-generate title, default is `true`. If set to `false`, can achieve async title generation by calling the conversation rename API and setting `auto_generate` to `true`. + + (Optional) Workflow ID to specify a specific version, if not provided, uses the default published version. + (Optional) Trace ID. Used for integration with existing business trace components to achieve end-to-end distributed tracing. If not provided, the system will automatically generate a trace_id. Supports the following three ways to pass, in order of priority:
    - Header: via HTTP Header X-Trace-Id, highest priority.
    @@ -225,6 +228,9 @@ Chat applications support session persistence, allowing previous chat history to - 400, `provider_not_initialize`, no available model credential configuration - 400, `provider_quota_exceeded`, model invocation quota insufficient - 400, `model_currently_not_support`, current model unavailable + - 400, `workflow_not_found`, specified workflow version not found + - 400, `draft_workflow_error`, cannot use draft workflow version + - 400, `workflow_id_format_error`, invalid workflow_id format, expected UUID format - 400, `completion_request_error`, text generation failed - 500, internal server error diff --git a/web/app/components/develop/template/template_advanced_chat.ja.mdx b/web/app/components/develop/template/template_advanced_chat.ja.mdx index 5e9153ce1d..849b58129e 100644 --- a/web/app/components/develop/template/template_advanced_chat.ja.mdx +++ b/web/app/components/develop/template/template_advanced_chat.ja.mdx @@ -80,6 +80,9 @@ import { Row, Col, Properties, Property, Heading, SubProperty, Paragraph } from タイトルを自動生成、デフォルトは`true`。 `false`に設定すると、会話のリネームAPIを呼び出し、`auto_generate`を`true`に設定することで非同期タイトル生成を実現できます。
    + + (オプション)ワークフローID、特定のバージョンを指定するために使用、提供されない場合はデフォルトの公開バージョンを使用。 + (オプション)トレースID。既存の業務システムのトレースコンポーネントと連携し、エンドツーエンドの分散トレーシングを実現するために使用します。指定がない場合、システムが自動的に trace_id を生成します。以下の3つの方法で渡すことができ、優先順位は次のとおりです:
    - Header:HTTPヘッダー X-Trace-Id で渡す(最優先)。
    @@ -225,6 +228,9 @@ import { Row, Col, Properties, Property, Heading, SubProperty, Paragraph } from - 400, `provider_not_initialize`, 利用可能なモデル資格情報構成がありません - 400, `provider_quota_exceeded`, モデル呼び出しクォータが不足しています - 400, `model_currently_not_support`, 現在のモデルが利用できません + - 400, `workflow_not_found`, 指定されたワークフローバージョンが見つかりません + - 400, `draft_workflow_error`, ドラフトワークフローバージョンは使用できません + - 400, `workflow_id_format_error`, ワークフローID形式エラー、UUID形式が必要です - 400, `completion_request_error`, テキスト生成に失敗しました - 500, 内部サーバーエラー diff --git a/web/app/components/develop/template/template_advanced_chat.zh.mdx b/web/app/components/develop/template/template_advanced_chat.zh.mdx index 969edaa479..47c88bda90 100755 --- a/web/app/components/develop/template/template_advanced_chat.zh.mdx +++ b/web/app/components/develop/template/template_advanced_chat.zh.mdx @@ -78,6 +78,9 @@ import { Row, Col, Properties, Property, Heading, SubProperty } from '../md.tsx' (选填)自动生成标题,默认 `true`。 若设置为 `false`,则可通过调用会话重命名接口并设置 `auto_generate` 为 `true` 实现异步生成标题。 + + (选填)工作流ID,用于指定特定版本,如果不提供则使用默认的已发布版本。 + (选填)链路追踪ID。适用于与业务系统已有的trace组件打通,实现端到端分布式追踪等场景。如果未指定,系统会自动生成trace_id。支持以下三种方式传递,具体优先级依次为:
    - Header:通过 HTTP Header X-Trace-Id 传递,优先级最高。
    @@ -224,6 +227,9 @@ import { Row, Col, Properties, Property, Heading, SubProperty } from '../md.tsx' - 400,`provider_not_initialize`,无可用模型凭据配置 - 400,`provider_quota_exceeded`,模型调用额度不足 - 400,`model_currently_not_support`,当前模型不可用 + - 400,`workflow_not_found`,指定的工作流版本未找到 + - 400,`draft_workflow_error`,无法使用草稿工作流版本 + - 400,`workflow_id_format_error`,工作流ID格式错误,需要UUID格式 - 400,`completion_request_error`,文本生成失败 - 500,服务内部异常 diff --git a/web/app/components/develop/template/template_chat.en.mdx b/web/app/components/develop/template/template_chat.en.mdx index e357c7628c..24efe62fda 100644 --- a/web/app/components/develop/template/template_chat.en.mdx +++ b/web/app/components/develop/template/template_chat.en.mdx @@ -74,6 +74,9 @@ Chat applications support session persistence, allowing previous chat history to Auto-generate title, default is `true`. If set to `false`, can achieve async title generation by calling the conversation rename API and setting `auto_generate` to `true`.
    + + (Optional) Workflow ID to specify a specific version, if not provided, uses the default published version. + (Optional) Trace ID. Used for integration with existing business trace components to achieve end-to-end distributed tracing. If not provided, the system will automatically generate a trace_id. Supports the following three ways to pass, in order of priority:
    - Header: via HTTP Header X-Trace-Id, highest priority.
    @@ -180,6 +183,9 @@ Chat applications support session persistence, allowing previous chat history to - 400, `provider_not_initialize`, no available model credential configuration - 400, `provider_quota_exceeded`, model invocation quota insufficient - 400, `model_currently_not_support`, current model unavailable + - 400, `workflow_not_found`, specified workflow version not found + - 400, `draft_workflow_error`, cannot use draft workflow version + - 400, `workflow_id_format_error`, invalid workflow_id format, expected UUID format - 400, `completion_request_error`, text generation failed - 500, internal server error diff --git a/web/app/components/develop/template/template_chat.ja.mdx b/web/app/components/develop/template/template_chat.ja.mdx index 1e2ba63db7..d251bcd5cd 100644 --- a/web/app/components/develop/template/template_chat.ja.mdx +++ b/web/app/components/develop/template/template_chat.ja.mdx @@ -74,6 +74,9 @@ import { Row, Col, Properties, Property, Heading, SubProperty, Paragraph } from タイトルを自動生成します。デフォルトは`true`です。 `false`に設定すると、会話のリネームAPIを呼び出し、`auto_generate`を`true`に設定することで非同期タイトル生成を実現できます。
    + + (オプション)ワークフローID、特定のバージョンを指定するために使用、提供されない場合はデフォルトの公開バージョンを使用。 + (オプション)トレースID。既存の業務システムのトレースコンポーネントと連携し、エンドツーエンドの分散トレーシングを実現するために使用します。指定がない場合、システムが自動的に trace_id を生成します。以下の3つの方法で渡すことができ、優先順位は次のとおりです:
    - Header:HTTPヘッダー X-Trace-Id で渡す(最優先)。
    @@ -180,6 +183,9 @@ import { Row, Col, Properties, Property, Heading, SubProperty, Paragraph } from - 400, `provider_not_initialize`, 利用可能なモデル資格情報構成がありません - 400, `provider_quota_exceeded`, モデル呼び出しクォータが不足しています - 400, `model_currently_not_support`, 現在のモデルは利用できません + - 400, `workflow_not_found`, 指定されたワークフローバージョンが見つかりません + - 400, `draft_workflow_error`, ドラフトワークフローバージョンは使用できません + - 400, `workflow_id_format_error`, ワークフローID形式エラー、UUID形式が必要です - 400, `completion_request_error`, テキスト生成に失敗しました - 500, 内部サーバーエラー diff --git a/web/app/components/develop/template/template_chat.zh.mdx b/web/app/components/develop/template/template_chat.zh.mdx index e140325583..998d524b55 100644 --- a/web/app/components/develop/template/template_chat.zh.mdx +++ b/web/app/components/develop/template/template_chat.zh.mdx @@ -73,6 +73,9 @@ import { Row, Col, Properties, Property, Heading, SubProperty } from '../md.tsx' (选填)自动生成标题,默认 `true`。 若设置为 `false`,则可通过调用会话重命名接口并设置 `auto_generate` 为 `true` 实现异步生成标题。 + + (选填)工作流ID,用于指定特定版本,如果不提供则使用默认的已发布版本。 + (选填)链路追踪ID。适用于与业务系统已有的trace组件打通,实现端到端分布式追踪等场景。如果未指定,系统会自动生成trace_id。支持以下三种方式传递,具体优先级依次为:
    - Header:通过 HTTP Header X-Trace-Id 传递,优先级最高。
    @@ -181,6 +184,9 @@ import { Row, Col, Properties, Property, Heading, SubProperty } from '../md.tsx' - 400,`provider_not_initialize`,无可用模型凭据配置 - 400,`provider_quota_exceeded`,模型调用额度不足 - 400,`model_currently_not_support`,当前模型不可用 + - 400,`workflow_not_found`,指定的工作流版本未找到 + - 400,`draft_workflow_error`,无法使用草稿工作流版本 + - 400,`workflow_id_format_error`,工作流ID格式错误,需要UUID格式 - 400,`completion_request_error`,文本生成失败 - 500,服务内部异常 diff --git a/web/app/components/develop/template/template_workflow.en.mdx b/web/app/components/develop/template/template_workflow.en.mdx index 8ac1db3287..19487a852d 100644 --- a/web/app/components/develop/template/template_workflow.en.mdx +++ b/web/app/components/develop/template/template_workflow.en.mdx @@ -338,6 +338,240 @@ Workflow applications offers non-session support and is ideal for translation, a --- + + +
    + Execute a specific version of workflow by specifying the workflow ID in the path parameter. + + ### Path + - `workflow_id` (string) Required Workflow ID to specify a specific version of workflow + + How to obtain: In the version history interface, click the copy icon on the right side of each version entry to copy the complete workflow ID. Each version entry contains a copyable ID field. + + ### Request Body + - `inputs` (object) Required + Allows the entry of various variable values defined by the App. + The `inputs` parameter contains multiple key/value pairs, with each key corresponding to a specific variable and each value being the specific value for that variable. + The workflow application requires at least one key/value pair to be inputted. The variable can be of File Array type. + File Array type variable is suitable for inputting files combined with text understanding and answering questions, available only when the model supports file parsing and understanding capability. + If the variable is of File Array type, the corresponding value should be a list whose elements contain following attributions: + - `type` (string) Supported type: + - `document` ('TXT', 'MD', 'MARKDOWN', 'PDF', 'HTML', 'XLSX', 'XLS', 'DOCX', 'CSV', 'EML', 'MSG', 'PPTX', 'PPT', 'XML', 'EPUB') + - `image` ('JPG', 'JPEG', 'PNG', 'GIF', 'WEBP', 'SVG') + - `audio` ('MP3', 'M4A', 'WAV', 'WEBM', 'AMR') + - `video` ('MP4', 'MOV', 'MPEG', 'MPGA') + - `custom` (Other file types) + - `transfer_method` (string) Transfer method, `remote_url` for image URL / `local_file` for file upload + - `url` (string) Image URL (when the transfer method is `remote_url`) + - `upload_file_id` (string) Uploaded file ID, which must be obtained by uploading through the File Upload API in advance (when the transfer method is `local_file`) + + - `response_mode` (string) Required + The mode of response return, supporting: + - `streaming` Streaming mode (recommended), implements a typewriter-like output through SSE ([Server-Sent Events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events)). + - `blocking` Blocking mode, returns result after execution is complete. (Requests may be interrupted if the process is long) + Due to Cloudflare restrictions, the request will be interrupted without a return after 100 seconds. + - `user` (string) Required + User identifier, used to define the identity of the end-user for retrieval and statistics. + Should be uniquely defined by the developer within the application. +
    + The user identifier should be consistent with the user passed in the message sending interface. The Service API does not share conversations created by the WebApp. + - `files` (array[object]) Optional + - `trace_id` (string) Optional + Trace ID. Used for integration with existing business trace components to achieve end-to-end distributed tracing. If not provided, the system will automatically generate a trace_id. Supports the following three ways to pass, in order of priority: + 1. Header: via HTTP Header `X-Trace-Id`, highest priority. + 2. Query parameter: via URL query parameter `trace_id`. + 3. Request Body: via request body field `trace_id` (i.e., this field). + + ### Response + When `response_mode` is `blocking`, return a CompletionResponse object. + When `response_mode` is `streaming`, return a ChunkCompletionResponse stream. + + ### CompletionResponse + Returns the App result, `Content-Type` is `application/json`. + - `workflow_run_id` (string) Unique ID of workflow execution + - `task_id` (string) Task ID, used for request tracking and the below Stop Generate API + - `data` (object) detail of result + - `id` (string) ID of workflow execution + - `workflow_id` (string) ID of related workflow + - `status` (string) status of execution, `running` / `succeeded` / `failed` / `stopped` + - `outputs` (json) Optional content of output + - `error` (string) Optional reason of error + - `elapsed_time` (float) Optional total seconds to be used + - `total_tokens` (int) Optional tokens to be used + - `total_steps` (int) default 0 + - `created_at` (timestamp) start time + - `finished_at` (timestamp) end time + + ### ChunkCompletionResponse + Returns the stream chunks outputted by the App, `Content-Type` is `text/event-stream`. + Each streaming chunk starts with `data:`, separated by two newline characters `\n\n`, as shown below: + + ```streaming {{ title: 'Response' }} + data: {"event": "text_chunk", "workflow_run_id": "b85e5fc5-751b-454d-b14e-dc5f240b0a31", "task_id": "bd029338-b068-4d34-a331-fc85478922c2", "data": {"text": "\u4e3a\u4e86", "from_variable_selector": ["1745912968134", "text"]}}\n\n + ``` + + The structure of the streaming chunks varies depending on the `event`: + - `event: workflow_started` workflow starts execution + - `task_id` (string) Task ID, used for request tracking and the below Stop Generate API + - `workflow_run_id` (string) Unique ID of workflow execution + - `event` (string) fixed to `workflow_started` + - `data` (object) detail + - `id` (string) Unique ID of workflow execution + - `workflow_id` (string) ID of related workflow + - `created_at` (timestamp) Creation timestamp, e.g., 1705395332 + - `event: node_started` node execution started + - `task_id` (string) Task ID, used for request tracking and the below Stop Generate API + - `workflow_run_id` (string) Unique ID of workflow execution + - `event` (string) fixed to `node_started` + - `data` (object) detail + - `id` (string) Unique ID of workflow execution + - `node_id` (string) ID of node + - `node_type` (string) type of node + - `title` (string) name of node + - `index` (int) Execution sequence number, used to display Tracing Node sequence + - `predecessor_node_id` (string) optional Prefix node ID, used for canvas display execution path + - `inputs` (object) Contents of all preceding node variables used in the node + - `created_at` (timestamp) timestamp of start, e.g., 1705395332 + - `event: text_chunk` Text fragment + - `task_id` (string) Task ID, used for request tracking and the below Stop Generate API + - `workflow_run_id` (string) Unique ID of workflow execution + - `event` (string) fixed to `text_chunk` + - `data` (object) detail + - `text` (string) Text content + - `from_variable_selector` (array) Text source path, helps developers understand which variable of which node the text is generated from + - `event: node_finished` node execution finished, success and failure are different states in the same event + - `task_id` (string) Task ID, used for request tracking and the below Stop Generate API + - `workflow_run_id` (string) Unique ID of workflow execution + - `event` (string) fixed to `node_finished` + - `data` (object) detail + - `id` (string) Unique ID of node execution + - `node_id` (string) ID of node + - `index` (int) Execution sequence number, used to display Tracing Node sequence + - `predecessor_node_id` (string) optional Prefix node ID, used for canvas display execution path + - `inputs` (object) Contents of all preceding node variables used in the node + - `process_data` (json) Optional Process data of node + - `outputs` (json) Optional content of output + - `status` (string) status of execution `running` / `succeeded` / `failed` / `stopped` + - `error` (string) Optional reason of error + - `elapsed_time` (float) Optional total seconds to be used + - `execution_metadata` (json) metadata + - `total_tokens` (int) optional tokens to be used + - `total_price` (decimal) optional total cost + - `currency` (string) optional currency, such as `USD` / `RMB` + - `created_at` (timestamp) timestamp of start, e.g., 1705395332 + - `event: workflow_finished` workflow execution finished, success and failure are different states in the same event + - `task_id` (string) Task ID, used for request tracking and the below Stop Generate API + - `workflow_run_id` (string) Unique ID of workflow execution + - `event` (string) fixed to `workflow_finished` + - `data` (object) detail + - `id` (string) Unique ID of workflow execution + - `workflow_id` (string) ID of related workflow + - `status` (string) status of execution `running` / `succeeded` / `failed` / `stopped` + - `outputs` (json) Optional content of output + - `error` (string) Optional reason of error + - `elapsed_time` (float) Optional total seconds to be used + - `total_tokens` (int) Optional tokens to be used + - `total_steps` (int) default 0 + - `created_at` (timestamp) start time + - `finished_at` (timestamp) end time + - `event: tts_message` TTS audio stream event, i.e., speech synthesis output. The content is an audio block in Mp3 format, encoded as a base64 string, which can be decoded directly when playing. (Only available when auto-play is enabled) + - `task_id` (string) Task ID, used for request tracking and the below Stop Generate API + - `message_id` (string) Unique message ID + - `audio` (string) The audio block after speech synthesis is encoded as base64 text content, which can be directly base64 decoded and sent to the player when playing + - `created_at` (int) Creation timestamp, e.g., 1705395332 + - `event: tts_message_end` TTS audio stream end event, receiving this event indicates the end of audio stream return. + - `task_id` (string) Task ID, used for request tracking and the below Stop Generate API + - `message_id` (string) Unique message ID + - `audio` (string) The end event has no audio, so this is an empty string + - `created_at` (int) Creation timestamp, e.g., 1705395332 + - `event: ping` Ping event every 10s to keep the connection alive. + + ### Errors + - 400, `invalid_param`, Invalid input parameters + - 400, `app_unavailable`, App configuration unavailable + - 400, `provider_not_initialize`, No available model credentials configured + - 400, `provider_quota_exceeded`, Insufficient model call quota + - 400, `model_currently_not_support`, Current model unavailable + - 400, `workflow_not_found`, Specified workflow version not found + - 400, `draft_workflow_error`, Cannot use draft workflow version + - 400, `workflow_id_format_error`, Workflow ID format error, UUID format required + - 400, `workflow_request_error`, Workflow execution failed + - 500, Internal service error + + + + + ```bash {{ title: 'cURL' }} + curl -X POST '${props.appDetail.api_base_url}/workflows/{workflow_id}/run' \ + --header 'Authorization: Bearer {api_key}' \ + --header 'Content-Type: application/json' \ + --data-raw '{ + "inputs": {}, + "response_mode": "streaming", + "user": "abc-123" + }' + ``` + + + ```json {{ title: 'File variable example' }} + { + "inputs": { + "{variable_name}": + [ + { + "transfer_method": "local_file", + "upload_file_id": "{upload_file_id}", + "type": "{document_type}" + } + ] + } + } + ``` + + ### Blocking Mode + + ```json {{ title: 'Response' }} + { + "workflow_run_id": "djflajgkldjgd", + "task_id": "9da23599-e713-473b-982c-4328d4f5c78a", + "data": { + "id": "fdlsjfjejkghjda", + "workflow_id": "fldjaslkfjlsda", + "status": "succeeded", + "outputs": { + "text": "Nice to meet you." + }, + "error": null, + "elapsed_time": 0.875, + "total_tokens": 3562, + "total_steps": 8, + "created_at": 1705407629, + "finished_at": 1727807631 + } + } + ``` + + ### Streaming Mode + + ```streaming {{ title: 'Response' }} + data: {"event": "workflow_started", "task_id": "5ad4cb98-f0c7-4085-b384-88c403be6290", "workflow_run_id": "5ad498-f0c7-4085-b384-88cbe6290", "data": {"id": "5ad498-f0c7-4085-b384-88cbe6290", "workflow_id": "dfjasklfjdslag", "created_at": 1679586595}} + data: {"event": "node_started", "task_id": "5ad4cb98-f0c7-4085-b384-88c403be6290", "workflow_run_id": "5ad498-f0c7-4085-b384-88cbe6290", "data": {"id": "5ad498-f0c7-4085-b384-88cbe6290", "node_id": "dfjasklfjdslag", "node_type": "start", "title": "Start", "index": 0, "predecessor_node_id": "fdljewklfklgejlglsd", "inputs": {}, "created_at": 1679586595}} + data: {"event": "node_finished", "task_id": "5ad4cb98-f0c7-4085-b384-88c403be6290", "workflow_run_id": "5ad498-f0c7-4085-b384-88cbe6290", "data": {"id": "5ad498-f0c7-4085-b384-88cbe6290", "node_id": "dfjasklfjdslag", "node_type": "start", "title": "Start", "index": 0, "predecessor_node_id": "fdljewklfklgejlglsd", "inputs": {}, "outputs": {}, "status": "succeeded", "elapsed_time": 0.324, "execution_metadata": {"total_tokens": 63127864, "total_price": 2.378, "currency": "USD"}, "created_at": 1679586595}} + data: {"event": "workflow_finished", "task_id": "5ad4cb98-f0c7-4085-b384-88c403be6290", "workflow_run_id": "5ad498-f0c7-4085-b384-88cbe6290", "data": {"id": "5ad498-f0c7-4085-b384-88cbe6290", "workflow_id": "dfjasklfjdslag", "outputs": {}, "status": "succeeded", "elapsed_time": 0.324, "total_tokens": 63127864, "total_steps": "1", "created_at": 1679586595, "finished_at": 1679976595}} + data: {"event": "tts_message", "conversation_id": "23dd85f3-1a41-4ea0-b7a9-062734ccfaf9", "message_id": "a8bdc41c-13b2-4c18-bfd9-054b9803038c", "created_at": 1721205487, "task_id": "3bf8a0bb-e73b-4690-9e66-4e429bad8ee7", "audio": "qqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqq"} + data: {"event": "tts_message_end", "conversation_id": "23dd85f3-1a41-4ea0-b7a9-062734ccfaf9", "message_id": "a8bdc41c-13b2-4c18-bfd9-054b9803038c", "created_at": 1721205487, "task_id": "3bf8a0bb-e73b-4690-9e66-4e429bad8ee7", "audio": ""} + ``` + + + + +--- + + + + パスパラメータでワークフローIDを指定して、特定バージョンのワークフローを実行します。 + + ### パス + - `workflow_id` (string) 必須 特定バージョンのワークフローを指定するためのワークフローID + + 取得方法:バージョン履歴で特定バージョンのワークフローIDを照会できます。 + + ### リクエストボディ + - `inputs` (object) 必須 + App で定義された各変数値を入力できます。 + inputs パラメータには複数のキー/値ペアが含まれており、各キーは特定の変数に対応し、各値はその変数の具体的な値です。変数はファイルリスト型にすることができます。 + ファイルリスト型変数は、ファイルをテキスト理解と組み合わせて質問に答えるために入力するのに適しており、モデルがファイル解析機能をサポートしている場合のみ使用できます。変数がファイルリスト型の場合、その変数に対応する値はリスト形式である必要があり、各要素には以下の内容が含まれます: + - `type` (string) サポートされるタイプ: + - `document` 具体的なタイプには以下が含まれます:'TXT', 'MD', 'MARKDOWN', 'PDF', 'HTML', 'XLSX', 'XLS', 'DOCX', 'CSV', 'EML', 'MSG', 'PPTX', 'PPT', 'XML', 'EPUB' + - `image` 具体的なタイプには以下が含まれます:'JPG', 'JPEG', 'PNG', 'GIF', 'WEBP', 'SVG' + - `audio` 具体的なタイプには以下が含まれます:'MP3', 'M4A', 'WAV', 'WEBM', 'AMR' + - `video` 具体的なタイプには以下が含まれます:'MP4', 'MOV', 'MPEG', 'MPGA' + - `custom` 具体的なタイプには以下が含まれます:その他のファイルタイプ + - `transfer_method` (string) 転送方法、`remote_url` 画像URL / `local_file` ファイルアップロード + - `url` (string) 画像URL(転送方法が `remote_url` の場合のみ) + - `upload_file_id` (string) アップロードされたファイルID(転送方法が `local_file` の場合のみ) + - `response_mode` (string) 必須 + 応答返却モード、以下をサポート: + - `streaming` ストリーミングモード(推奨)。SSE(**[Server-Sent Events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events)**)をベースにタイプライター風の出力を実現。 + - `blocking` ブロッキングモード、実行完了後に結果を返却。(プロセスが長い場合、リクエストが中断される可能性があります)。 + Cloudflare の制限により、100秒後に応答がない場合、リクエストは中断されます。 + - `user` (string) 必須 + ユーザー識別子、エンドユーザーのアイデンティティを定義し、検索・統計を容易にするために使用されます。 + 開発者が定義するルールで、アプリケーション内でユーザー識別子が一意である必要があります。API は WebApp で作成されたセッションにアクセスできません。 + - `files` (array[object]) オプション + - `trace_id` (string) オプション + トレースID。既存のビジネスシステムのトレースコンポーネントと統合して、エンドツーエンドの分散トレーシングを実現するために使用されます。指定されていない場合、システムは自動的に `trace_id` を生成します。以下の3つの方法で渡すことができ、優先順位は以下の通りです: + 1. ヘッダー:HTTP ヘッダー `X-Trace-Id` で渡すことを推奨、最高優先度。 + 2. クエリパラメータ:URL クエリパラメータ `trace_id` で渡す。 + 3. リクエストボディ:リクエストボディフィールド `trace_id` で渡す(つまり、このフィールド)。 + + ### 応答 + `response_mode` が `blocking` の場合、CompletionResponse オブジェクトを返します。 + `response_mode` が `streaming` の場合、ChunkCompletionResponse オブジェクトのストリーミングシーケンスを返します。 + + ### CompletionResponse + 完全な App 結果を返し、`Content-Type` は `application/json` です。 + - `workflow_run_id` (string) ワークフロー実行ID + - `task_id` (string) タスクID、リクエスト追跡と以下の停止応答インターフェースに使用 + - `data` (object) 詳細内容 + - `id` (string) ワークフロー実行ID + - `workflow_id` (string) 関連するワークフローID + - `status` (string) 実行ステータス、`running` / `succeeded` / `failed` / `stopped` + - `outputs` (json) オプション 出力内容 + - `error` (string) オプション エラー理由 + - `elapsed_time` (float) オプション 使用時間(s) + - `total_tokens` (int) オプション 使用されるトークンの総数 + - `total_steps` (int) 総ステップ数(冗長)、デフォルト 0 + - `created_at` (timestamp) 開始時間 + - `finished_at` (timestamp) 終了時間 + + ### ChunkCompletionResponse + App の出力ストリーミングチャンクを返し、`Content-Type` は `text/event-stream` です。 + 各ストリーミングチャンクは `data:` で始まり、チャンク間は `\n\n` つまり2つの改行文字で区切られます。以下のようになります: + + ```streaming {{ title: '応答' }} + data: {"event": "text_chunk", "workflow_run_id": "b85e5fc5-751b-454d-b14e-dc5f240b0a31", "task_id": "bd029338-b068-4d34-a331-fc85478922c2", "data": {"text": "\u4e3a\u4e86", "from_variable_selector": ["1745912968134", "text"]}}\n\n + ``` + + ストリーミングチャンクは `event` によって構造が異なり、以下のタイプが含まれます: + - `event: workflow_started` ワークフロー実行開始 + - `task_id` (string) タスクID、リクエスト追跡と以下の停止応答インターフェースに使用 + - `workflow_run_id` (string) ワークフロー実行ID + - `event` (string) `workflow_started` に固定 + - `data` (object) 詳細内容 + - `id` (string) ワークフロー実行ID + - `workflow_id` (string) 関連するワークフローID + - `created_at` (timestamp) 開始時間 + - `event: node_started` ノード実行開始 + - `task_id` (string) タスクID、リクエスト追跡と以下の停止応答インターフェースに使用 + - `workflow_run_id` (string) ワークフロー実行ID + - `event` (string) `node_started` に固定 + - `data` (object) 詳細内容 + - `id` (string) ワークフロー実行ID + - `node_id` (string) ノードID + - `node_type` (string) ノードタイプ + - `title` (string) ノード名 + - `index` (int) 実行シーケンス番号、Tracing Node シーケンスの表示に使用 + - `predecessor_node_id` (string) 前置ノードID、キャンバス表示実行パスに使用 + - `inputs` (object) ノードで使用されるすべての前置ノード変数の内容 + - `created_at` (timestamp) 開始時間 + - `event: text_chunk` テキストフラグメント + - `task_id` (string) タスクID、リクエスト追跡と以下の停止応答インターフェースに使用 + - `workflow_run_id` (string) ワークフロー実行ID + - `event` (string) `text_chunk` に固定 + - `data` (object) 詳細内容 + - `text` (string) テキスト内容 + - `from_variable_selector` (array) テキストソースパス、開発者がテキストがどのノードのどの変数から生成されたかを理解するのに役立ちます + - `event: node_finished` ノード実行終了、成功と失敗は同じイベント内の異なる状態 + - `task_id` (string) タスクID、リクエスト追跡と以下の停止応答インターフェースに使用 + - `workflow_run_id` (string) ワークフロー実行ID + - `event` (string) `node_finished` に固定 + - `data` (object) 詳細内容 + - `id` (string) ノード実行ID + - `node_id` (string) ノードID + - `index` (int) 実行シーケンス番号、Tracing Node シーケンスの表示に使用 + - `predecessor_node_id` (string) オプション 前置ノードID、キャンバス表示実行パスに使用 + - `inputs` (object) ノードで使用されるすべての前置ノード変数の内容 + - `process_data` (json) オプション ノードプロセスデータ + - `outputs` (json) オプション 出力内容 + - `status` (string) 実行ステータス `running` / `succeeded` / `failed` / `stopped` + - `error` (string) オプション エラー理由 + - `elapsed_time` (float) オプション 使用時間(s) + - `execution_metadata` (json) メタデータ + - `total_tokens` (int) オプション 使用されるトークンの総数 + - `total_price` (decimal) オプション 総費用 + - `currency` (string) オプション 通貨、例:`USD` / `RMB` + - `created_at` (timestamp) 開始時間 + - `event: workflow_finished` ワークフロー実行終了、成功と失敗は同じイベント内の異なる状態 + - `task_id` (string) タスクID、リクエスト追跡と以下の停止応答インターフェースに使用 + - `workflow_run_id` (string) ワークフロー実行ID + - `event` (string) `workflow_finished` に固定 + - `data` (object) 詳細内容 + - `id` (string) ワークフロー実行ID + - `workflow_id` (string) 関連するワークフローID + - `status` (string) 実行ステータス `running` / `succeeded` / `failed` / `stopped` + - `outputs` (json) オプション 出力内容 + - `error` (string) オプション エラー理由 + - `elapsed_time` (float) オプション 使用時間(s) + - `total_tokens` (int) オプション 使用されるトークンの総数 + - `total_steps` (int) 総ステップ数(冗長)、デフォルト 0 + - `created_at` (timestamp) 開始時間 + - `finished_at` (timestamp) 終了時間 + - `event: tts_message` TTS オーディオストリームイベント、つまり:音声合成出力。内容はMp3形式のオーディオブロックで、base64エンコードされた文字列として、再生時に直接デコードできます。(自動再生が有効な場合のみこのメッセージがあります) + - `task_id` (string) タスクID、リクエスト追跡と以下の停止応答インターフェースに使用 + - `message_id` (string) メッセージ一意ID + - `audio` (string) 音声合成後のオーディオブロックはbase64エンコードされたテキスト内容として、再生時に直接base64デコードしてプレーヤーに送信できます + - `created_at` (int) 作成タイムスタンプ、例:1705395332 + - `event: tts_message_end` TTS オーディオストリーム終了イベント、このイベントを受信すると、オーディオストリームの返却が終了したことを示します。 + - `task_id` (string) タスクID、リクエスト追跡と以下の停止応答インターフェースに使用 + - `message_id` (string) メッセージ一意ID + - `audio` (string) 終了イベントにはオーディオがないため、ここは空文字列です + - `created_at` (int) 作成タイムスタンプ、例:1705395332 + - `event: ping` 10秒ごとのpingイベント、接続を維持します。 + + ### エラー + - 400,`invalid_param`,入力パラメータ異常 + - 400,`app_unavailable`,App 設定が利用できません + - 400,`provider_not_initialize`,利用可能なモデル認証情報設定がありません + - 400,`provider_quota_exceeded`,モデル呼び出しクォータが不足しています + - 400,`model_currently_not_support`,現在のモデルが利用できません + - 400,`workflow_not_found`,指定されたワークフローバージョンが見つかりません + - 400,`draft_workflow_error`,ドラフトワークフローバージョンを使用できません + - 400,`workflow_id_format_error`,ワークフローID形式エラー、UUID形式が必要です + - 400,`workflow_request_error`,ワークフロー実行に失敗しました + - 500,サービス内部異常 + + + + + ```bash {{ title: 'cURL' }} + curl -X POST '${props.appDetail.api_base_url}/workflows/{workflow_id}/run' \ + --header 'Authorization: Bearer {api_key}' \ + --header 'Content-Type: application/json' \ + --data-raw '{ + "inputs": {}, + "response_mode": "streaming", + "user": "abc-123" + }' + ``` + + + ```json {{ title: 'ファイル変数の例' }} + { + "inputs": { + "{variable_name}": + [ + { + "transfer_method": "local_file", + "upload_file_id": "{upload_file_id}", + "type": "{document_type}" + } + ] + } + } + ``` + + ### ブロッキングモード + + ```json {{ title: '応答' }} + { + "workflow_run_id": "djflajgkldjgd", + "task_id": "9da23599-e713-473b-982c-4328d4f5c78a", + "data": { + "id": "fdlsjfjejkghjda", + "workflow_id": "fldjaslkfjlsda", + "status": "succeeded", + "outputs": { + "text": "Nice to meet you." + }, + "error": null, + "elapsed_time": 0.875, + "total_tokens": 3562, + "total_steps": 8, + "created_at": 1705407629, + "finished_at": 1727807631 + } + } + ``` + + ### ストリーミングモード + + ```streaming {{ title: '応答' }} + data: {"event": "workflow_started", "task_id": "5ad4cb98-f0c7-4085-b384-88c403be6290", "workflow_run_id": "5ad498-f0c7-4085-b384-88cbe6290", "data": {"id": "5ad498-f0c7-4085-b384-88cbe6290", "workflow_id": "dfjasklfjdslag", "created_at": 1679586595}} + data: {"event": "node_started", "task_id": "5ad4cb98-f0c7-4085-b384-88c403be6290", "workflow_run_id": "5ad498-f0c7-4085-b384-88cbe6290", "data": {"id": "5ad498-f0c7-4085-b384-88cbe6290", "node_id": "dfjasklfjdslag", "node_type": "start", "title": "Start", "index": 0, "predecessor_node_id": "fdljewklfklgejlglsd", "inputs": {}, "created_at": 1679586595}} + data: {"event": "node_finished", "task_id": "5ad4cb98-f0c7-4085-b384-88c403be6290", "workflow_run_id": "5ad498-f0c7-4085-b384-88cbe6290", "data": {"id": "5ad498-f0c7-4085-b384-88cbe6290", "node_id": "dfjasklfjdslag", "node_type": "start", "title": "Start", "index": 0, "predecessor_node_id": "fdljewklfklgejlglsd", "inputs": {}, "outputs": {}, "status": "succeeded", "elapsed_time": 0.324, "execution_metadata": {"total_tokens": 63127864, "total_price": 2.378, "currency": "USD"}, "created_at": 1679586595}} + data: {"event": "workflow_finished", "task_id": "5ad4cb98-f0c7-4085-b384-88c403be6290", "workflow_run_id": "5ad498-f0c7-4085-b384-88cbe6290", "data": {"id": "5ad498-f0c7-4085-b384-88cbe6290", "workflow_id": "dfjasklfjdslag", "outputs": {}, "status": "succeeded", "elapsed_time": 0.324, "total_tokens": 63127864, "total_steps": "1", "created_at": 1679586595, "finished_at": 1679976595}} + data: {"event": "tts_message", "conversation_id": "23dd85f3-1a41-4ea0-b7a9-062734ccfaf9", "message_id": "a8bdc41c-13b2-4c18-bfd9-054b9803038c", "created_at": 1721205487, "task_id": "3bf8a0bb-e73b-4690-9e66-4e429bad8ee7", "audio": "qqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqq"} + data: {"event": "tts_message_end", "conversation_id": "23dd85f3-1a41-4ea0-b7a9-062734ccfaf9", "message_id": "a8bdc41c-13b2-4c18-bfd9-054b9803038c", "created_at": 1721205487, "task_id": "3bf8a0bb-e73b-4690-9e66-4e429bad8ee7", "audio": ""} + ``` + + + + +--- + + + + 执行指定版本的工作流,通过路径参数指定工作流ID。 + + ### Path + - `workflow_id` (string) Required 工作流ID,用于指定特定版本的工作流 + + 获取方式:可以在版本历史中查询特定版本的工作流ID。 + + ### Request Body + - `inputs` (object) Required + 允许传入 App 定义的各变量值。 + inputs 参数包含了多组键值对(Key/Value pairs),每组的键对应一个特定变量,每组的值则是该变量的具体值。变量可以是文件列表类型。 + 文件列表类型变量适用于传入文件结合文本理解并回答问题,仅当模型支持该类型文件解析能力时可用。如果该变量是文件列表类型,该变量对应的值应是列表格式,其中每个元素应包含以下内容: + - `type` (string) 支持类型: + - `document` 具体类型包含:'TXT', 'MD', 'MARKDOWN', 'PDF', 'HTML', 'XLSX', 'XLS', 'DOCX', 'CSV', 'EML', 'MSG', 'PPTX', 'PPT', 'XML', 'EPUB' + - `image` 具体类型包含:'JPG', 'JPEG', 'PNG', 'GIF', 'WEBP', 'SVG' + - `audio` 具体类型包含:'MP3', 'M4A', 'WAV', 'WEBM', 'AMR' + - `video` 具体类型包含:'MP4', 'MOV', 'MPEG', 'MPGA' + - `custom` 具体类型包含:其他文件类型 + - `transfer_method` (string) 传递方式,`remote_url` 图片地址 / `local_file` 上传文件 + - `url` (string) 图片地址(仅当传递方式为 `remote_url` 时) + - `upload_file_id` (string) 上传文件 ID(仅当传递方式为 `local_file` 时) + - `response_mode` (string) Required + 返回响应模式,支持: + - `streaming` 流式模式(推荐)。基于 SSE(**[Server-Sent Events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events)**)实现类似打字机输出方式的流式返回。 + - `blocking` 阻塞模式,等待执行完毕后返回结果。(请求若流程较长可能会被中断)。 + 由于 Cloudflare 限制,请求会在 100 秒超时无返回后中断。 + - `user` (string) Required + 用户标识,用于定义终端用户的身份,方便检索、统计。 + 由开发者定义规则,需保证用户标识在应用内唯一。API 无法访问 WebApp 创建的会话。 + - `files` (array[object]) 可选 + - `trace_id` (string) Optional + 链路追踪ID。适用于与业务系统已有的trace组件打通,实现端到端分布式追踪等场景。如果未指定,系统将自动生成 `trace_id`。支持以下三种方式传递,具体优先级依次为: + 1. Header:推荐通过 HTTP Header `X-Trace-Id` 传递,优先级最高。 + 2. Query 参数:通过 URL 查询参数 `trace_id` 传递。 + 3. Request Body:通过请求体字段 `trace_id` 传递(即本字段)。 + + ### Response + 当 `response_mode` 为 `blocking` 时,返回 CompletionResponse object。 + 当 `response_mode` 为 `streaming`时,返回 ChunkCompletionResponse object 流式序列。 + + ### CompletionResponse + 返回完整的 App 结果,`Content-Type` 为 `application/json` 。 + - `workflow_run_id` (string) workflow 执行 ID + - `task_id` (string) 任务 ID,用于请求跟踪和下方的停止响应接口 + - `data` (object) 详细内容 + - `id` (string) workflow 执行 ID + - `workflow_id` (string) 关联 Workflow ID + - `status` (string) 执行状态, `running` / `succeeded` / `failed` / `stopped` + - `outputs` (json) Optional 输出内容 + - `error` (string) Optional 错误原因 + - `elapsed_time` (float) Optional 耗时(s) + - `total_tokens` (int) Optional 总使用 tokens + - `total_steps` (int) 总步数(冗余),默认 0 + - `created_at` (timestamp) 开始时间 + - `finished_at` (timestamp) 结束时间 + + ### ChunkCompletionResponse + 返回 App 输出的流式块,`Content-Type` 为 `text/event-stream`。 + 每个流式块均为 data: 开头,块之间以 `\n\n` 即两个换行符分隔,如下所示: + + ```streaming {{ title: 'Response' }} + data: {"event": "text_chunk", "workflow_run_id": "b85e5fc5-751b-454d-b14e-dc5f240b0a31", "task_id": "bd029338-b068-4d34-a331-fc85478922c2", "data": {"text": "\u4e3a\u4e86", "from_variable_selector": ["1745912968134", "text"]}}\n\n + ``` + + 流式块中根据 `event` 不同,结构也不同,包含以下类型: + - `event: workflow_started` workflow 开始执行 + - `task_id` (string) 任务 ID,用于请求跟踪和下方的停止响应接口 + - `workflow_run_id` (string) workflow 执行 ID + - `event` (string) 固定为 `workflow_started` + - `data` (object) 详细内容 + - `id` (string) workflow 执行 ID + - `workflow_id` (string) 关联 Workflow ID + - `created_at` (timestamp) 开始时间 + - `event: node_started` node 开始执行 + - `task_id` (string) 任务 ID,用于请求跟踪和下方的停止响应接口 + - `workflow_run_id` (string) workflow 执行 ID + - `event` (string) 固定为 `node_started` + - `data` (object) 详细内容 + - `id` (string) workflow 执行 ID + - `node_id` (string) 节点 ID + - `node_type` (string) 节点类型 + - `title` (string) 节点名称 + - `index` (int) 执行序号,用于展示 Tracing Node 顺序 + - `predecessor_node_id` (string) 前置节点 ID,用于画布展示执行路径 + - `inputs` (object) 节点中所有使用到的前置节点变量内容 + - `created_at` (timestamp) 开始时间 + - `event: text_chunk` 文本片段 + - `task_id` (string) 任务 ID,用于请求跟踪和下方的停止响应接口 + - `workflow_run_id` (string) workflow 执行 ID + - `event` (string) 固定为 `text_chunk` + - `data` (object) 详细内容 + - `text` (string) 文本内容 + - `from_variable_selector` (array) 文本来源路径,帮助开发者了解文本是由哪个节点的哪个变量生成的 + - `event: node_finished` node 执行结束,成功失败同一事件中不同状态 + - `task_id` (string) 任务 ID,用于请求跟踪和下方的停止响应接口 + - `workflow_run_id` (string) workflow 执行 ID + - `event` (string) 固定为 `node_finished` + - `data` (object) 详细内容 + - `id` (string) node 执行 ID + - `node_id` (string) 节点 ID + - `index` (int) 执行序号,用于展示 Tracing Node 顺序 + - `predecessor_node_id` (string) optional 前置节点 ID,用于画布展示执行路径 + - `inputs` (object) 节点中所有使用到的前置节点变量内容 + - `process_data` (json) Optional 节点过程数据 + - `outputs` (json) Optional 输出内容 + - `status` (string) 执行状态 `running` / `succeeded` / `failed` / `stopped` + - `error` (string) Optional 错误原因 + - `elapsed_time` (float) Optional 耗时(s) + - `execution_metadata` (json) 元数据 + - `total_tokens` (int) optional 总使用 tokens + - `total_price` (decimal) optional 总费用 + - `currency` (string) optional 货币,如 `USD` / `RMB` + - `created_at` (timestamp) 开始时间 + - `event: workflow_finished` workflow 执行结束,成功失败同一事件中不同状态 + - `task_id` (string) 任务 ID,用于请求跟踪和下方的停止响应接口 + - `workflow_run_id` (string) workflow 执行 ID + - `event` (string) 固定为 `workflow_finished` + - `data` (object) 详细内容 + - `id` (string) workflow 执行 ID + - `workflow_id` (string) 关联 Workflow ID + - `status` (string) 执行状态 `running` / `succeeded` / `failed` / `stopped` + - `outputs` (json) Optional 输出内容 + - `error` (string) Optional 错误原因 + - `elapsed_time` (float) Optional 耗时(s) + - `total_tokens` (int) Optional 总使用 tokens + - `total_steps` (int) 总步数(冗余),默认 0 + - `created_at` (timestamp) 开始时间 + - `finished_at` (timestamp) 结束时间 + - `event: tts_message` TTS 音频流事件,即:语音合成输出。内容是Mp3格式的音频块,使用 base64 编码后的字符串,播放的时候直接解码即可。(开启自动播放才有此消息) + - `task_id` (string) 任务 ID,用于请求跟踪和下方的停止响应接口 + - `message_id` (string) 消息唯一 ID + - `audio` (string) 语音合成之后的音频块使用 Base64 编码之后的文本内容,播放的时候直接 base64 解码送入播放器即可 + - `created_at` (int) 创建时间戳,如:1705395332 + - `event: tts_message_end` TTS 音频流结束事件,收到这个事件表示音频流返回结束。 + - `task_id` (string) 任务 ID,用于请求跟踪和下方的停止响应接口 + - `message_id` (string) 消息唯一 ID + - `audio` (string) 结束事件是没有音频的,所以这里是空字符串 + - `created_at` (int) 创建时间戳,如:1705395332 + - `event: ping` 每 10s 一次的 ping 事件,保持连接存活。 + + ### Errors + - 400,`invalid_param`,传入参数异常 + - 400,`app_unavailable`,App 配置不可用 + - 400,`provider_not_initialize`,无可用模型凭据配置 + - 400,`provider_quota_exceeded`,模型调用额度不足 + - 400,`model_currently_not_support`,当前模型不可用 + - 400,`workflow_not_found`,指定的工作流版本未找到 + - 400,`draft_workflow_error`,无法使用草稿工作流版本 + - 400,`workflow_id_format_error`,工作流ID格式错误,需要UUID格式 + - 400,`workflow_request_error`,workflow 执行失败 + - 500,服务内部异常 + + + + + ```bash {{ title: 'cURL' }} + curl -X POST '${props.appDetail.api_base_url}/workflows/{workflow_id}/run' \ + --header 'Authorization: Bearer {api_key}' \ + --header 'Content-Type: application/json' \ + --data-raw '{ + "inputs": {}, + "response_mode": "streaming", + "user": "abc-123" + }' + ``` + + + ```json {{ title: 'File variable example' }} + { + "inputs": { + "{variable_name}": + [ + { + "transfer_method": "local_file", + "upload_file_id": "{upload_file_id}", + "type": "{document_type}" + } + ] + } + } + ``` + + ### Blocking Mode + + ```json {{ title: 'Response' }} + { + "workflow_run_id": "djflajgkldjgd", + "task_id": "9da23599-e713-473b-982c-4328d4f5c78a", + "data": { + "id": "fdlsjfjejkghjda", + "workflow_id": "fldjaslkfjlsda", + "status": "succeeded", + "outputs": { + "text": "Nice to meet you." + }, + "error": null, + "elapsed_time": 0.875, + "total_tokens": 3562, + "total_steps": 8, + "created_at": 1705407629, + "finished_at": 1727807631 + } + } + ``` + + ### Streaming Mode + + ```streaming {{ title: 'Response' }} + data: {"event": "workflow_started", "task_id": "5ad4cb98-f0c7-4085-b384-88c403be6290", "workflow_run_id": "5ad498-f0c7-4085-b384-88cbe6290", "data": {"id": "5ad498-f0c7-4085-b384-88cbe6290", "workflow_id": "dfjasklfjdslag", "created_at": 1679586595}} + data: {"event": "node_started", "task_id": "5ad4cb98-f0c7-4085-b384-88c403be6290", "workflow_run_id": "5ad498-f0c7-4085-b384-88cbe6290", "data": {"id": "5ad498-f0c7-4085-b384-88cbe6290", "node_id": "dfjasklfjdslag", "node_type": "start", "title": "Start", "index": 0, "predecessor_node_id": "fdljewklfklgejlglsd", "inputs": {}, "created_at": 1679586595}} + data: {"event": "node_finished", "task_id": "5ad4cb98-f0c7-4085-b384-88c403be6290", "workflow_run_id": "5ad498-f0c7-4085-b384-88cbe6290", "data": {"id": "5ad498-f0c7-4085-b384-88cbe6290", "node_id": "dfjasklfjdslag", "node_type": "start", "title": "Start", "index": 0, "predecessor_node_id": "fdljewklfklgejlglsd", "inputs": {}, "outputs": {}, "status": "succeeded", "elapsed_time": 0.324, "execution_metadata": {"total_tokens": 63127864, "total_price": 2.378, "currency": "USD"}, "created_at": 1679586595}} + data: {"event": "workflow_finished", "task_id": "5ad4cb98-f0c7-4085-b384-88c403be6290", "workflow_run_id": "5ad498-f0c7-4085-b384-88cbe6290", "data": {"id": "5ad498-f0c7-4085-b384-88cbe6290", "workflow_id": "dfjasklfjdslag", "outputs": {}, "status": "succeeded", "elapsed_time": 0.324, "total_tokens": 63127864, "total_steps": "1", "created_at": 1679586595, "finished_at": 1679976595}} + data: {"event": "tts_message", "conversation_id": "23dd85f3-1a41-4ea0-b7a9-062734ccfaf9", "message_id": "a8bdc41c-13b2-4c18-bfd9-054b9803038c", "created_at": 1721205487, "task_id": "3bf8a0bb-e73b-4690-9e66-4e429bad8ee7", "audio": "qqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqq"} + data: {"event": "tts_message_end", "conversation_id": "23dd85f3-1a41-4ea0-b7a9-062734ccfaf9", "message_id": "a8bdc41c-13b2-4c18-bfd9-054b9803038c", "created_at": 1721205487, "task_id": "3bf8a0bb-e73b-4690-9e66-4e429bad8ee7", "audio": ""} + ``` + + + + +--- + { key: VersionHistoryContextMenuOptions.edit, name: t('workflow.versionHistory.nameThisVersion'), }, + { + key: VersionHistoryContextMenuOptions.copyId, + name: t('workflow.versionHistory.copyId'), + }, ] - // eslint-disable-next-line react-hooks/exhaustive-deps - }, [isNamedVersion]) + }, [isNamedVersion, t]) return { deleteOperation, diff --git a/web/app/components/workflow/panel/version-history-panel/index.tsx b/web/app/components/workflow/panel/version-history-panel/index.tsx index 521ab58780..70acca7597 100644 --- a/web/app/components/workflow/panel/version-history-panel/index.tsx +++ b/web/app/components/workflow/panel/version-history-panel/index.tsx @@ -2,6 +2,7 @@ import React, { useCallback, useState } from 'react' import { useTranslation } from 'react-i18next' import { RiArrowDownDoubleLine, RiCloseLine, RiLoader2Line } from '@remixicon/react' +import copy from 'copy-to-clipboard' import { useNodesSyncDraft, useWorkflowRun } from '../../hooks' import { useStore, useWorkflowStore } from '../../store' import { VersionHistoryContextMenuOptions, WorkflowVersionFilterOptions } from '../../types' @@ -99,8 +100,15 @@ const VersionHistoryPanel = () => { case VersionHistoryContextMenuOptions.delete: setDeleteConfirmOpen(true) break + case VersionHistoryContextMenuOptions.copyId: + copy(item.id) + Toast.notify({ + type: 'success', + message: t('workflow.versionHistory.action.copyIdSuccess'), + }) + break } - }, []) + }, [t]) const handleCancel = useCallback((operation: VersionHistoryContextMenuOptions) => { switch (operation) { diff --git a/web/app/components/workflow/panel/version-history-panel/version-history-item.tsx b/web/app/components/workflow/panel/version-history-panel/version-history-item.tsx index 98fd7ce45c..797a3fbe4f 100644 --- a/web/app/components/workflow/panel/version-history-panel/version-history-item.tsx +++ b/web/app/components/workflow/panel/version-history-panel/version-history-item.tsx @@ -55,7 +55,6 @@ const VersionHistoryItem: React.FC = ({ useEffect(() => { if (isDraft) onClick(item) - // eslint-disable-next-line react-hooks/exhaustive-deps }, []) const handleClickItem = () => { diff --git a/web/app/components/workflow/types.ts b/web/app/components/workflow/types.ts index 11a424c5dd..5840a04f26 100644 --- a/web/app/components/workflow/types.ts +++ b/web/app/components/workflow/types.ts @@ -445,6 +445,7 @@ export enum VersionHistoryContextMenuOptions { restore = 'restore', edit = 'edit', delete = 'delete', + copyId = 'copyId', } export type ChildNodeTypeCount = { diff --git a/web/i18n/en-US/workflow.ts b/web/i18n/en-US/workflow.ts index 763739ba32..9a8492f50e 100644 --- a/web/i18n/en-US/workflow.ts +++ b/web/i18n/en-US/workflow.ts @@ -920,6 +920,7 @@ const translation = { defaultName: 'Untitled Version', nameThisVersion: 'Name this version', editVersionInfo: 'Edit version info', + copyId: 'Copy ID', editField: { title: 'Title', releaseNotes: 'Release Notes', @@ -936,7 +937,8 @@ const translation = { deleteFailure: 'Failed to delete version', updateSuccess: 'Version updated', updateFailure: 'Failed to update version', - }, + copyIdSuccess: 'ID copied to clipboard', + }, }, debug: { settingsTab: 'Settings', diff --git a/web/i18n/zh-Hans/workflow.ts b/web/i18n/zh-Hans/workflow.ts index b1c28c4666..25f05ce8ba 100644 --- a/web/i18n/zh-Hans/workflow.ts +++ b/web/i18n/zh-Hans/workflow.ts @@ -920,6 +920,7 @@ const translation = { defaultName: '未命名', nameThisVersion: '命名', editVersionInfo: '编辑信息', + copyId: '复制 ID', editField: { title: '标题', releaseNotes: '发布说明', @@ -936,6 +937,7 @@ const translation = { deleteFailure: '删除失败', updateSuccess: '版本信息已更新', updateFailure: '更新失败', + copyIdSuccess: 'ID 已复制到剪贴板', }, }, debug: { From 76d123fe1913414d3b233f8c96a1e2d3902b69ea Mon Sep 17 00:00:00 2001 From: Yongtao Huang Date: Sun, 3 Aug 2025 18:30:09 +0800 Subject: [PATCH 120/415] Fix segment query tenant bug and variable naming typo (#23321) Signed-off-by: Yongtao Huang --- api/services/dataset_service.py | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/api/services/dataset_service.py b/api/services/dataset_service.py index 1bcb597845..8934608da1 100644 --- a/api/services/dataset_service.py +++ b/api/services/dataset_service.py @@ -2372,7 +2372,7 @@ class SegmentService: ) if not segments: return - real_deal_segmment_ids = [] + real_deal_segment_ids = [] for segment in segments: indexing_cache_key = f"segment_{segment.id}_indexing" cache_result = redis_client.get(indexing_cache_key) @@ -2382,10 +2382,10 @@ class SegmentService: segment.disabled_at = None segment.disabled_by = None db.session.add(segment) - real_deal_segmment_ids.append(segment.id) + real_deal_segment_ids.append(segment.id) db.session.commit() - enable_segments_to_index_task.delay(real_deal_segmment_ids, dataset.id, document.id) + enable_segments_to_index_task.delay(real_deal_segment_ids, dataset.id, document.id) elif action == "disable": segments = ( db.session.query(DocumentSegment) @@ -2399,7 +2399,7 @@ class SegmentService: ) if not segments: return - real_deal_segmment_ids = [] + real_deal_segment_ids = [] for segment in segments: indexing_cache_key = f"segment_{segment.id}_indexing" cache_result = redis_client.get(indexing_cache_key) @@ -2409,10 +2409,10 @@ class SegmentService: segment.disabled_at = datetime.datetime.now(datetime.UTC).replace(tzinfo=None) segment.disabled_by = current_user.id db.session.add(segment) - real_deal_segmment_ids.append(segment.id) + real_deal_segment_ids.append(segment.id) db.session.commit() - disable_segments_from_index_task.delay(real_deal_segmment_ids, dataset.id, document.id) + disable_segments_from_index_task.delay(real_deal_segment_ids, dataset.id, document.id) else: raise InvalidActionError() @@ -2670,7 +2670,7 @@ class SegmentService: # check segment segment = ( db.session.query(DocumentSegment) - .where(DocumentSegment.id == segment_id, DocumentSegment.tenant_id == user_id) + .where(DocumentSegment.id == segment_id, DocumentSegment.tenant_id == tenant_id) .first() ) if not segment: From 0c925bd0882784f3e47f77ed4dd036d4d1ddad1f Mon Sep 17 00:00:00 2001 From: lyzno1 <92089059+lyzno1@users.noreply.github.com> Date: Sun, 3 Aug 2025 18:30:47 +0800 Subject: [PATCH 121/415] feat(ui): unify tag editing in app sidebar and add management entry to TagFilter (#23325) --- web/__tests__/unified-tags-logic.test.ts | 396 ++++++++++++++++++ .../(appDetailLayout)/[appId]/layout-main.tsx | 24 +- web/app/components/app-sidebar/app-info.tsx | 40 +- .../components/base/tag-management/filter.tsx | 10 + .../base/tag-management/selector.tsx | 20 +- web/service/apps.ts | 15 + 6 files changed, 496 insertions(+), 9 deletions(-) create mode 100644 web/__tests__/unified-tags-logic.test.ts diff --git a/web/__tests__/unified-tags-logic.test.ts b/web/__tests__/unified-tags-logic.test.ts new file mode 100644 index 0000000000..c920e28e0a --- /dev/null +++ b/web/__tests__/unified-tags-logic.test.ts @@ -0,0 +1,396 @@ +/** + * Unified Tags Editing - Pure Logic Tests + * + * This test file validates the core business logic and state management + * behaviors introduced in the recent 7 commits without requiring complex mocks. + */ + +describe('Unified Tags Editing - Pure Logic Tests', () => { + describe('Tag State Management Logic', () => { + it('should detect when tag values have changed', () => { + const currentValue = ['tag1', 'tag2'] + const newSelectedTagIDs = ['tag1', 'tag3'] + + // This is the valueNotChanged logic from TagSelector component + const valueNotChanged + = currentValue.length === newSelectedTagIDs.length + && currentValue.every(v => newSelectedTagIDs.includes(v)) + && newSelectedTagIDs.every(v => currentValue.includes(v)) + + expect(valueNotChanged).toBe(false) + }) + + it('should correctly identify unchanged tag values', () => { + const currentValue = ['tag1', 'tag2'] + const newSelectedTagIDs = ['tag2', 'tag1'] // Same tags, different order + + const valueNotChanged + = currentValue.length === newSelectedTagIDs.length + && currentValue.every(v => newSelectedTagIDs.includes(v)) + && newSelectedTagIDs.every(v => currentValue.includes(v)) + + expect(valueNotChanged).toBe(true) + }) + + it('should calculate correct tag operations for binding/unbinding', () => { + const currentValue = ['tag1', 'tag2'] + const selectedTagIDs = ['tag2', 'tag3'] + + // This is the handleValueChange logic from TagSelector + const addTagIDs = selectedTagIDs.filter(v => !currentValue.includes(v)) + const removeTagIDs = currentValue.filter(v => !selectedTagIDs.includes(v)) + + expect(addTagIDs).toEqual(['tag3']) + expect(removeTagIDs).toEqual(['tag1']) + }) + + it('should handle empty tag arrays correctly', () => { + const currentValue: string[] = [] + const selectedTagIDs = ['tag1'] + + const addTagIDs = selectedTagIDs.filter(v => !currentValue.includes(v)) + const removeTagIDs = currentValue.filter(v => !selectedTagIDs.includes(v)) + + expect(addTagIDs).toEqual(['tag1']) + expect(removeTagIDs).toEqual([]) + expect(currentValue.length).toBe(0) // Verify empty array usage + }) + + it('should handle removing all tags', () => { + const currentValue = ['tag1', 'tag2'] + const selectedTagIDs: string[] = [] + + const addTagIDs = selectedTagIDs.filter(v => !currentValue.includes(v)) + const removeTagIDs = currentValue.filter(v => !selectedTagIDs.includes(v)) + + expect(addTagIDs).toEqual([]) + expect(removeTagIDs).toEqual(['tag1', 'tag2']) + expect(selectedTagIDs.length).toBe(0) // Verify empty array usage + }) + }) + + describe('Fallback Logic (from layout-main.tsx)', () => { + it('should trigger fallback when tags are missing or empty', () => { + const appDetailWithoutTags = { tags: [] } + const appDetailWithTags = { tags: [{ id: 'tag1' }] } + const appDetailWithUndefinedTags = { tags: undefined as any } + + // This simulates the condition in layout-main.tsx + const shouldFallback1 = !appDetailWithoutTags.tags || appDetailWithoutTags.tags.length === 0 + const shouldFallback2 = !appDetailWithTags.tags || appDetailWithTags.tags.length === 0 + const shouldFallback3 = !appDetailWithUndefinedTags.tags || appDetailWithUndefinedTags.tags.length === 0 + + expect(shouldFallback1).toBe(true) // Empty array should trigger fallback + expect(shouldFallback2).toBe(false) // Has tags, no fallback needed + expect(shouldFallback3).toBe(true) // Undefined tags should trigger fallback + }) + + it('should preserve tags when fallback succeeds', () => { + const originalAppDetail = { tags: [] as any[] } + const fallbackResult = { tags: [{ id: 'tag1', name: 'fallback-tag' }] } + + // This simulates the successful fallback in layout-main.tsx + if (fallbackResult?.tags) + originalAppDetail.tags = fallbackResult.tags + + expect(originalAppDetail.tags).toEqual(fallbackResult.tags) + expect(originalAppDetail.tags.length).toBe(1) + }) + + it('should continue with empty tags when fallback fails', () => { + const originalAppDetail: { tags: any[] } = { tags: [] } + const fallbackResult: { tags?: any[] } | null = null + + // This simulates fallback failure in layout-main.tsx + if (fallbackResult?.tags) + originalAppDetail.tags = fallbackResult.tags + + expect(originalAppDetail.tags).toEqual([]) + }) + }) + + describe('TagSelector Auto-initialization Logic', () => { + it('should trigger getTagList when tagList is empty', () => { + const tagList: any[] = [] + let getTagListCalled = false + const getTagList = () => { + getTagListCalled = true + } + + // This simulates the useEffect in TagSelector + if (tagList.length === 0) + getTagList() + + expect(getTagListCalled).toBe(true) + }) + + it('should not trigger getTagList when tagList has items', () => { + const tagList = [{ id: 'tag1', name: 'existing-tag' }] + let getTagListCalled = false + const getTagList = () => { + getTagListCalled = true + } + + // This simulates the useEffect in TagSelector + if (tagList.length === 0) + getTagList() + + expect(getTagListCalled).toBe(false) + }) + }) + + describe('State Initialization Patterns', () => { + it('should maintain AppCard tag state pattern', () => { + const app = { tags: [{ id: 'tag1', name: 'test' }] } + + // Original AppCard pattern: useState(app.tags) + const initialTags = app.tags + expect(Array.isArray(initialTags)).toBe(true) + expect(initialTags.length).toBe(1) + expect(initialTags).toBe(app.tags) // Reference equality for AppCard + }) + + it('should maintain AppInfo tag state pattern', () => { + const appDetail = { tags: [{ id: 'tag1', name: 'test' }] } + + // New AppInfo pattern: useState(appDetail?.tags || []) + const initialTags = appDetail?.tags || [] + expect(Array.isArray(initialTags)).toBe(true) + expect(initialTags.length).toBe(1) + }) + + it('should handle undefined appDetail gracefully in AppInfo', () => { + const appDetail = undefined + + // AppInfo pattern with undefined appDetail + const initialTags = (appDetail as any)?.tags || [] + expect(Array.isArray(initialTags)).toBe(true) + expect(initialTags.length).toBe(0) + }) + }) + + describe('CSS Class and Layout Logic', () => { + it('should apply correct minimum width condition', () => { + const minWidth = 'true' + + // This tests the minWidth logic in TagSelector + const shouldApplyMinWidth = minWidth && '!min-w-80' + expect(shouldApplyMinWidth).toBe('!min-w-80') + }) + + it('should not apply minimum width when not specified', () => { + const minWidth = undefined + + const shouldApplyMinWidth = minWidth && '!min-w-80' + expect(shouldApplyMinWidth).toBeFalsy() + }) + + it('should handle overflow layout classes correctly', () => { + // This tests the layout pattern from AppCard and new AppInfo + const overflowLayoutClasses = { + container: 'flex w-0 grow items-center', + inner: 'w-full', + truncate: 'truncate', + } + + expect(overflowLayoutClasses.container).toContain('w-0 grow') + expect(overflowLayoutClasses.inner).toContain('w-full') + expect(overflowLayoutClasses.truncate).toBe('truncate') + }) + }) + + describe('fetchAppWithTags Service Logic', () => { + it('should correctly find app by ID from app list', () => { + const appList = [ + { id: 'app1', name: 'App 1', tags: [] }, + { id: 'test-app-id', name: 'Test App', tags: [{ id: 'tag1', name: 'test' }] }, + { id: 'app3', name: 'App 3', tags: [] }, + ] + const targetAppId = 'test-app-id' + + // This simulates the logic in fetchAppWithTags + const foundApp = appList.find(app => app.id === targetAppId) + + expect(foundApp).toBeDefined() + expect(foundApp?.id).toBe('test-app-id') + expect(foundApp?.tags.length).toBe(1) + }) + + it('should return null when app not found', () => { + const appList = [ + { id: 'app1', name: 'App 1' }, + { id: 'app2', name: 'App 2' }, + ] + const targetAppId = 'nonexistent-app' + + const foundApp = appList.find(app => app.id === targetAppId) || null + + expect(foundApp).toBeNull() + }) + + it('should handle empty app list', () => { + const appList: any[] = [] + const targetAppId = 'any-app' + + const foundApp = appList.find(app => app.id === targetAppId) || null + + expect(foundApp).toBeNull() + expect(appList.length).toBe(0) // Verify empty array usage + }) + }) + + describe('Data Structure Validation', () => { + it('should maintain consistent tag data structure', () => { + const tag = { + id: 'tag1', + name: 'test-tag', + type: 'app', + binding_count: 1, + } + + expect(tag).toHaveProperty('id') + expect(tag).toHaveProperty('name') + expect(tag).toHaveProperty('type') + expect(tag).toHaveProperty('binding_count') + expect(tag.type).toBe('app') + expect(typeof tag.binding_count).toBe('number') + }) + + it('should handle tag arrays correctly', () => { + const tags = [ + { id: 'tag1', name: 'Tag 1', type: 'app', binding_count: 1 }, + { id: 'tag2', name: 'Tag 2', type: 'app', binding_count: 0 }, + ] + + expect(Array.isArray(tags)).toBe(true) + expect(tags.length).toBe(2) + expect(tags.every(tag => tag.type === 'app')).toBe(true) + }) + + it('should validate app data structure with tags', () => { + const app = { + id: 'test-app', + name: 'Test App', + tags: [ + { id: 'tag1', name: 'Tag 1', type: 'app', binding_count: 1 }, + ], + } + + expect(app).toHaveProperty('id') + expect(app).toHaveProperty('name') + expect(app).toHaveProperty('tags') + expect(Array.isArray(app.tags)).toBe(true) + expect(app.tags.length).toBe(1) + }) + }) + + describe('Performance and Edge Cases', () => { + it('should handle large tag arrays efficiently', () => { + const largeTags = Array.from({ length: 100 }, (_, i) => `tag${i}`) + const selectedTags = ['tag1', 'tag50', 'tag99'] + + // Performance test: filtering should be efficient + const startTime = Date.now() + const addTags = selectedTags.filter(tag => !largeTags.includes(tag)) + const removeTags = largeTags.filter(tag => !selectedTags.includes(tag)) + const endTime = Date.now() + + expect(endTime - startTime).toBeLessThan(10) // Should be very fast + expect(addTags.length).toBe(0) // All selected tags exist + expect(removeTags.length).toBe(97) // 100 - 3 = 97 tags to remove + }) + + it('should handle malformed tag data gracefully', () => { + const mixedData = [ + { id: 'valid1', name: 'Valid Tag', type: 'app', binding_count: 1 }, + { id: 'invalid1' }, // Missing required properties + null, + undefined, + { id: 'valid2', name: 'Another Valid', type: 'app', binding_count: 0 }, + ] + + // Filter out invalid entries + const validTags = mixedData.filter((tag): tag is { id: string; name: string; type: string; binding_count: number } => + tag != null + && typeof tag === 'object' + && 'id' in tag + && 'name' in tag + && 'type' in tag + && 'binding_count' in tag + && typeof tag.binding_count === 'number', + ) + + expect(validTags.length).toBe(2) + expect(validTags.every(tag => tag.id && tag.name)).toBe(true) + }) + + it('should handle concurrent tag operations correctly', () => { + const operations = [ + { type: 'add', tagIds: ['tag1', 'tag2'] }, + { type: 'remove', tagIds: ['tag3'] }, + { type: 'add', tagIds: ['tag4'] }, + ] + + // Simulate processing operations + const results = operations.map(op => ({ + ...op, + processed: true, + timestamp: Date.now(), + })) + + expect(results.length).toBe(3) + expect(results.every(result => result.processed)).toBe(true) + }) + }) + + describe('Backward Compatibility Verification', () => { + it('should not break existing AppCard behavior', () => { + // Verify AppCard continues to work with original patterns + const originalAppCardLogic = { + initializeTags: (app: any) => app.tags, + updateTags: (_currentTags: any[], newTags: any[]) => newTags, + shouldRefresh: true, + } + + const app = { tags: [{ id: 'tag1', name: 'original' }] } + const initializedTags = originalAppCardLogic.initializeTags(app) + + expect(initializedTags).toBe(app.tags) + expect(originalAppCardLogic.shouldRefresh).toBe(true) + }) + + it('should ensure AppInfo follows AppCard patterns', () => { + // Verify AppInfo uses compatible state management + const appCardPattern = (app: any) => app.tags + const appInfoPattern = (appDetail: any) => appDetail?.tags || [] + + const appWithTags = { tags: [{ id: 'tag1' }] } + const appWithoutTags = { tags: [] } + const undefinedApp = undefined + + expect(appCardPattern(appWithTags)).toEqual(appInfoPattern(appWithTags)) + expect(appInfoPattern(appWithoutTags)).toEqual([]) + expect(appInfoPattern(undefinedApp)).toEqual([]) + }) + + it('should maintain consistent API parameters', () => { + // Verify service layer maintains expected parameters + const fetchAppListParams = { + url: '/apps', + params: { page: 1, limit: 100 }, + } + + const tagApiParams = { + bindTag: (tagIDs: string[], targetID: string, type: string) => ({ tagIDs, targetID, type }), + unBindTag: (tagID: string, targetID: string, type: string) => ({ tagID, targetID, type }), + } + + expect(fetchAppListParams.url).toBe('/apps') + expect(fetchAppListParams.params.limit).toBe(100) + + const bindResult = tagApiParams.bindTag(['tag1'], 'app1', 'app') + expect(bindResult.tagIDs).toEqual(['tag1']) + expect(bindResult.type).toBe('app') + }) + }) +}) diff --git a/web/app/(commonLayout)/app/(appDetailLayout)/[appId]/layout-main.tsx b/web/app/(commonLayout)/app/(appDetailLayout)/[appId]/layout-main.tsx index 6b3807f1c6..47d5be29dd 100644 --- a/web/app/(commonLayout)/app/(appDetailLayout)/[appId]/layout-main.tsx +++ b/web/app/(commonLayout)/app/(appDetailLayout)/[appId]/layout-main.tsx @@ -20,12 +20,18 @@ import cn from '@/utils/classnames' import { useStore } from '@/app/components/app/store' import AppSideBar from '@/app/components/app-sidebar' import type { NavIcon } from '@/app/components/app-sidebar/navLink' -import { fetchAppDetail } from '@/service/apps' +import { fetchAppDetail, fetchAppWithTags } from '@/service/apps' import { useAppContext } from '@/context/app-context' import Loading from '@/app/components/base/loading' import useBreakpoints, { MediaType } from '@/hooks/use-breakpoints' import type { App } from '@/types/app' import useDocumentTitle from '@/hooks/use-document-title' +import { useStore as useTagStore } from '@/app/components/base/tag-management/store' +import dynamic from 'next/dynamic' + +const TagManagementModal = dynamic(() => import('@/app/components/base/tag-management'), { + ssr: false, +}) export type IAppDetailLayoutProps = { children: React.ReactNode @@ -48,6 +54,7 @@ const AppDetailLayout: FC = (props) => { setAppDetail: state.setAppDetail, setAppSiderbarExpand: state.setAppSiderbarExpand, }))) + const showTagManagementModal = useTagStore(s => s.showTagManagementModal) const [isLoadingAppDetail, setIsLoadingAppDetail] = useState(false) const [appDetailRes, setAppDetailRes] = useState(null) const [navigation, setNavigation] = useState = (props) => { useEffect(() => { setAppDetail() setIsLoadingAppDetail(true) - fetchAppDetail({ url: '/apps', id: appId }).then((res) => { + fetchAppDetail({ url: '/apps', id: appId }).then(async (res) => { + if (!res.tags || res.tags.length === 0) { + try { + const appWithTags = await fetchAppWithTags(appId) + if (appWithTags?.tags) + res.tags = appWithTags.tags + } + catch (error) { + // Fallback failed, continue with empty tags + } + } setAppDetailRes(res) }).catch((e: any) => { if (e.status === 404) @@ -163,6 +180,9 @@ const AppDetailLayout: FC = (props) => {
    {children}
    + {showTagManagementModal && ( + + )} ) } diff --git a/web/app/components/app-sidebar/app-info.tsx b/web/app/components/app-sidebar/app-info.tsx index c04d79d2f2..a197e7b10b 100644 --- a/web/app/components/app-sidebar/app-info.tsx +++ b/web/app/components/app-sidebar/app-info.tsx @@ -1,7 +1,7 @@ import { useTranslation } from 'react-i18next' import { useRouter } from 'next/navigation' import { useContext } from 'use-context-selector' -import React, { useCallback, useState } from 'react' +import React, { useCallback, useEffect, useState } from 'react' import { RiDeleteBinLine, RiEditLine, @@ -18,6 +18,8 @@ import { ToastContext } from '@/app/components/base/toast' import { useAppContext } from '@/context/app-context' import { useProviderContext } from '@/context/provider-context' import { copyApp, deleteApp, exportAppConfig, updateAppInfo } from '@/service/apps' +import type { Tag } from '@/app/components/base/tag-management/constant' +import TagSelector from '@/app/components/base/tag-management/selector' import type { DuplicateAppModalProps } from '@/app/components/app/duplicate-modal' import type { CreateAppModalProps } from '@/app/components/explore/create-app-modal' import { NEED_REFRESH_APP_LIST_KEY } from '@/config' @@ -73,6 +75,11 @@ const AppInfo = ({ expand, onlyShowDetail = false, openState = false, onDetailEx const [showImportDSLModal, setShowImportDSLModal] = useState(false) const [secretEnvList, setSecretEnvList] = useState([]) + const [tags, setTags] = useState(appDetail?.tags || []) + useEffect(() => { + setTags(appDetail?.tags || []) + }, [appDetail?.tags]) + const onEdit: CreateAppModalProps['onConfirm'] = useCallback(async ({ name, icon_type, @@ -303,8 +310,35 @@ const AppInfo = ({ expand, onlyShowDetail = false, openState = false, onDetailEx imageUrl={appDetail.icon_url} />
    -
    {appDetail.name}
    -
    {appDetail.mode === 'advanced-chat' ? t('app.types.advanced') : appDetail.mode === 'agent-chat' ? t('app.types.agent') : appDetail.mode === 'chat' ? t('app.types.chatbot') : appDetail.mode === 'completion' ? t('app.types.completion') : t('app.types.workflow')}
    +
    +
    +
    +
    {appDetail.name}
    + {isCurrentWorkspaceEditor && ( +
    { + e.stopPropagation() + e.preventDefault() + }}> +
    + tag.id)} + selectedTags={tags} + onCacheUpdate={setTags} + onChange={() => { + // Optional: could trigger a refresh if needed + }} + minWidth='true' + /> +
    +
    + )} +
    +
    {appDetail.mode === 'advanced-chat' ? t('app.types.advanced') : appDetail.mode === 'agent-chat' ? t('app.types.agent') : appDetail.mode === 'chat' ? t('app.types.chatbot') : appDetail.mode === 'completion' ? t('app.types.completion') : t('app.types.workflow')}
    +
    +
    {/* description */} diff --git a/web/app/components/base/tag-management/filter.tsx b/web/app/components/base/tag-management/filter.tsx index 1ce56e8f0d..ecc159b2fc 100644 --- a/web/app/components/base/tag-management/filter.tsx +++ b/web/app/components/base/tag-management/filter.tsx @@ -33,6 +33,7 @@ const TagFilter: FC = ({ const tagList = useTagStore(s => s.tagList) const setTagList = useTagStore(s => s.setTagList) + const setShowTagManagementModal = useTagStore(s => s.setShowTagManagementModal) const [keywords, setKeywords] = useState('') const [searchKeywords, setSearchKeywords] = useState('') @@ -136,6 +137,15 @@ const TagFilter: FC = ({ )} +
    +
    +
    setShowTagManagementModal(true)}> + +
    + {t('common.tag.manageTags')} +
    +
    +
    diff --git a/web/app/components/base/tag-management/selector.tsx b/web/app/components/base/tag-management/selector.tsx index 2678be2f17..cd03eb84bc 100644 --- a/web/app/components/base/tag-management/selector.tsx +++ b/web/app/components/base/tag-management/selector.tsx @@ -1,5 +1,5 @@ import type { FC } from 'react' -import { useMemo, useState } from 'react' +import { useEffect, useMemo, useState } from 'react' import { useContext } from 'use-context-selector' import { useTranslation } from 'react-i18next' import { useUnmount } from 'ahooks' @@ -26,6 +26,7 @@ type TagSelectorProps = { selectedTags: Tag[] onCacheUpdate: (tags: Tag[]) => void onChange?: () => void + minWidth?: string } type PanelProps = { @@ -213,6 +214,7 @@ const TagSelector: FC = ({ selectedTags, onCacheUpdate, onChange, + minWidth, }) => { const { t } = useTranslation() @@ -220,10 +222,20 @@ const TagSelector: FC = ({ const setTagList = useTagStore(s => s.setTagList) const getTagList = async () => { - const res = await fetchTagList(type) - setTagList(res) + try { + const res = await fetchTagList(type) + setTagList(res) + } + catch (error) { + setTagList([]) + } } + useEffect(() => { + if (tagList.length === 0) + getTagList() + }, [type]) + const triggerContent = useMemo(() => { if (selectedTags?.length) return selectedTags.filter(selectedTag => tagList.find(tag => tag.id === selectedTag.id)).map(tag => tag.name).join(', ') @@ -266,7 +278,7 @@ const TagSelector: FC = ({ '!w-full !border-0 !p-0 !text-text-tertiary hover:!bg-state-base-hover hover:!text-text-secondary', ) } - popupClassName='!w-full !ring-0' + popupClassName={cn('!w-full !ring-0', minWidth && '!min-w-80')} className={'!z-20 h-fit !w-full'} /> )} diff --git a/web/service/apps.ts b/web/service/apps.ts index 8e506a0987..3fdcf44667 100644 --- a/web/service/apps.ts +++ b/web/service/apps.ts @@ -60,6 +60,21 @@ export const deleteApp: Fetcher = (appID) => { return del(`apps/${appID}`) } +export const fetchAppWithTags = async (appID: string) => { + try { + const appListResponse = await fetchAppList({ + url: '/apps', + params: { page: 1, limit: 100 }, + }) + const appWithTags = appListResponse.data.find(app => app.id === appID) + return appWithTags || null + } + catch (error) { + console.warn('Failed to fetch app with tags:', error) + return null + } +} + export const updateAppSiteStatus: Fetcher }> = ({ url, body }) => { return post(url, { body }) } From d470120a60653c21c6d6afa61648c584e2e8b1c9 Mon Sep 17 00:00:00 2001 From: znn Date: Sun, 3 Aug 2025 16:01:15 +0530 Subject: [PATCH 122/415] retention of data filled on tab switch for tool plugin (#23323) --- .../variable/var-reference-picker.tsx | 22 +++++++++++++++++-- 1 file changed, 20 insertions(+), 2 deletions(-) diff --git a/web/app/components/workflow/nodes/_base/components/variable/var-reference-picker.tsx b/web/app/components/workflow/nodes/_base/components/variable/var-reference-picker.tsx index 0e57db0d8f..21eb9b8655 100644 --- a/web/app/components/workflow/nodes/_base/components/variable/var-reference-picker.tsx +++ b/web/app/components/workflow/nodes/_base/components/variable/var-reference-picker.tsx @@ -357,8 +357,26 @@ const VarReferencePicker: FC = ({ options: dynamicOptions, } } - return schema - }, [dynamicOptions]) + + // If we don't have dynamic options but we have a selected value, create a temporary option to preserve the selection during loading + if (isLoading && value && typeof value === 'string') { + const preservedOptions = [{ + value, + label: { en_US: value, zh_Hans: value }, + show_on: [], + }] + return { + ...schema, + options: preservedOptions, + } + } + + // Default case: return schema with empty options + return { + ...schema, + options: [], + } + }, [schema, dynamicOptions, isLoading, value]) const variableCategory = useMemo(() => { if (isEnv) return 'environment' From 90373c71656f6055bc7c848a5f9959ac1438c134 Mon Sep 17 00:00:00 2001 From: lyzno1 <92089059+lyzno1@users.noreply.github.com> Date: Sun, 3 Aug 2025 18:31:50 +0800 Subject: [PATCH 123/415] fix(i18n): correct translation errors across multiple languages (#23328) --- web/i18n/de-DE/dataset-documents.ts | 1 + web/i18n/de-DE/workflow.ts | 2 ++ web/i18n/es-ES/dataset-documents.ts | 1 + web/i18n/es-ES/workflow.ts | 2 ++ web/i18n/fa-IR/dataset-documents.ts | 1 + web/i18n/fa-IR/workflow.ts | 2 ++ web/i18n/fr-FR/dataset-documents.ts | 1 + web/i18n/fr-FR/workflow.ts | 2 ++ web/i18n/hi-IN/dataset-documents.ts | 1 + web/i18n/hi-IN/workflow.ts | 2 ++ web/i18n/it-IT/dataset-documents.ts | 3 ++- web/i18n/it-IT/workflow.ts | 2 ++ web/i18n/ja-JP/dataset-documents.ts | 1 + web/i18n/ja-JP/workflow.ts | 2 ++ web/i18n/ko-KR/dataset-documents.ts | 3 ++- web/i18n/ko-KR/workflow.ts | 2 ++ web/i18n/pl-PL/dataset-documents.ts | 3 ++- web/i18n/pl-PL/workflow.ts | 2 ++ web/i18n/pt-BR/dataset-documents.ts | 1 + web/i18n/pt-BR/workflow.ts | 2 ++ web/i18n/ro-RO/dataset-documents.ts | 1 + web/i18n/ro-RO/workflow.ts | 2 ++ web/i18n/ru-RU/dataset-documents.ts | 3 ++- web/i18n/ru-RU/workflow.ts | 2 ++ web/i18n/sl-SI/dataset-documents.ts | 1 + web/i18n/sl-SI/workflow.ts | 2 ++ web/i18n/th-TH/common.ts | 2 +- web/i18n/th-TH/dataset-documents.ts | 1 + web/i18n/th-TH/workflow.ts | 2 ++ web/i18n/tr-TR/dataset-documents.ts | 1 + web/i18n/tr-TR/workflow.ts | 2 ++ web/i18n/uk-UA/dataset-documents.ts | 1 + web/i18n/uk-UA/workflow.ts | 2 ++ web/i18n/vi-VN/dataset-documents.ts | 1 + web/i18n/vi-VN/workflow.ts | 2 ++ web/i18n/zh-Hans/dataset-documents.ts | 1 + web/i18n/zh-Hant/dataset-documents.ts | 1 + web/i18n/zh-Hant/workflow.ts | 2 ++ 38 files changed, 60 insertions(+), 5 deletions(-) diff --git a/web/i18n/de-DE/dataset-documents.ts b/web/i18n/de-DE/dataset-documents.ts index 438bcb708d..b17230354b 100644 --- a/web/i18n/de-DE/dataset-documents.ts +++ b/web/i18n/de-DE/dataset-documents.ts @@ -30,6 +30,7 @@ const translation = { sync: 'Synchronisieren', resume: 'Fortsetzen', pause: 'Pause', + download: 'Datei herunterladen', }, index: { enable: 'Aktivieren', diff --git a/web/i18n/de-DE/workflow.ts b/web/i18n/de-DE/workflow.ts index 72f9642995..1bd965c731 100644 --- a/web/i18n/de-DE/workflow.ts +++ b/web/i18n/de-DE/workflow.ts @@ -927,6 +927,7 @@ const translation = { deleteFailure: 'Version löschen fehlgeschlagen', restoreSuccess: 'Version wiederhergestellt', updateFailure: 'Aktualisierung der Version fehlgeschlagen', + copyIdSuccess: 'ID in die Zwischenablage kopiert', }, latest: 'Neueste', nameThisVersion: 'Nennen Sie diese Version', @@ -937,6 +938,7 @@ const translation = { editVersionInfo: 'Versionsinformationen bearbeiten', deletionTip: 'Die Löschung ist unumkehrbar, bitte bestätigen Sie.', restorationTip: 'Nach der Wiederherstellung der Version wird der aktuelle Entwurf überschrieben.', + copyId: 'ID kopieren', }, debug: { noData: { diff --git a/web/i18n/es-ES/dataset-documents.ts b/web/i18n/es-ES/dataset-documents.ts index 3775873b40..408c4bd0e0 100644 --- a/web/i18n/es-ES/dataset-documents.ts +++ b/web/i18n/es-ES/dataset-documents.ts @@ -31,6 +31,7 @@ const translation = { sync: 'Sincronizar', resume: 'Reanudar', pause: 'Pausa', + download: 'Descargar archivo', }, index: { enable: 'Habilitar', diff --git a/web/i18n/es-ES/workflow.ts b/web/i18n/es-ES/workflow.ts index 459121a168..d4958c3c0d 100644 --- a/web/i18n/es-ES/workflow.ts +++ b/web/i18n/es-ES/workflow.ts @@ -927,6 +927,7 @@ const translation = { deleteFailure: 'Error al eliminar la versión', updateFailure: 'Error al actualizar la versión', restoreSuccess: 'Versión restaurada', + copyIdSuccess: 'ID copiado en el portapapeles', }, releaseNotesPlaceholder: 'Describe lo que cambió', restorationTip: 'Después de la restauración de la versión, el borrador actual será sobrescrito.', @@ -937,6 +938,7 @@ const translation = { currentDraft: 'Borrador Actual', editVersionInfo: 'Editar información de la versión', latest: 'Último', + copyId: 'Copiar ID', }, debug: { noData: { diff --git a/web/i18n/fa-IR/dataset-documents.ts b/web/i18n/fa-IR/dataset-documents.ts index cdd14d3881..b9d76e5828 100644 --- a/web/i18n/fa-IR/dataset-documents.ts +++ b/web/i18n/fa-IR/dataset-documents.ts @@ -31,6 +31,7 @@ const translation = { sync: 'همگام‌سازی', resume: 'ادامه', pause: 'مکث', + download: 'دانلود فایل', }, index: { enable: 'فعال کردن', diff --git a/web/i18n/fa-IR/workflow.ts b/web/i18n/fa-IR/workflow.ts index 2f08183151..b3de497c2f 100644 --- a/web/i18n/fa-IR/workflow.ts +++ b/web/i18n/fa-IR/workflow.ts @@ -930,6 +930,7 @@ const translation = { deleteFailure: 'حذف نسخه موفق نبود', restoreFailure: 'بازگرداندن نسخه ناموفق بود', updateFailure: 'به‌روزرسانی نسخه ناموفق بود', + copyIdSuccess: 'شناسه در کلیپ بورد کپی شده است', }, latest: 'آخرین', editVersionInfo: 'ویرایش اطلاعات نسخه', @@ -940,6 +941,7 @@ const translation = { releaseNotesPlaceholder: 'شرح دهید چه چیزی تغییر کرده است', restorationTip: 'پس از بازیابی نسخه، پیش‌نویس فعلی بازنویسی خواهد شد.', deletionTip: 'حذف غیرقابل برگشت است، لطفا تأیید کنید.', + copyId: 'شناسه کپی', }, debug: { noData: { diff --git a/web/i18n/fr-FR/dataset-documents.ts b/web/i18n/fr-FR/dataset-documents.ts index debb03a379..6a844129f6 100644 --- a/web/i18n/fr-FR/dataset-documents.ts +++ b/web/i18n/fr-FR/dataset-documents.ts @@ -30,6 +30,7 @@ const translation = { sync: 'Synchroniser', pause: 'Pause', resume: 'Reprendre', + download: 'Télécharger le fichier', }, index: { enable: 'Activer', diff --git a/web/i18n/fr-FR/workflow.ts b/web/i18n/fr-FR/workflow.ts index 5e53a8b4ae..adc3eb125c 100644 --- a/web/i18n/fr-FR/workflow.ts +++ b/web/i18n/fr-FR/workflow.ts @@ -930,6 +930,7 @@ const translation = { deleteSuccess: 'Version supprimée', updateFailure: 'Échec de la mise à jour de la version', restoreFailure: 'Échec de la restauration de la version', + copyIdSuccess: 'ID copié dans le presse-papiers', }, title: 'Versions', releaseNotesPlaceholder: 'Décrivez ce qui a changé', @@ -940,6 +941,7 @@ const translation = { restorationTip: 'Après la restauration de la version, le brouillon actuel sera écrasé.', deletionTip: 'La suppression est irreversible, veuillez confirmer.', latest: 'Dernier', + copyId: 'Copier l’ID', }, debug: { noData: { diff --git a/web/i18n/hi-IN/dataset-documents.ts b/web/i18n/hi-IN/dataset-documents.ts index 3a4930e04b..15a42b1b50 100644 --- a/web/i18n/hi-IN/dataset-documents.ts +++ b/web/i18n/hi-IN/dataset-documents.ts @@ -31,6 +31,7 @@ const translation = { sync: 'सिंक्रोनाइज़ करें', resume: 'रिज़्यूमे', pause: 'रोकें', + download: 'फ़ाइल डाउनलोड करें', }, index: { enable: 'सक्रिय करें', diff --git a/web/i18n/hi-IN/workflow.ts b/web/i18n/hi-IN/workflow.ts index 95ccead15f..923abfaeb5 100644 --- a/web/i18n/hi-IN/workflow.ts +++ b/web/i18n/hi-IN/workflow.ts @@ -950,6 +950,7 @@ const translation = { updateSuccess: 'संस्करण अपडेट किया गया', updateFailure: 'संस्करण अपडेट करने में विफल', restoreFailure: 'संस्करण को पुनर्स्थापित करने में विफल', + copyIdSuccess: 'आईडी क्लिपबोर्ड पर कॉपी हो गई', }, latest: 'लेटेस्ट', editVersionInfo: 'संस्करण की जानकारी संपादित करें', @@ -960,6 +961,7 @@ const translation = { restorationTip: 'संस्करण पुनर्स्थापन के बाद, वर्तमान ड्राफ्ट अधिलेखित किया जाएगा।', defaultName: 'अविभाजित संस्करण', deletionTip: 'हटाना अप्रतिबंधी है, कृपया पुष्टि करें।', + copyId: 'आईडी कॉपी करें', }, debug: { noData: { diff --git a/web/i18n/it-IT/dataset-documents.ts b/web/i18n/it-IT/dataset-documents.ts index 66eb00aafd..404fb67bf7 100644 --- a/web/i18n/it-IT/dataset-documents.ts +++ b/web/i18n/it-IT/dataset-documents.ts @@ -29,8 +29,9 @@ const translation = { delete: 'Elimina', enableWarning: 'Il file archiviato non può essere abilitato', sync: 'Sincronizza', - resume: 'Riassumere', + resume: 'Riprendi', pause: 'Pausa', + download: 'Scarica file', }, index: { enable: 'Abilita', diff --git a/web/i18n/it-IT/workflow.ts b/web/i18n/it-IT/workflow.ts index ca934428a6..49b3a11e38 100644 --- a/web/i18n/it-IT/workflow.ts +++ b/web/i18n/it-IT/workflow.ts @@ -956,6 +956,7 @@ const translation = { updateSuccess: 'Versione aggiornata', deleteFailure: 'Impossibile eliminare la versione', updateFailure: 'Impossibile aggiornare la versione', + copyIdSuccess: 'ID copiato negli appunti', }, latest: 'Ultimo', defaultName: 'Versione senza titolo', @@ -966,6 +967,7 @@ const translation = { currentDraft: 'Bozza attuale', restorationTip: 'Dopo il ripristino della versione, la bozza attuale verrà sovrascritta.', title: 'Versioni', + copyId: 'Copia ID', }, debug: { noData: { diff --git a/web/i18n/ja-JP/dataset-documents.ts b/web/i18n/ja-JP/dataset-documents.ts index b2638f1b56..d22e3018ed 100644 --- a/web/i18n/ja-JP/dataset-documents.ts +++ b/web/i18n/ja-JP/dataset-documents.ts @@ -32,6 +32,7 @@ const translation = { sync: '同期', pause: '一時停止', resume: '再開', + download: 'ファイルをダウンロード', }, index: { enable: '有効にする', diff --git a/web/i18n/ja-JP/workflow.ts b/web/i18n/ja-JP/workflow.ts index 483adb402c..59791c5c7e 100644 --- a/web/i18n/ja-JP/workflow.ts +++ b/web/i18n/ja-JP/workflow.ts @@ -936,7 +936,9 @@ const translation = { deleteFailure: '削除に失敗しました', updateSuccess: '更新が完了しました', updateFailure: '更新に失敗しました', + copyIdSuccess: 'IDがクリップボードにコピーされました', }, + copyId: 'IDをコピー', }, debug: { noData: { diff --git a/web/i18n/ko-KR/dataset-documents.ts b/web/i18n/ko-KR/dataset-documents.ts index e026144f17..3aa3e9239f 100644 --- a/web/i18n/ko-KR/dataset-documents.ts +++ b/web/i18n/ko-KR/dataset-documents.ts @@ -28,8 +28,9 @@ const translation = { delete: '삭제', enableWarning: '아카이브된 파일은 활성화할 수 없습니다.', sync: '동기화', - resume: '이력서', + resume: '재개', pause: '일시 중지', + download: '파일 다운로드', }, index: { enable: '활성화', diff --git a/web/i18n/ko-KR/workflow.ts b/web/i18n/ko-KR/workflow.ts index 7a7902dfdc..9b1ec69603 100644 --- a/web/i18n/ko-KR/workflow.ts +++ b/web/i18n/ko-KR/workflow.ts @@ -977,6 +977,7 @@ const translation = { restoreFailure: '버전을 복원하지 못했습니다.', deleteFailure: '버전을 삭제하지 못했습니다.', updateSuccess: '버전이 업데이트되었습니다.', + copyIdSuccess: '클립보드에 복사된 ID', }, editVersionInfo: '버전 정보 편집', latest: '최신', @@ -987,6 +988,7 @@ const translation = { title: '버전 기록', deletionTip: '삭제는 되돌릴 수 없으니, 확인해 주시기 바랍니다.', restorationTip: '버전 복원 후 현재 초안이 덮어쓰여질 것입니다.', + copyId: 'ID 복사', }, debug: { noData: { diff --git a/web/i18n/pl-PL/dataset-documents.ts b/web/i18n/pl-PL/dataset-documents.ts index da543d299a..c0b801ccf5 100644 --- a/web/i18n/pl-PL/dataset-documents.ts +++ b/web/i18n/pl-PL/dataset-documents.ts @@ -28,8 +28,9 @@ const translation = { delete: 'Usuń', enableWarning: 'Zarchiwizowany plik nie może zostać włączony', sync: 'Synchronizuj', - resume: 'Wznawiać', + resume: 'Wznów', pause: 'Pauza', + download: 'Pobierz plik', }, index: { enable: 'Włącz', diff --git a/web/i18n/pl-PL/workflow.ts b/web/i18n/pl-PL/workflow.ts index 56b7536879..8c17ba0ff6 100644 --- a/web/i18n/pl-PL/workflow.ts +++ b/web/i18n/pl-PL/workflow.ts @@ -930,6 +930,7 @@ const translation = { deleteSuccess: 'Wersja usunięta', restoreSuccess: 'Wersja przywrócona', restoreFailure: 'Nie udało się przywrócić wersji', + copyIdSuccess: 'Identyfikator skopiowany do schowka', }, currentDraft: 'Aktualny szkic', nameThisVersion: 'Nazwij tę wersję', @@ -940,6 +941,7 @@ const translation = { editVersionInfo: 'Edytuj informacje o wersji', deletionTip: 'Usunięcie jest nieodwracalne, proszę potwierdzić.', restorationTip: 'Po przywróceniu wersji bieżący szkic zostanie nadpisany.', + copyId: 'Kopiuj ID', }, debug: { noData: { diff --git a/web/i18n/pt-BR/dataset-documents.ts b/web/i18n/pt-BR/dataset-documents.ts index 30fa87f82f..ca4ad21530 100644 --- a/web/i18n/pt-BR/dataset-documents.ts +++ b/web/i18n/pt-BR/dataset-documents.ts @@ -30,6 +30,7 @@ const translation = { sync: 'Sincronizar', resume: 'Retomar', pause: 'Pausa', + download: 'Baixar arquivo', }, index: { enable: 'Habilitar', diff --git a/web/i18n/pt-BR/workflow.ts b/web/i18n/pt-BR/workflow.ts index d5820bd611..4d933994db 100644 --- a/web/i18n/pt-BR/workflow.ts +++ b/web/i18n/pt-BR/workflow.ts @@ -930,6 +930,7 @@ const translation = { restoreFailure: 'Falha ao restaurar versão', restoreSuccess: 'Versão restaurada', deleteFailure: 'Falha ao deletar versão', + copyIdSuccess: 'ID copiado para a área de transferência', }, title: 'Versões', latest: 'Último', @@ -940,6 +941,7 @@ const translation = { restorationTip: 'Após a restauração da versão, o rascunho atual será substituído.', currentDraft: 'Rascunho Atual', deletionTip: 'A exclusão é irreversível, por favor confirme.', + copyId: 'Copiar ID', }, debug: { noData: { diff --git a/web/i18n/ro-RO/dataset-documents.ts b/web/i18n/ro-RO/dataset-documents.ts index c624d0acde..a6d7ffdfab 100644 --- a/web/i18n/ro-RO/dataset-documents.ts +++ b/web/i18n/ro-RO/dataset-documents.ts @@ -30,6 +30,7 @@ const translation = { sync: 'Sincronizează', pause: 'Pauză', resume: 'Reia', + download: 'Descărcați fișierul', }, index: { enable: 'Activează', diff --git a/web/i18n/ro-RO/workflow.ts b/web/i18n/ro-RO/workflow.ts index 4a24f7dc00..b4eb41d041 100644 --- a/web/i18n/ro-RO/workflow.ts +++ b/web/i18n/ro-RO/workflow.ts @@ -930,6 +930,7 @@ const translation = { deleteFailure: 'Ștergerea versiunii a eșuat', updateSuccess: 'Versiune actualizată', updateFailure: 'Actualizarea versiunii a eșuat', + copyIdSuccess: 'ID copiat în clipboard', }, latest: 'Cea mai recentă', title: 'Versiuni', @@ -940,6 +941,7 @@ const translation = { releaseNotesPlaceholder: 'Descrie ce s-a schimbat', deletionTip: 'Ștergerea este irreversibilă, vă rugăm să confirmați.', currentDraft: 'Draftul curent', + copyId: 'Copiază ID', }, debug: { noData: { diff --git a/web/i18n/ru-RU/dataset-documents.ts b/web/i18n/ru-RU/dataset-documents.ts index 5a72eb766c..400ada270d 100644 --- a/web/i18n/ru-RU/dataset-documents.ts +++ b/web/i18n/ru-RU/dataset-documents.ts @@ -29,8 +29,9 @@ const translation = { delete: 'Удалить', enableWarning: 'Архивный файл не может быть включен', sync: 'Синхронизировать', - resume: 'Продовжити', + resume: 'Возобновить', pause: 'Пауза', + download: 'Скачать файл', }, index: { enable: 'Включить', diff --git a/web/i18n/ru-RU/workflow.ts b/web/i18n/ru-RU/workflow.ts index 284a88c5b2..87982d1331 100644 --- a/web/i18n/ru-RU/workflow.ts +++ b/web/i18n/ru-RU/workflow.ts @@ -930,6 +930,7 @@ const translation = { deleteSuccess: 'Версия удалена', updateFailure: 'Не удалось обновить версию', restoreFailure: 'Не удалось восстановить версию', + copyIdSuccess: 'ID скопирован в буфер обмена', }, latest: 'Последний', restorationTip: 'После восстановления версии текущий черновик будет перезаписан.', @@ -940,6 +941,7 @@ const translation = { currentDraft: 'Текущий проект', releaseNotesPlaceholder: 'Опишите, что изменилось', defaultName: 'Без названия версия', + copyId: 'Копировать ID', }, debug: { noData: { diff --git a/web/i18n/sl-SI/dataset-documents.ts b/web/i18n/sl-SI/dataset-documents.ts index ca4f10e798..a163197e86 100644 --- a/web/i18n/sl-SI/dataset-documents.ts +++ b/web/i18n/sl-SI/dataset-documents.ts @@ -31,6 +31,7 @@ const translation = { sync: 'Sinhroniziraj', pause: 'Zaustavi', resume: 'Nadaljuj', + download: 'Prenesi datoteko', }, index: { enable: 'Omogoči', diff --git a/web/i18n/sl-SI/workflow.ts b/web/i18n/sl-SI/workflow.ts index 72150701de..f267fb0d50 100644 --- a/web/i18n/sl-SI/workflow.ts +++ b/web/i18n/sl-SI/workflow.ts @@ -927,6 +927,7 @@ const translation = { restoreSuccess: 'Obnovljena različica', restoreFailure: 'Obnavljanje različice ni uspelo', updateSuccess: 'Različica posodobljena', + copyIdSuccess: 'ID kopiran v odložišče', }, defaultName: 'Nepodpisana različica', deletionTip: 'Izbris je nepovraten, prosim potrdite.', @@ -937,6 +938,7 @@ const translation = { nameThisVersion: 'Poimenujte to različico', releaseNotesPlaceholder: 'Opisujte, kaj se je spremenilo', restorationTip: 'Po obnovitvi različice bo trenutni osnutek prepisan.', + copyId: 'Kopiraj ID', }, debug: { noData: { diff --git a/web/i18n/th-TH/common.ts b/web/i18n/th-TH/common.ts index dd7dd31cb1..1dcfe63a9d 100644 --- a/web/i18n/th-TH/common.ts +++ b/web/i18n/th-TH/common.ts @@ -137,7 +137,7 @@ const translation = { menus: { status: 'Beta', explore: 'สํารวจ', - apps: 'เรียน', + apps: 'สตูดิโอ', plugins: 'ปลั๊กอิน', pluginsTips: 'รวมปลั๊กอินของบุคคลที่สามหรือสร้างปลั๊กอิน AI ที่เข้ากันได้กับ ChatGPT', datasets: 'ความรู้', diff --git a/web/i18n/th-TH/dataset-documents.ts b/web/i18n/th-TH/dataset-documents.ts index 1471ba365a..539dadfd18 100644 --- a/web/i18n/th-TH/dataset-documents.ts +++ b/web/i18n/th-TH/dataset-documents.ts @@ -31,6 +31,7 @@ const translation = { sync: 'ซิงค์', pause: 'หยุด', resume: 'ดำเนิน', + download: 'ดาวน์โหลดไฟล์', }, index: { enable: 'เปิด', diff --git a/web/i18n/th-TH/workflow.ts b/web/i18n/th-TH/workflow.ts index 45b61b011e..a9a1ca7923 100644 --- a/web/i18n/th-TH/workflow.ts +++ b/web/i18n/th-TH/workflow.ts @@ -930,6 +930,7 @@ const translation = { restoreSuccess: 'เวอร์ชันที่กู้คืน', restoreFailure: 'ไม่สามารถกู้คืนเวอร์ชันได้', updateSuccess: 'อัปเดตเวอร์ชัน', + copyIdSuccess: 'คัดลอกรหัสไปยังคลิปบอร์ด', }, releaseNotesPlaceholder: 'อธิบายว่าสิ่งที่เปลี่ยนแปลงไปคืออะไร', currentDraft: 'ร่างปัจจุบัน', @@ -940,6 +941,7 @@ const translation = { nameThisVersion: 'ชื่อเวอร์ชันนี้', title: 'เวอร์ชัน', latest: 'ล่าสุด', + copyId: 'คัดลอก ID', }, debug: { noData: { diff --git a/web/i18n/tr-TR/dataset-documents.ts b/web/i18n/tr-TR/dataset-documents.ts index c40c111d5d..984aad5a0a 100644 --- a/web/i18n/tr-TR/dataset-documents.ts +++ b/web/i18n/tr-TR/dataset-documents.ts @@ -31,6 +31,7 @@ const translation = { sync: 'Senkronize et', pause: 'Duraklat', resume: 'Devam Et', + download: 'Dosyayı İndir', }, index: { enable: 'Etkinleştir', diff --git a/web/i18n/tr-TR/workflow.ts b/web/i18n/tr-TR/workflow.ts index 8fac474b26..499ba86807 100644 --- a/web/i18n/tr-TR/workflow.ts +++ b/web/i18n/tr-TR/workflow.ts @@ -931,6 +931,7 @@ const translation = { updateFailure: 'Sürüm güncellenemedi', updateSuccess: 'Sürüm güncellendi', deleteSuccess: 'Sürüm silindi', + copyIdSuccess: 'Kimlik panoya kopyalandı', }, latest: 'Sonuncu', currentDraft: 'Mevcut Taslak', @@ -941,6 +942,7 @@ const translation = { releaseNotesPlaceholder: 'Değişen şeyleri tanımlayın', nameThisVersion: 'Bu versiyona isim ver', deletionTip: 'Silme işlemi geri alınamaz, lütfen onaylayın.', + copyId: 'ID Kopyala', }, debug: { noData: { diff --git a/web/i18n/uk-UA/dataset-documents.ts b/web/i18n/uk-UA/dataset-documents.ts index 903e8a97c4..f4a40081c5 100644 --- a/web/i18n/uk-UA/dataset-documents.ts +++ b/web/i18n/uk-UA/dataset-documents.ts @@ -30,6 +30,7 @@ const translation = { sync: 'Синхронізувати', pause: 'Пауза', resume: 'Продовжити', + download: 'Завантажити файл', }, index: { enable: 'Активувати', diff --git a/web/i18n/uk-UA/workflow.ts b/web/i18n/uk-UA/workflow.ts index f5cf52d8db..dea3704d85 100644 --- a/web/i18n/uk-UA/workflow.ts +++ b/web/i18n/uk-UA/workflow.ts @@ -930,6 +930,7 @@ const translation = { deleteSuccess: 'Версія видалена', restoreSuccess: 'Версія відновлена', updateFailure: 'Не вдалося оновити версію', + copyIdSuccess: 'ID скопійовано в буфер обміну', }, defaultName: 'Без назви версія', restorationTip: 'Після відновлення версії нинішній проект буде перезаписано.', @@ -940,6 +941,7 @@ const translation = { editVersionInfo: 'Редагувати інформацію про версію', nameThisVersion: 'Назвіть цю версію', latest: 'Останні новини', + copyId: 'Копіювати ідентифікатор', }, debug: { noData: { diff --git a/web/i18n/vi-VN/dataset-documents.ts b/web/i18n/vi-VN/dataset-documents.ts index c6fcd4ed45..1f514a1d6f 100644 --- a/web/i18n/vi-VN/dataset-documents.ts +++ b/web/i18n/vi-VN/dataset-documents.ts @@ -30,6 +30,7 @@ const translation = { sync: 'Đồng bộ', pause: 'Tạm dừng', resume: 'Tiếp tục', + download: 'Tải xuống tập tin', }, index: { enable: 'Kích hoạt', diff --git a/web/i18n/vi-VN/workflow.ts b/web/i18n/vi-VN/workflow.ts index 77f22613b4..257fd1ed67 100644 --- a/web/i18n/vi-VN/workflow.ts +++ b/web/i18n/vi-VN/workflow.ts @@ -930,6 +930,7 @@ const translation = { updateSuccess: 'Phiên bản đã được cập nhật', restoreSuccess: 'Phiên bản đã được khôi phục', restoreFailure: 'Không thể khôi phục phiên bản', + copyIdSuccess: 'ID được sao chép vào khay nhớ tạm', }, defaultName: 'Phiên bản không được đặt tên', releaseNotesPlaceholder: 'Mô tả những gì đã thay đổi', @@ -940,6 +941,7 @@ const translation = { nameThisVersion: 'Đặt tên cho phiên bản này', restorationTip: 'Sau khi phục hồi phiên bản, bản nháp hiện tại sẽ bị ghi đè.', title: 'Các phiên bản', + copyId: 'Sao chép ID', }, debug: { noData: { diff --git a/web/i18n/zh-Hans/dataset-documents.ts b/web/i18n/zh-Hans/dataset-documents.ts index 581bc851f7..15e3071e51 100644 --- a/web/i18n/zh-Hans/dataset-documents.ts +++ b/web/i18n/zh-Hans/dataset-documents.ts @@ -32,6 +32,7 @@ const translation = { sync: '同步', pause: '暂停', resume: '恢复', + download: '下载文件', }, index: { enable: '启用中', diff --git a/web/i18n/zh-Hant/dataset-documents.ts b/web/i18n/zh-Hant/dataset-documents.ts index 1b482f181f..7344db2df7 100644 --- a/web/i18n/zh-Hant/dataset-documents.ts +++ b/web/i18n/zh-Hant/dataset-documents.ts @@ -30,6 +30,7 @@ const translation = { sync: '同步', resume: '恢復', pause: '暫停', + download: '下載檔案', }, index: { enable: '啟用中', diff --git a/web/i18n/zh-Hant/workflow.ts b/web/i18n/zh-Hant/workflow.ts index f522e990b0..6a5e990909 100644 --- a/web/i18n/zh-Hant/workflow.ts +++ b/web/i18n/zh-Hant/workflow.ts @@ -927,6 +927,7 @@ const translation = { updateSuccess: '版本已更新', deleteSuccess: '版本已刪除', deleteFailure: '無法刪除版本', + copyIdSuccess: 'ID 已複製到剪貼板', }, nameThisVersion: '給這個版本命名', latest: '最新', @@ -937,6 +938,7 @@ const translation = { deletionTip: '刪除是不可逆的,請確認。', releaseNotesPlaceholder: '描述發生了什麼變化', defaultName: '未命名版本', + copyId: '複製ID', }, debug: { noData: { From dedd5f571c7611ea81765f5ec4507ef4607e4309 Mon Sep 17 00:00:00 2001 From: yyh <92089059+lyzno1@users.noreply.github.com> Date: Sun, 3 Aug 2025 22:30:21 +0800 Subject: [PATCH 124/415] fix(ui): temporarily remove TagSelector from app sidebar per design review (#23329) --- web/app/components/app-sidebar/app-info.tsx | 40 ++------------------- 1 file changed, 3 insertions(+), 37 deletions(-) diff --git a/web/app/components/app-sidebar/app-info.tsx b/web/app/components/app-sidebar/app-info.tsx index a197e7b10b..c04d79d2f2 100644 --- a/web/app/components/app-sidebar/app-info.tsx +++ b/web/app/components/app-sidebar/app-info.tsx @@ -1,7 +1,7 @@ import { useTranslation } from 'react-i18next' import { useRouter } from 'next/navigation' import { useContext } from 'use-context-selector' -import React, { useCallback, useEffect, useState } from 'react' +import React, { useCallback, useState } from 'react' import { RiDeleteBinLine, RiEditLine, @@ -18,8 +18,6 @@ import { ToastContext } from '@/app/components/base/toast' import { useAppContext } from '@/context/app-context' import { useProviderContext } from '@/context/provider-context' import { copyApp, deleteApp, exportAppConfig, updateAppInfo } from '@/service/apps' -import type { Tag } from '@/app/components/base/tag-management/constant' -import TagSelector from '@/app/components/base/tag-management/selector' import type { DuplicateAppModalProps } from '@/app/components/app/duplicate-modal' import type { CreateAppModalProps } from '@/app/components/explore/create-app-modal' import { NEED_REFRESH_APP_LIST_KEY } from '@/config' @@ -75,11 +73,6 @@ const AppInfo = ({ expand, onlyShowDetail = false, openState = false, onDetailEx const [showImportDSLModal, setShowImportDSLModal] = useState(false) const [secretEnvList, setSecretEnvList] = useState([]) - const [tags, setTags] = useState(appDetail?.tags || []) - useEffect(() => { - setTags(appDetail?.tags || []) - }, [appDetail?.tags]) - const onEdit: CreateAppModalProps['onConfirm'] = useCallback(async ({ name, icon_type, @@ -310,35 +303,8 @@ const AppInfo = ({ expand, onlyShowDetail = false, openState = false, onDetailEx imageUrl={appDetail.icon_url} />
    -
    -
    -
    -
    {appDetail.name}
    - {isCurrentWorkspaceEditor && ( -
    { - e.stopPropagation() - e.preventDefault() - }}> -
    - tag.id)} - selectedTags={tags} - onCacheUpdate={setTags} - onChange={() => { - // Optional: could trigger a refresh if needed - }} - minWidth='true' - /> -
    -
    - )} -
    -
    {appDetail.mode === 'advanced-chat' ? t('app.types.advanced') : appDetail.mode === 'agent-chat' ? t('app.types.agent') : appDetail.mode === 'chat' ? t('app.types.chatbot') : appDetail.mode === 'completion' ? t('app.types.completion') : t('app.types.workflow')}
    -
    -
    +
    {appDetail.name}
    +
    {appDetail.mode === 'advanced-chat' ? t('app.types.advanced') : appDetail.mode === 'agent-chat' ? t('app.types.agent') : appDetail.mode === 'chat' ? t('app.types.chatbot') : appDetail.mode === 'completion' ? t('app.types.completion') : t('app.types.workflow')}
    {/* description */} From 964fa132cbb5b9357a089392b06e2f83877a06e4 Mon Sep 17 00:00:00 2001 From: Yongtao Huang Date: Sun, 3 Aug 2025 22:30:28 +0800 Subject: [PATCH 125/415] Chore: fix typo, no code change (#23331) --- api/controllers/console/error.py | 4 ++-- api/services/billing_service.py | 4 ++-- api/services/workflow_draft_variable_service.py | 6 +++--- api/services/workflow_service.py | 4 ++-- 4 files changed, 9 insertions(+), 9 deletions(-) diff --git a/api/controllers/console/error.py b/api/controllers/console/error.py index 6944c56bf8..0a4dfe1c10 100644 --- a/api/controllers/console/error.py +++ b/api/controllers/console/error.py @@ -127,7 +127,7 @@ class EducationActivateLimitError(BaseHTTPException): code = 429 -class CompilanceRateLimitError(BaseHTTPException): - error_code = "compilance_rate_limit" +class ComplianceRateLimitError(BaseHTTPException): + error_code = "compliance_rate_limit" description = "Rate limit exceeded for downloading compliance report." code = 429 diff --git a/api/services/billing_service.py b/api/services/billing_service.py index 5a12aa2e54..476fce0057 100644 --- a/api/services/billing_service.py +++ b/api/services/billing_service.py @@ -159,9 +159,9 @@ class BillingService: ): limiter_key = f"{account_id}:{tenant_id}" if cls.compliance_download_rate_limiter.is_rate_limited(limiter_key): - from controllers.console.error import CompilanceRateLimitError + from controllers.console.error import ComplianceRateLimitError - raise CompilanceRateLimitError() + raise ComplianceRateLimitError() json = { "doc_name": doc_name, diff --git a/api/services/workflow_draft_variable_service.py b/api/services/workflow_draft_variable_service.py index 3164e010b4..2d62d49d91 100644 --- a/api/services/workflow_draft_variable_service.py +++ b/api/services/workflow_draft_variable_service.py @@ -422,7 +422,7 @@ class WorkflowDraftVariableService: description=conv_var.description, ) draft_conv_vars.append(draft_var) - _batch_upsert_draft_varaible( + _batch_upsert_draft_variable( self._session, draft_conv_vars, policy=_UpsertPolicy.IGNORE, @@ -434,7 +434,7 @@ class _UpsertPolicy(StrEnum): OVERWRITE = "overwrite" -def _batch_upsert_draft_varaible( +def _batch_upsert_draft_variable( session: Session, draft_vars: Sequence[WorkflowDraftVariable], policy: _UpsertPolicy = _UpsertPolicy.OVERWRITE, @@ -721,7 +721,7 @@ class DraftVariableSaver: draft_vars = self._build_variables_from_start_mapping(outputs) else: draft_vars = self._build_variables_from_mapping(outputs) - _batch_upsert_draft_varaible(self._session, draft_vars) + _batch_upsert_draft_variable(self._session, draft_vars) @staticmethod def _should_variable_be_editable(node_id: str, name: str) -> bool: diff --git a/api/services/workflow_service.py b/api/services/workflow_service.py index 0c5d29b78e..d2715a61fe 100644 --- a/api/services/workflow_service.py +++ b/api/services/workflow_service.py @@ -444,9 +444,9 @@ class WorkflowService: self, node_data: dict, tenant_id: str, user_id: str, node_id: str, user_inputs: dict[str, Any] ) -> WorkflowNodeExecution: """ - Run draft workflow node + Run free workflow node """ - # run draft workflow node + # run free workflow node start_at = time.perf_counter() node_execution = self._handle_node_run_result( From 0ebcee9a6bdc4876ba54bc803d0ee86a431c5170 Mon Sep 17 00:00:00 2001 From: znn Date: Mon, 4 Aug 2025 07:44:57 +0530 Subject: [PATCH 126/415] fixing footer (#22927) --- web/app/components/apps/footer.tsx | 21 ++++++++++++++++++--- 1 file changed, 18 insertions(+), 3 deletions(-) diff --git a/web/app/components/apps/footer.tsx b/web/app/components/apps/footer.tsx index 7bee272342..18b7779651 100644 --- a/web/app/components/apps/footer.tsx +++ b/web/app/components/apps/footer.tsx @@ -1,6 +1,6 @@ -import React from 'react' +import React, { useState } from 'react' import Link from 'next/link' -import { RiDiscordFill, RiGithubFill } from '@remixicon/react' +import { RiCloseLine, RiDiscordFill, RiGithubFill } from '@remixicon/react' import { useTranslation } from 'react-i18next' type CustomLinkProps = { @@ -26,9 +26,24 @@ const CustomLink = React.memo(({ const Footer = () => { const { t } = useTranslation() + const [isVisible, setIsVisible] = useState(true) + + const handleClose = () => { + setIsVisible(false) + } + + if (!isVisible) + return null return ( -
    +
    +

    {t('app.join')}

    {t('app.communityIntro')}

    From 406c1952b8ec769a0360704176a1d008869f5dd0 Mon Sep 17 00:00:00 2001 From: Yongtao Huang Date: Mon, 4 Aug 2025 10:40:49 +0800 Subject: [PATCH 127/415] Fix version comparison with imported_version (#23326) Signed-off-by: Yongtao Huang --- .../rag/datasource/vdb/elasticsearch/elasticsearch_vector.py | 3 ++- api/services/app_dsl_service.py | 3 ++- 2 files changed, 4 insertions(+), 2 deletions(-) diff --git a/api/core/rag/datasource/vdb/elasticsearch/elasticsearch_vector.py b/api/core/rag/datasource/vdb/elasticsearch/elasticsearch_vector.py index 9dea050dc3..49c4b392fe 100644 --- a/api/core/rag/datasource/vdb/elasticsearch/elasticsearch_vector.py +++ b/api/core/rag/datasource/vdb/elasticsearch/elasticsearch_vector.py @@ -7,6 +7,7 @@ from urllib.parse import urlparse import requests from elasticsearch import Elasticsearch from flask import current_app +from packaging.version import parse as parse_version from pydantic import BaseModel, model_validator from core.rag.datasource.vdb.field import Field @@ -149,7 +150,7 @@ class ElasticSearchVector(BaseVector): return cast(str, info["version"]["number"]) def _check_version(self): - if self._version < "8.0.0": + if parse_version(self._version) < parse_version("8.0.0"): raise ValueError("Elasticsearch vector database version must be greater than 8.0.0") def get_type(self) -> str: diff --git a/api/services/app_dsl_service.py b/api/services/app_dsl_service.py index fe0efd061d..2aa9f6cabd 100644 --- a/api/services/app_dsl_service.py +++ b/api/services/app_dsl_service.py @@ -12,6 +12,7 @@ import yaml # type: ignore from Crypto.Cipher import AES from Crypto.Util.Padding import pad, unpad from packaging import version +from packaging.version import parse as parse_version from pydantic import BaseModel, Field from sqlalchemy import select from sqlalchemy.orm import Session @@ -269,7 +270,7 @@ class AppDslService: check_dependencies_pending_data = None if dependencies: check_dependencies_pending_data = [PluginDependency.model_validate(d) for d in dependencies] - elif imported_version <= "0.1.5": + elif parse_version(imported_version) <= parse_version("0.1.5"): if "workflow" in data: graph = data.get("workflow", {}).get("graph", {}) dependencies_list = self._extract_dependencies_from_workflow_graph(graph) From 54c8bd29ee354155391a7668cccf6ec6fd20fe22 Mon Sep 17 00:00:00 2001 From: yyh <92089059+lyzno1@users.noreply.github.com> Date: Mon, 4 Aug 2025 12:48:38 +0800 Subject: [PATCH 128/415] security: Fix XSS vulnerability in authentication check-code pages (#23295) --- web/__tests__/xss-fix-verification.test.tsx | 212 ++++++++++++++++++ .../webapp-reset-password/check-code/page.tsx | 5 +- .../webapp-signin/check-code/page.tsx | 5 +- web/app/reset-password/check-code/page.tsx | 5 +- web/app/signin/check-code/page.tsx | 5 +- web/i18n/de-DE/login.ts | 2 +- web/i18n/en-US/login.ts | 2 +- web/i18n/es-ES/login.ts | 2 +- web/i18n/fa-IR/login.ts | 3 +- web/i18n/fr-FR/login.ts | 3 +- web/i18n/hi-IN/login.ts | 3 +- web/i18n/it-IT/login.ts | 3 +- web/i18n/ja-JP/login.ts | 2 +- web/i18n/ko-KR/login.ts | 2 +- web/i18n/pl-PL/login.ts | 3 +- web/i18n/pt-BR/login.ts | 3 +- web/i18n/ro-RO/login.ts | 3 +- web/i18n/ru-RU/login.ts | 3 +- web/i18n/sl-SI/login.ts | 3 +- web/i18n/th-TH/login.ts | 2 +- web/i18n/tr-TR/login.ts | 3 +- web/i18n/uk-UA/login.ts | 3 +- web/i18n/vi-VN/login.ts | 3 +- web/i18n/zh-Hans/login.ts | 2 +- web/i18n/zh-Hant/login.ts | 2 +- 25 files changed, 248 insertions(+), 36 deletions(-) create mode 100644 web/__tests__/xss-fix-verification.test.tsx diff --git a/web/__tests__/xss-fix-verification.test.tsx b/web/__tests__/xss-fix-verification.test.tsx new file mode 100644 index 0000000000..2fa5ab3c05 --- /dev/null +++ b/web/__tests__/xss-fix-verification.test.tsx @@ -0,0 +1,212 @@ +/** + * XSS Fix Verification Test + * + * This test verifies that the XSS vulnerability in check-code pages has been + * properly fixed by replacing dangerouslySetInnerHTML with safe React rendering. + */ + +import React from 'react' +import { cleanup, render } from '@testing-library/react' +import '@testing-library/jest-dom' + +// Mock i18next with the new safe translation structure +jest.mock('react-i18next', () => ({ + useTranslation: () => ({ + t: (key: string) => { + if (key === 'login.checkCode.tipsPrefix') + return 'We send a verification code to ' + + return key + }, + }), +})) + +// Mock Next.js useSearchParams +jest.mock('next/navigation', () => ({ + useSearchParams: () => ({ + get: (key: string) => { + if (key === 'email') + return 'test@example.com' + return null + }, + }), +})) + +// Fixed CheckCode component implementation (current secure version) +const SecureCheckCodeComponent = ({ email }: { email: string }) => { + const { t } = require('react-i18next').useTranslation() + + return ( +
    +

    Check Code

    +

    + + {t('login.checkCode.tipsPrefix')} + {email} + +

    +
    + ) +} + +// Vulnerable implementation for comparison (what we fixed) +const VulnerableCheckCodeComponent = ({ email }: { email: string }) => { + const mockTranslation = (key: string, params?: any) => { + if (key === 'login.checkCode.tips' && params?.email) + return `We send a verification code to ${params.email}` + + return key + } + + return ( +
    +

    Check Code

    +

    + +

    +
    + ) +} + +describe('XSS Fix Verification - Check Code Pages Security', () => { + afterEach(() => { + cleanup() + }) + + const maliciousEmail = 'test@example.com' + + it('should securely render email with HTML characters as text (FIXED VERSION)', () => { + console.log('\n🔒 Security Fix Verification Report') + console.log('===================================') + + const { container } = render() + + const spanElement = container.querySelector('span') + const strongElement = container.querySelector('strong') + const scriptElements = container.querySelectorAll('script') + + console.log('\n✅ Fixed Implementation Results:') + console.log('- Email rendered in strong tag:', strongElement?.textContent) + console.log('- HTML tags visible as text:', strongElement?.textContent?.includes('', + 'normal@email.com', + ] + + testCases.forEach((testEmail, index) => { + const { container } = render() + + const strongElement = container.querySelector('strong') + const scriptElements = container.querySelectorAll('script') + const imgElements = container.querySelectorAll('img') + const divElements = container.querySelectorAll('div:not([data-testid])') + + console.log(`\n📧 Test Case ${index + 1}: ${testEmail.substring(0, 20)}...`) + console.log(` - Script elements: ${scriptElements.length}`) + console.log(` - Img elements: ${imgElements.length}`) + console.log(` - Malicious divs: ${divElements.length - 1}`) // -1 for container div + console.log(` - Text content: ${strongElement?.textContent === testEmail ? 'SAFE' : 'ISSUE'}`) + + // All should be safe + expect(scriptElements).toHaveLength(0) + expect(imgElements).toHaveLength(0) + expect(strongElement?.textContent).toBe(testEmail) + }) + + console.log('\n✅ All test cases passed - secure rendering confirmed') + }) + + it('should validate the translation structure is secure', () => { + console.log('\n🔍 Translation Security Analysis') + console.log('=================================') + + const { t } = require('react-i18next').useTranslation() + const prefix = t('login.checkCode.tipsPrefix') + + console.log('- Translation key used: login.checkCode.tipsPrefix') + console.log('- Translation value:', prefix) + console.log('- Contains HTML tags:', prefix.includes('<')) + console.log('- Pure text content:', !prefix.includes('<') && !prefix.includes('>')) + + // Verify translation is plain text + expect(prefix).toBe('We send a verification code to ') + expect(prefix).not.toContain('<') + expect(prefix).not.toContain('>') + expect(typeof prefix).toBe('string') + + console.log('\n✅ Translation structure is secure - no HTML content') + }) + + it('should confirm React automatic escaping works correctly', () => { + console.log('\n⚡ React Security Mechanism Test') + console.log('=================================') + + // Test React's automatic escaping with various inputs + const dangerousInputs = [ + '', + '', + '">', + '\'>alert(3)', + '
    click
    ', + ] + + dangerousInputs.forEach((input, index) => { + const TestComponent = () => {input} + const { container } = render() + + const strongElement = container.querySelector('strong') + const scriptElements = container.querySelectorAll('script') + + console.log(`\n🧪 Input ${index + 1}: ${input.substring(0, 30)}...`) + console.log(` - Rendered as text: ${strongElement?.textContent === input}`) + console.log(` - No script execution: ${scriptElements.length === 0}`) + + expect(strongElement?.textContent).toBe(input) + expect(scriptElements).toHaveLength(0) + }) + + console.log('\n🛡️ React automatic escaping is working perfectly') + }) +}) + +export {} diff --git a/web/app/(shareLayout)/webapp-reset-password/check-code/page.tsx b/web/app/(shareLayout)/webapp-reset-password/check-code/page.tsx index da754794b1..91e1021610 100644 --- a/web/app/(shareLayout)/webapp-reset-password/check-code/page.tsx +++ b/web/app/(shareLayout)/webapp-reset-password/check-code/page.tsx @@ -70,7 +70,10 @@ export default function CheckCode() {

    {t('login.checkCode.checkYourEmail')}

    - + + {t('login.checkCode.tipsPrefix')} + {email} +
    {t('login.checkCode.validTime')}

    diff --git a/web/app/(shareLayout)/webapp-signin/check-code/page.tsx b/web/app/(shareLayout)/webapp-signin/check-code/page.tsx index a2ba620ace..c80a006583 100644 --- a/web/app/(shareLayout)/webapp-signin/check-code/page.tsx +++ b/web/app/(shareLayout)/webapp-signin/check-code/page.tsx @@ -93,7 +93,10 @@ export default function CheckCode() {

    {t('login.checkCode.checkYourEmail')}

    - + + {t('login.checkCode.tipsPrefix')} + {email} +
    {t('login.checkCode.validTime')}

    diff --git a/web/app/reset-password/check-code/page.tsx b/web/app/reset-password/check-code/page.tsx index 480b513112..a2dfda1e5f 100644 --- a/web/app/reset-password/check-code/page.tsx +++ b/web/app/reset-password/check-code/page.tsx @@ -70,7 +70,10 @@ export default function CheckCode() {

    {t('login.checkCode.checkYourEmail')}

    - + + {t('login.checkCode.tipsPrefix')} + {email} +
    {t('login.checkCode.validTime')}

    diff --git a/web/app/signin/check-code/page.tsx b/web/app/signin/check-code/page.tsx index 912df62f1d..9c3f7768f8 100644 --- a/web/app/signin/check-code/page.tsx +++ b/web/app/signin/check-code/page.tsx @@ -71,7 +71,10 @@ export default function CheckCode() {

    {t('login.checkCode.checkYourEmail')}

    - + + {t('login.checkCode.tipsPrefix')} + {email} +
    {t('login.checkCode.validTime')}

    diff --git a/web/i18n/de-DE/login.ts b/web/i18n/de-DE/login.ts index 7ef0e2420a..ef87f05f79 100644 --- a/web/i18n/de-DE/login.ts +++ b/web/i18n/de-DE/login.ts @@ -79,9 +79,9 @@ const translation = { useAnotherMethod: 'Verwenden Sie eine andere Methode', validTime: 'Beachten Sie, dass der Code 5 Minuten lang gültig ist', emptyCode: 'Code ist erforderlich', - tips: 'Wir senden einen Verifizierungscode an {{email}}', invalidCode: 'Ungültiger Code', resend: 'Wieder senden', + tipsPrefix: 'Wir senden einen Bestätigungscode an', }, or: 'ODER', back: 'Zurück', diff --git a/web/i18n/en-US/login.ts b/web/i18n/en-US/login.ts index d47eb7c079..a00e73b901 100644 --- a/web/i18n/en-US/login.ts +++ b/web/i18n/en-US/login.ts @@ -79,7 +79,7 @@ const translation = { validate: 'Validate', checkCode: { checkYourEmail: 'Check your email', - tips: 'We send a verification code to {{email}}', + tipsPrefix: 'We send a verification code to ', validTime: 'Bear in mind that the code is valid for 5 minutes', verificationCode: 'Verification code', verificationCodePlaceholder: 'Enter 6-digit code', diff --git a/web/i18n/es-ES/login.ts b/web/i18n/es-ES/login.ts index fda14f3708..8fd82ecb85 100644 --- a/web/i18n/es-ES/login.ts +++ b/web/i18n/es-ES/login.ts @@ -78,10 +78,10 @@ const translation = { emptyCode: 'Se requiere código', useAnotherMethod: 'Usar otro método', resend: 'Reenviar', - tips: 'Enviamos un código de verificación a {{email}}', verificationCode: 'Código de verificación', validTime: 'Ten en cuenta que el código es válido durante 5 minutos', invalidCode: 'Código no válido', + tipsPrefix: 'Enviamos un código de verificación a', }, or: 'O', back: 'Atrás', diff --git a/web/i18n/fa-IR/login.ts b/web/i18n/fa-IR/login.ts index da2e5197eb..2b6098b95e 100644 --- a/web/i18n/fa-IR/login.ts +++ b/web/i18n/fa-IR/login.ts @@ -9,7 +9,6 @@ const translation = { namePlaceholder: 'نام کاربری شما', forget: 'رمز عبور خود را فراموش کرده‌اید؟', signBtn: 'ورود', - sso: 'ادامه با SSO', installBtn: 'راه‌اندازی', setAdminAccount: 'راه‌اندازی حساب مدیر', setAdminAccountDesc: 'بیشترین امتیازات برای حساب مدیر، که می‌تواند برای ایجاد برنامه‌ها و مدیریت ارائه‌دهندگان LLM و غیره استفاده شود.', @@ -81,8 +80,8 @@ const translation = { useAnotherMethod: 'از روش دیگری استفاده کنید', checkYourEmail: 'ایمیل خود را بررسی کنید', validTime: 'به خاطر داشته باشید که کد 5 دقیقه اعتبار دارد', - tips: 'کد درستی سنجی را به {{email}} ارسال می کنیم', resend: 'ارسال مجدد', + tipsPrefix: 'ما یک کد تأیید می‌فرستیم به ', }, or: 'یا', back: 'بازگشت', diff --git a/web/i18n/fr-FR/login.ts b/web/i18n/fr-FR/login.ts index 9e718cad2d..38b8159158 100644 --- a/web/i18n/fr-FR/login.ts +++ b/web/i18n/fr-FR/login.ts @@ -70,7 +70,6 @@ const translation = { activated: 'Connectez-vous maintenant', adminInitPassword: 'Mot de passe d\'initialisation de l\'administrateur', validate: 'Valider', - sso: 'Poursuivre avec l’authentification unique', checkCode: { verificationCode: 'Code de vérification', useAnotherMethod: 'Utiliser une autre méthode', @@ -82,7 +81,7 @@ const translation = { invalidCode: 'Code non valide', checkYourEmail: 'Vérifiez vos e-mails', validTime: 'Gardez à l’esprit que le code est valable 5 minutes', - tips: 'Nous envoyons un code de vérification à {{email}}', + tipsPrefix: 'Nous envoyons un code de vérification à', }, sendVerificationCode: 'Envoyer le code de vérification', or: 'OU', diff --git a/web/i18n/hi-IN/login.ts b/web/i18n/hi-IN/login.ts index 06019042b5..e89cea327a 100644 --- a/web/i18n/hi-IN/login.ts +++ b/web/i18n/hi-IN/login.ts @@ -9,7 +9,6 @@ const translation = { namePlaceholder: 'आपका उपयोगकर्ता नाम', forget: 'क्या आप पासवर्ड भूल गए?', signBtn: 'साइन इन करें', - sso: 'SSO के साथ जारी रखें', installBtn: 'सेट अप करें', setAdminAccount: 'एडमिन खाता सेट कर रहे हैं', setAdminAccountDesc: @@ -86,8 +85,8 @@ const translation = { resend: 'भेजें', checkYourEmail: 'अपना ईमेल जांचें', validTime: 'ध्यान रखें कि कोड 5 मिनट के लिए वैध है', - tips: 'हम {{email}} को एक सत्यापन कोड भेजते हैं', verificationCodePlaceholder: '6-अंक कोड दर्ज करें', + tipsPrefix: 'हम एक सत्यापन कोड भेजते हैं', }, sendVerificationCode: 'पुष्टि कोड भेजें', or: 'नहीं तो', diff --git a/web/i18n/it-IT/login.ts b/web/i18n/it-IT/login.ts index 47ae79bdd9..5009f99519 100644 --- a/web/i18n/it-IT/login.ts +++ b/web/i18n/it-IT/login.ts @@ -9,7 +9,6 @@ const translation = { namePlaceholder: 'Il tuo nome utente', forget: 'Hai dimenticato la password?', signBtn: 'Accedi', - sso: 'Continua con SSO', installBtn: 'Configura', setAdminAccount: 'Impostazione di un account amministratore', setAdminAccountDesc: @@ -91,8 +90,8 @@ const translation = { validTime: 'Tieni presente che il codice è valido per 5 minuti', didNotReceiveCode: 'Non hai ricevuto il codice?', checkYourEmail: 'Controlla la tua email', - tips: 'Inviamo un codice di verifica a {{email}}', useAnotherMethod: 'Usa un altro metodo', + tipsPrefix: 'Inviamo un codice di verifica a', }, or: 'O', back: 'Indietro', diff --git a/web/i18n/ja-JP/login.ts b/web/i18n/ja-JP/login.ts index 833bedf719..7c116c4c18 100644 --- a/web/i18n/ja-JP/login.ts +++ b/web/i18n/ja-JP/login.ts @@ -78,10 +78,10 @@ const translation = { didNotReceiveCode: 'コードが届きませんか?', resend: '再送', verificationCode: '認証コード', - tips: '確認コードを{{email}}に送信します。', validTime: 'コードは 5 分間有効であることに注意してください', emptyCode: 'コードが必要です', checkYourEmail: 'メールをチェックしてください', + tipsPrefix: '私たちは確認コードを送信します', }, useVerificationCode: '確認コードを使用する', or: '又は', diff --git a/web/i18n/ko-KR/login.ts b/web/i18n/ko-KR/login.ts index 51b68967c2..b050d4b9f5 100644 --- a/web/i18n/ko-KR/login.ts +++ b/web/i18n/ko-KR/login.ts @@ -73,7 +73,6 @@ const translation = { checkCode: { verify: '확인', verificationCode: '인증 코드', - tips: '{{email}}로 인증 코드를 보내드립니다.', validTime: '코드는 5 분 동안 유효합니다', checkYourEmail: '이메일 주소 확인', invalidCode: '유효하지 않은 코드', @@ -82,6 +81,7 @@ const translation = { useAnotherMethod: '다른 방법 사용', didNotReceiveCode: '코드를 받지 못하셨나요?', resend: '재전송', + tipsPrefix: '우리는 확인 코드를 보냅니다', }, back: '뒤로', or: '또는', diff --git a/web/i18n/pl-PL/login.ts b/web/i18n/pl-PL/login.ts index 8b63fec502..909d1a431f 100644 --- a/web/i18n/pl-PL/login.ts +++ b/web/i18n/pl-PL/login.ts @@ -9,7 +9,6 @@ const translation = { namePlaceholder: 'Twoja nazwa użytkownika', forget: 'Zapomniałeś hasła?', signBtn: 'Zaloguj się', - sso: 'Kontynuuj za pomocą SSO', installBtn: 'Ustaw', setAdminAccount: 'Ustawianie konta administratora', setAdminAccountDesc: @@ -86,8 +85,8 @@ const translation = { useAnotherMethod: 'Użyj innej metody', didNotReceiveCode: 'Nie otrzymałeś kodu?', verificationCode: 'Kod weryfikacyjny', - tips: 'Wysyłamy kod weryfikacyjny na adres {{email}}', emptyCode: 'Kod jest wymagany', + tipsPrefix: 'Wysyłamy kod weryfikacyjny do', }, continueWithCode: 'Kontynuuj z kodem', setYourAccount: 'Ustaw swoje konto', diff --git a/web/i18n/pt-BR/login.ts b/web/i18n/pt-BR/login.ts index 290cd3c8b4..150df678d9 100644 --- a/web/i18n/pt-BR/login.ts +++ b/web/i18n/pt-BR/login.ts @@ -70,19 +70,18 @@ const translation = { activated: 'Entrar agora', adminInitPassword: 'Senha de inicialização do administrador', validate: 'Validar', - sso: 'Continuar com SSO', checkCode: { useAnotherMethod: 'Use outro método', invalidCode: 'Código inválido', verificationCodePlaceholder: 'Digite o código de 6 dígitos', checkYourEmail: 'Verifique seu e-mail', - tips: 'Enviamos um código de verificação para {{email}}', emptyCode: 'O código é necessário', verify: 'Verificar', verificationCode: 'Código de verificação', resend: 'Reenviar', didNotReceiveCode: 'Não recebeu o código?', validTime: 'Lembre-se de que o código é válido por 5 minutos', + tipsPrefix: 'Enviamos um código de verificação para', }, resetPassword: 'Redefinir senha', or: 'OU', diff --git a/web/i18n/ro-RO/login.ts b/web/i18n/ro-RO/login.ts index 342010a10e..ca1b1d4e01 100644 --- a/web/i18n/ro-RO/login.ts +++ b/web/i18n/ro-RO/login.ts @@ -9,7 +9,6 @@ const translation = { namePlaceholder: 'Numele tău de utilizator', forget: 'Ai uitat parola?', signBtn: 'Autentificare', - sso: 'Continuă cu SSO', installBtn: 'Configurare', setAdminAccount: 'Configurare cont de administrator', setAdminAccountDesc: 'Privilegii maxime pentru contul de administrator, care poate fi utilizat pentru crearea de aplicații și gestionarea furnizorilor LLM, etc.', @@ -80,9 +79,9 @@ const translation = { verificationCodePlaceholder: 'Introduceți codul din 6 cifre', emptyCode: 'Codul este necesar', verify: 'Verifica', - tips: 'Trimitem un cod de verificare la {{email}}', useAnotherMethod: 'Utilizați o altă metodă', resend: 'Retrimite', + tipsPrefix: 'Trimitem un cod de verificare la', }, usePassword: 'Utilizați parola', useVerificationCode: 'Utilizarea codului de verificare', diff --git a/web/i18n/ru-RU/login.ts b/web/i18n/ru-RU/login.ts index 38e4559012..874b0aef0b 100644 --- a/web/i18n/ru-RU/login.ts +++ b/web/i18n/ru-RU/login.ts @@ -9,7 +9,6 @@ const translation = { namePlaceholder: 'Ваше имя пользователя', forget: 'Забыли пароль?', signBtn: 'Войти', - sso: 'Продолжить с SSO', installBtn: 'Настроить', setAdminAccount: 'Настройка учетной записи администратора', setAdminAccountDesc: 'Максимальные привилегии для учетной записи администратора, которые можно использовать для создания приложений, управления поставщиками LLM и т. д.', @@ -79,10 +78,10 @@ const translation = { emptyCode: 'Код обязателен для заполнения', verificationCode: 'Проверочный код', checkYourEmail: 'Проверьте свою электронную почту', - tips: 'Мы отправляем код подтверждения на {{email}}', validTime: 'Имейте в виду, что код действителен в течение 5 минут', verificationCodePlaceholder: 'Введите 6-значный код', useAnotherMethod: 'Используйте другой метод', + tipsPrefix: 'Мы отправляем код проверки на', }, back: 'Назад', changePasswordBtn: 'Установите пароль', diff --git a/web/i18n/sl-SI/login.ts b/web/i18n/sl-SI/login.ts index 479b8b9221..acb6aba2c6 100644 --- a/web/i18n/sl-SI/login.ts +++ b/web/i18n/sl-SI/login.ts @@ -9,7 +9,6 @@ const translation = { namePlaceholder: 'Vaše uporabniško ime', forget: 'Ste pozabili geslo?', signBtn: 'Prijava', - sso: 'Nadaljujte z SSO', installBtn: 'Namesti', setAdminAccount: 'Nastavitev administratorskega računa', setAdminAccountDesc: 'Najvišje pravice za administratorski račun, ki se lahko uporablja za ustvarjanje aplikacij in upravljanje LLM ponudnikov itd.', @@ -76,13 +75,13 @@ const translation = { verificationCodePlaceholder: 'Vnesite 6-mestno kodo', resend: 'Poslati', verificationCode: 'Koda za preverjanje', - tips: 'Kodo za preverjanje pošljemo na {{email}}', verify: 'Preveriti', validTime: 'Upoštevajte, da je koda veljavna 5 minut', checkYourEmail: 'Preverjanje e-pošte', didNotReceiveCode: 'Niste prejeli kode?', invalidCode: 'Neveljavna koda', useAnotherMethod: 'Uporabite drug način', + tipsPrefix: 'Pošljemo kodo za preverjanje na', }, useVerificationCode: 'Uporaba kode za preverjanje', licenseInactive: 'Licenca je neaktivna', diff --git a/web/i18n/th-TH/login.ts b/web/i18n/th-TH/login.ts index 3db8da4da8..621b9999a0 100644 --- a/web/i18n/th-TH/login.ts +++ b/web/i18n/th-TH/login.ts @@ -79,7 +79,6 @@ const translation = { validate: 'ตรวจ สอบ', checkCode: { checkYourEmail: 'ตรวจสอบอีเมลของคุณ', - tips: 'เราส่งรหัสยืนยันไปที่ {{email}}', validTime: 'โปรดทราบว่ารหัสนี้ใช้ได้นาน 5 นาที', verificationCode: 'รหัสยืนยัน', verificationCodePlaceholder: 'ป้อนรหัส 6 หลัก', @@ -89,6 +88,7 @@ const translation = { useAnotherMethod: 'ใช้วิธีอื่น', emptyCode: 'ต้องใช้รหัส', invalidCode: 'รหัสไม่ถูกต้อง', + tipsPrefix: 'เราส่งรหัสตรวจสอบไปยัง', }, resetPassword: 'รีเซ็ตรหัสผ่าน', resetPasswordDesc: 'พิมพ์อีเมลที่คุณใช้ลงทะเบียนบน Dify แล้วเราจะส่งอีเมลรีเซ็ตรหัสผ่านให้คุณ', diff --git a/web/i18n/tr-TR/login.ts b/web/i18n/tr-TR/login.ts index b525dd0dd7..96832ae581 100644 --- a/web/i18n/tr-TR/login.ts +++ b/web/i18n/tr-TR/login.ts @@ -9,7 +9,6 @@ const translation = { namePlaceholder: 'Kullanıcı adınız', forget: 'Şifrenizi mi unuttunuz?', signBtn: 'Giriş yap', - sso: 'SSO ile devam et', installBtn: 'Kurulum', setAdminAccount: 'Yönetici hesabı ayarlama', setAdminAccountDesc: 'Yönetici hesabı için maksimum ayrıcalıklar, uygulama oluşturma ve LLM sağlayıcılarını yönetme gibi işlemler için kullanılabilir.', @@ -81,8 +80,8 @@ const translation = { verificationCodePlaceholder: '6 haneli kodu girin', useAnotherMethod: 'Başka bir yöntem kullanın', didNotReceiveCode: 'Kodu almadınız mı?', - tips: '{{email}} adresine bir doğrulama kodu gönderiyoruz', resend: 'Tekrar Gönder', + tipsPrefix: 'Bir doğrulama kodu gönderiyoruz', }, enterYourName: 'Lütfen kullanıcı adınızı giriniz', resetPassword: 'Şifre Sıfırlama', diff --git a/web/i18n/uk-UA/login.ts b/web/i18n/uk-UA/login.ts index b586f3f243..a6b8d725e8 100644 --- a/web/i18n/uk-UA/login.ts +++ b/web/i18n/uk-UA/login.ts @@ -70,7 +70,6 @@ const translation = { activated: 'Увійти зараз', adminInitPassword: 'Пароль ініціалізації адміністратора', validate: 'Перевірити', - sso: 'Продовжуйте працювати з SSW', checkCode: { didNotReceiveCode: 'Не отримали код?', invalidCode: 'Невірний код', @@ -81,8 +80,8 @@ const translation = { verify: 'Перевірити', verificationCode: 'Код підтвердження', useAnotherMethod: 'Використовуйте інший спосіб', - tips: 'Ми надсилаємо код підтвердження на адресу {{email}}', validTime: 'Майте на увазі, що код дійсний протягом 5 хвилин', + tipsPrefix: 'Ми відправляємо код підтвердження на', }, back: 'Задній', backToLogin: 'Назад до входу', diff --git a/web/i18n/vi-VN/login.ts b/web/i18n/vi-VN/login.ts index 520d5250a8..1e770402dd 100644 --- a/web/i18n/vi-VN/login.ts +++ b/web/i18n/vi-VN/login.ts @@ -70,7 +70,6 @@ const translation = { activated: 'Đăng nhập ngay', adminInitPassword: 'Mật khẩu khởi tạo quản trị viên', validate: 'Xác thực', - sso: 'Tiếp tục với SSO', checkCode: { checkYourEmail: 'Kiểm tra email của bạn', verify: 'Xác minh', @@ -82,7 +81,7 @@ const translation = { useAnotherMethod: 'Sử dụng phương pháp khác', emptyCode: 'Mã là bắt buộc', verificationCodePlaceholder: 'Nhập mã gồm 6 chữ số', - tips: 'Chúng tôi gửi mã xác minh đến {{email}}', + tipsPrefix: 'Chúng tôi gửi mã xác minh đến', }, back: 'Lưng', withSSO: 'Tiếp tục với SSO', diff --git a/web/i18n/zh-Hans/login.ts b/web/i18n/zh-Hans/login.ts index 2276436d0e..d0b2cbe8c5 100644 --- a/web/i18n/zh-Hans/login.ts +++ b/web/i18n/zh-Hans/login.ts @@ -79,7 +79,6 @@ const translation = { validate: '验证', checkCode: { checkYourEmail: '验证您的电子邮件', - tips: '验证码已经发送到您的邮箱 {{email}}', validTime: '请注意验证码 5 分钟内有效', verificationCode: '验证码', verificationCodePlaceholder: '输入 6 位验证码', @@ -89,6 +88,7 @@ const translation = { useAnotherMethod: '使用其他方式登录', emptyCode: '验证码不能为空', invalidCode: '验证码无效', + tipsPrefix: '我们发送一个验证码到', }, resetPassword: '重置密码', resetPasswordDesc: '请输入您的电子邮件地址以重置密码。我们将向您发送一封电子邮件。', diff --git a/web/i18n/zh-Hant/login.ts b/web/i18n/zh-Hant/login.ts index 8187323276..64f8122857 100644 --- a/web/i18n/zh-Hant/login.ts +++ b/web/i18n/zh-Hant/login.ts @@ -76,12 +76,12 @@ const translation = { didNotReceiveCode: '沒有收到驗證碼?', emptyCode: '驗證碼是必需的', checkYourEmail: '檢查您的電子郵件', - tips: '我們將驗證碼發送到 {{email}}', verificationCodePlaceholder: '輸入 6 位代碼', useAnotherMethod: '使用其他方法', validTime: '請記住,該代碼的有效期為 5 分鐘', verificationCode: '驗證碼', invalidCode: '無效代碼', + tipsPrefix: '我們發送一個驗證碼到', }, continueWithCode: 'Continue With Code', or: '或', From 78d2f49e01ad7ce7215006783f2bcbbebf96e576 Mon Sep 17 00:00:00 2001 From: yyh <92089059+lyzno1@users.noreply.github.com> Date: Mon, 4 Aug 2025 13:30:11 +0800 Subject: [PATCH 129/415] feat(i18n): enhance auto-gen script for template literals and add app-debug translations (#23344) --- web/i18n-config/auto-gen-i18n.js | 46 ++- web/i18n/de-DE/app-debug.ts | 151 +++++++++- web/i18n/es-ES/app-debug.ts | 103 +++++++ web/i18n/fa-IR/app-debug.ts | 286 ++++++++++++++++++ web/i18n/fr-FR/app-debug.ts | 104 +++++++ web/i18n/hi-IN/app-debug.ts | 128 ++++++++ web/i18n/it-IT/app-debug.ts | 103 +++++++ web/i18n/ko-KR/app-debug.ts | 104 +++++++ web/i18n/pl-PL/app-debug.ts | 104 +++++++ web/i18n/pt-BR/app-debug.ts | 108 +++++++ web/i18n/ro-RO/app-debug.ts | 104 +++++++ web/i18n/ru-RU/app-debug.ts | 50 ++++ web/i18n/sl-SI/app-debug.ts | 296 +++++++++++++++++++ web/i18n/th-TH/app-debug.ts | 481 +++++++++++++++++++++++++++++++ web/i18n/tr-TR/app-debug.ts | 100 +++++-- web/i18n/uk-UA/app-debug.ts | 197 +++++++++++-- web/i18n/vi-VN/app-debug.ts | 104 +++++++ web/i18n/zh-Hant/app-debug.ts | 104 +++++++ 18 files changed, 2600 insertions(+), 73 deletions(-) diff --git a/web/i18n-config/auto-gen-i18n.js b/web/i18n-config/auto-gen-i18n.js index 45f5606393..9a8e741063 100644 --- a/web/i18n-config/auto-gen-i18n.js +++ b/web/i18n-config/auto-gen-i18n.js @@ -42,6 +42,13 @@ async function translateMissingKeyDeeply(sourceObj, targetObject, toLanguage) { return } + // Skip template literal placeholders + if (source === 'TEMPLATE_LITERAL_PLACEHOLDER') { + console.log(`⏭️ Skipping template literal key: "${key}"`) + skippedKeys.push(`${key}: ${source}`) + return + } + // Only skip obvious code patterns, not normal text with parentheses const codePatterns = [ /\{\{.*\}\}/, // Template variables like {{key}} @@ -102,6 +109,15 @@ async function autoGenTrans(fileName, toGenLanguage, isDryRun = false) { try { const content = fs.readFileSync(fullKeyFilePath, 'utf8') + // Temporarily replace template literals with regular strings for AST parsing + // This allows us to process other keys while skipping problematic ones + let processedContent = content + const templateLiteralPattern = /(resolutionTooltip):\s*`([^`]*)`/g + processedContent = processedContent.replace(templateLiteralPattern, (match, key, value) => { + console.log(`⏭️ Temporarily replacing template literal for key: ${key}`) + return `${key}: "TEMPLATE_LITERAL_PLACEHOLDER"` + }) + // Create a safer module environment for vm const moduleExports = {} const context = { @@ -114,7 +130,7 @@ async function autoGenTrans(fileName, toGenLanguage, isDryRun = false) { } // Use vm.runInNewContext instead of eval for better security - vm.runInNewContext(transpile(content), context) + vm.runInNewContext(transpile(processedContent), context) const fullKeyContent = moduleExports.default || moduleExports @@ -132,7 +148,14 @@ export default translation // To keep object format and format it for magicast to work: const translation = { ... } => export default {...} const readContent = await loadFile(toGenLanguageFilePath) const { code: toGenContent } = generateCode(readContent) - const mod = await parseModule(`export default ${toGenContent.replace('export default translation', '').replace('const translation = ', '')}`) + + // Also handle template literals in target file content + let processedToGenContent = toGenContent + processedToGenContent = processedToGenContent.replace(templateLiteralPattern, (match, key, value) => { + console.log(`⏭️ Temporarily replacing template literal in target file for key: ${key}`) + return `${key}: "TEMPLATE_LITERAL_PLACEHOLDER"` + }) + const mod = await parseModule(`export default ${processedToGenContent.replace('export default translation', '').replace('const translation = ', '')}`) const toGenOutPut = mod.exports.default console.log(`\n🌍 Processing ${fileName} for ${toGenLanguage}...`) @@ -151,11 +174,26 @@ export default translation } const { code } = generateCode(mod) - const res = `const translation =${code.replace('export default', '')} + let res = `const translation =${code.replace('export default', '')} export default translation `.replace(/,\n\n/g, ',\n').replace('};', '}') + // Restore original template literals by reading from the original target file if it exists + if (fs.existsSync(toGenLanguageFilePath)) { + const originalContent = fs.readFileSync(toGenLanguageFilePath, 'utf8') + // Extract original template literal content for resolutionTooltip + const originalMatch = originalContent.match(/(resolutionTooltip):\s*`([^`]*)`/s) + if (originalMatch) { + const [fullMatch, key, value] = originalMatch + res = res.replace( + `${key}: "TEMPLATE_LITERAL_PLACEHOLDER"`, + `${key}: \`${value}\``, + ) + console.log(`🔄 Restored original template literal for key: ${key}`) + } + } + if (!isDryRun) { fs.writeFileSync(toGenLanguageFilePath, res) console.log(`💾 Saved translations to ${toGenLanguageFilePath}`) @@ -190,7 +228,7 @@ async function main() { .readdirSync(path.resolve(__dirname, i18nFolder, targetLanguage)) .filter(file => /\.ts$/.test(file)) // Only process .ts files .map(file => file.replace(/\.ts$/, '')) - .filter(f => f !== 'app-debug') // ast parse error in app-debug + // Removed app-debug exclusion, now only skip specific problematic keys // Filter by target file if specified const filesToProcess = targetFile ? files.filter(f => f === targetFile) : files diff --git a/web/i18n/de-DE/app-debug.ts b/web/i18n/de-DE/app-debug.ts index 93511faf55..68f674b76a 100644 --- a/web/i18n/de-DE/app-debug.ts +++ b/web/i18n/de-DE/app-debug.ts @@ -197,6 +197,7 @@ const translation = { after: '', }, }, + contentEnableLabel: 'Moderater Inhalt aktiviert', }, fileUpload: { title: 'Datei-Upload', @@ -241,6 +242,7 @@ const translation = { 'Bitte warten Sie auf die Antwort auf die Stapelaufgabe, um abzuschließen.', notSelectModel: 'Bitte wählen Sie ein Modell', waitForImgUpload: 'Bitte warten Sie, bis das Bild hochgeladen ist', + waitForFileUpload: 'Bitte warten Sie, bis die Datei(en) hochgeladen sind', }, chatSubTitle: 'Anweisungen', completionSubTitle: 'Vor-Prompt', @@ -276,20 +278,62 @@ const translation = { queryNoBeEmpty: 'Anfrage muss im Prompt gesetzt sein', }, variableConfig: { - description: 'Einstellung für Variable {{varName}}', - fieldType: 'Feldtyp', - string: 'Kurztext', - paragraph: 'Absatz', - select: 'Auswählen', - notSet: 'Nicht gesetzt, versuchen Sie, {{input}} im Vor-Prompt zu tippen', - stringTitle: 'Formular-Textfeldoptionen', - maxLength: 'Maximale Länge', - options: 'Optionen', - addOption: 'Option hinzufügen', - apiBasedVar: 'API-basierte Variable', - defaultValue: 'Standardwert', - noDefaultValue: 'Kein Standardwert', - selectDefaultValue: 'Standardwert auswählen', + 'description': 'Einstellung für Variable {{varName}}', + 'fieldType': 'Feldtyp', + 'string': 'Kurztext', + 'paragraph': 'Absatz', + 'select': 'Auswählen', + 'notSet': 'Nicht gesetzt, versuchen Sie, {{input}} im Vor-Prompt zu tippen', + 'stringTitle': 'Formular-Textfeldoptionen', + 'maxLength': 'Maximale Länge', + 'options': 'Optionen', + 'addOption': 'Option hinzufügen', + 'apiBasedVar': 'API-basierte Variable', + 'defaultValue': 'Standardwert', + 'noDefaultValue': 'Kein Standardwert', + 'selectDefaultValue': 'Standardwert auswählen', + 'file': { + image: { + name: 'Bild', + }, + audio: { + name: 'Audio', + }, + document: { + name: 'Dokument', + }, + video: { + name: 'Video', + }, + custom: { + description: 'Geben Sie andere Dateitypen an.', + createPlaceholder: ' Dateiendung, z.B. .doc', + name: 'Andere Dateitypen', + }, + supportFileTypes: 'Unterstützte Dateitypen', + }, + 'errorMsg': { + optionRepeat: 'Hat Wiederholungsoptionen', + atLeastOneOption: 'Mindestens eine Option ist erforderlich', + labelNameRequired: 'Labelname ist erforderlich', + varNameCanBeRepeat: 'Variablenname kann nicht wiederholt werden', + }, + 'multi-files': 'Dateiliste', + 'varName': 'Variablenname', + 'content': 'Inhalt', + 'single-file': 'Einzelne Datei', + 'labelName': 'Kennsatzname', + 'addModalTitle': 'Eingabefeld hinzufügen', + 'hide': 'Verstecken', + 'inputPlaceholder': 'Bitte geben Sie ein', + 'both': 'Beide', + 'uploadFileTypes': 'Dateitypen hochladen', + 'maxNumberOfUploads': 'Maximale Anzahl von Uploads', + 'number': 'Zahl', + 'editModalTitle': 'Eingabefeld bearbeiten', + 'required': 'Erforderlich', + 'text-input': 'Kurztext', + 'localUpload': 'Lokaler Upload', }, vision: { name: 'Vision', @@ -309,6 +353,7 @@ const translation = { url: 'URL', uploadLimit: 'Upload-Limit', }, + onlySupportVisionModelTip: 'Unterstützt nur Bildverarbeitungsmodelle', }, voice: { name: 'Stimme', @@ -320,6 +365,9 @@ const translation = { language: 'Sprache', resolutionTooltip: 'Text-zu-Sprache unterstützte Sprache.', voice: 'Stimme', + autoPlay: 'Automatische Wiedergabe', + autoPlayEnabled: 'Auf', + autoPlayDisabled: 'Aus', }, }, openingStatement: { @@ -376,6 +424,8 @@ const translation = { score_threshold: 'Schwellenwert', score_thresholdTip: 'Wird verwendet, um den Ähnlichkeitsschwellenwert für die Abschnittsfilterung einzustellen.', retrieveChangeTip: 'Das Ändern des Indexmodus und des Abfragemodus kann Anwendungen beeinflussen, die mit diesem Wissen verbunden sind.', + embeddingModelRequired: 'Ein konfiguriertes Einbettungsmodell ist erforderlich', + knowledgeTip: 'Klicken Sie auf die Schaltfläche " ", um Wissen hinzuzufügen', }, debugAsSingleModel: 'Als Einzelmodell debuggen', debugAsMultipleModel: 'Als Mehrfachmodelle debuggen', @@ -417,6 +467,79 @@ const translation = { enabled: 'Aktiviert', }, }, + codegen: { + applyChanges: 'Änderungen übernehmen', + generatedCodeTitle: 'Generierter Code', + instructionPlaceholder: 'Geben Sie eine detaillierte Beschreibung des Codes ein, den Sie generieren möchten.', + overwriteConfirmMessage: 'Durch diese Aktion wird der vorhandene Code überschrieben. Möchten Sie fortfahren?', + title: 'Codegenerator', + noDataLine1: 'Beschreiben Sie links Ihren Anwendungsfall,', + loading: 'Code wird generiert...', + resTitle: 'Generierter Code', + description: 'Der Code-Generator verwendet konfigurierte Modelle, um qualitativ hochwertigen Code basierend auf Ihren Anweisungen zu generieren. Bitte geben Sie klare und detaillierte Anweisungen.', + instruction: 'Anweisungen', + apply: 'Anwenden', + generate: 'Erzeugen', + overwriteConfirmTitle: 'Vorhandenen Code überschreiben?', + noDataLine2: 'Die Codevorschau wird hier angezeigt.', + }, + generate: { + template: { + pythonDebugger: { + instruction: 'Ein Bot, der Ihren Code basierend auf Ihren Anweisungen generieren und debuggen kann', + name: 'Python-Debugger', + }, + translation: { + instruction: 'Ein Übersetzer, der mehrere Sprachen übersetzen kann', + name: 'Übersetzung', + }, + professionalAnalyst: { + name: 'Professioneller Analyst', + instruction: 'Extrahieren Sie Erkenntnisse, identifizieren Sie Risiken und destillieren Sie wichtige Informationen aus langen Berichten in einem einzigen Memo', + }, + excelFormulaExpert: { + instruction: 'Ein Chatbot, der Anfängern helfen kann, Excel-Formeln basierend auf Benutzeranweisungen zu verstehen, zu verwenden und zu erstellen', + name: 'Excel-Formel-Experte', + }, + travelPlanning: { + instruction: 'Der Reiseplanungsassistent ist ein intelligentes Tool, mit dem Benutzer ihre Reisen mühelos planen können', + name: 'Reiseplanung', + }, + SQLSorcerer: { + name: 'SQL-Zauberer', + instruction: 'Verwandeln Sie alltägliche Sprache in SQL-Abfragen', + }, + GitGud: { + name: 'Git gud', + instruction: 'Generieren geeigneter Git-Befehle basierend auf vom Benutzer beschriebenen Aktionen zur Versionskontrolle', + }, + meetingTakeaways: { + instruction: 'Fassen Sie Meetings in prägnante Zusammenfassungen zusammen, die Diskussionsthemen, wichtige Erkenntnisse und Aktionspunkte enthalten', + name: 'Takeaways für Meetings', + }, + writingsPolisher: { + instruction: 'Verwenden Sie fortgeschrittene Lektoratstechniken, um Ihre Texte zu verbessern', + name: 'Polierer für Schreibstil', + }, + }, + title: 'Eingabeaufforderungs-Generator', + apply: 'Anwenden', + overwriteTitle: 'Vorhandene Konfiguration überschreiben?', + instructionPlaceHolder: 'Schreiben Sie klare und spezifische Anweisungen.', + noDataLine1: 'Beschreiben Sie links Ihren Anwendungsfall,', + noDataLine2: 'Die Orchestrierungsvorschau wird hier angezeigt.', + instruction: 'Anweisungen', + tryIt: 'Versuch es', + generate: 'Erzeugen', + overwriteMessage: 'Durch Anwenden dieser Eingabeaufforderung wird die vorhandene Konfiguration überschrieben.', + loading: 'Orchestrieren Sie die Anwendung für Sie...', + resTitle: 'Generierte Eingabeaufforderung', + description: 'Der Eingabeaufforderungsgenerator verwendet das konfigurierte Modell, um Eingabeaufforderungen für eine höhere Qualität und bessere Struktur zu optimieren. Bitte schreiben Sie klare und detaillierte Anweisungen.', + }, + warningMessage: { + timeoutExceeded: 'Die Ergebnisse werden aufgrund einer Zeitüberschreitung nicht angezeigt. Bitte beziehen Sie sich auf die Protokolle, um die vollständigen Ergebnisse zu erhalten.', + }, + noResult: 'Hier wird die Ausgabe angezeigt.', } export default translation diff --git a/web/i18n/es-ES/app-debug.ts b/web/i18n/es-ES/app-debug.ts index dbdc32c36b..dd670fab04 100644 --- a/web/i18n/es-ES/app-debug.ts +++ b/web/i18n/es-ES/app-debug.ts @@ -197,6 +197,7 @@ const translation = { after: '', }, }, + contentEnableLabel: 'Contenido moderado habilitado', }, fileUpload: { title: 'Subida de archivos', @@ -240,6 +241,7 @@ const translation = { waitForBatchResponse: 'Por favor espera la respuesta a la tarea por lotes para completar.', notSelectModel: 'Por favor elige un modelo', waitForImgUpload: 'Por favor espera a que la imagen se cargue', + waitForFileUpload: 'Espere a que se cargue el archivo o los archivos', }, chatSubTitle: 'Instrucciones', completionSubTitle: 'Prefijo de la Indicación', @@ -302,6 +304,32 @@ const translation = { 'defaultValue': 'Valor predeterminado', 'noDefaultValue': 'Sin valor predeterminado', 'selectDefaultValue': 'Seleccionar valor predeterminado', + 'file': { + image: { + name: 'Imagen', + }, + audio: { + name: 'Audio', + }, + document: { + name: 'Documento', + }, + video: { + name: 'Vídeo', + }, + custom: { + name: 'Otros tipos de archivos', + description: 'Especifique otros tipos de archivo.', + createPlaceholder: ' Extensión de archivo, por ejemplo, .doc', + }, + supportFileTypes: 'Tipos de archivos de soporte', + }, + 'uploadFileTypes': 'Cargar tipos de archivos', + 'localUpload': 'Carga local', + 'both': 'ambos', + 'single-file': 'En fila india', + 'maxNumberOfUploads': 'Número máximo de cargas', + 'multi-files': 'Lista de archivos', }, vision: { name: 'Visión', @@ -321,6 +349,7 @@ const translation = { url: 'URL', uploadLimit: 'Límite de carga', }, + onlySupportVisionModelTip: 'Solo admite modelos de visión', }, voice: { name: 'Voz', @@ -389,6 +418,7 @@ const translation = { score_threshold: 'Umbral de Puntuación', score_thresholdTip: 'Usado para establecer el umbral de similitud para la filtración de fragmentos.', retrieveChangeTip: 'Modificar el modo de índice y el modo de recuperación puede afectar las aplicaciones asociadas con este Conocimiento.', + embeddingModelRequired: 'Se requiere un modelo de incrustación configurado', }, debugAsSingleModel: 'Depurar como Modelo Único', debugAsMultipleModel: 'Depurar como Múltiples Modelos', @@ -430,6 +460,79 @@ const translation = { enabled: 'Habilitado', }, }, + codegen: { + apply: 'Aplicar', + overwriteConfirmMessage: 'Esta acción sobrescribirá el código existente. ¿Quieres continuar?', + instruction: 'Instrucciones', + loading: 'Generando código...', + title: 'Generador de código', + resTitle: 'Código generado', + description: 'El Generador de código utiliza modelos configurados para generar código de alta calidad basado en sus instrucciones. Proporcione instrucciones claras y detalladas.', + noDataLine1: 'Describa su caso de uso a la izquierda,', + generate: 'Generar', + generatedCodeTitle: 'Código generado', + noDataLine2: 'La vista previa del código se mostrará aquí.', + overwriteConfirmTitle: '¿Sobrescribir el código existente?', + instructionPlaceholder: 'Introduzca una descripción detallada del código que desea generar.', + applyChanges: 'Aplicar cambios', + }, + generate: { + template: { + pythonDebugger: { + name: 'Depurador de Python', + instruction: 'Un bot que puede generar y depurar el código en función de las instrucciones', + }, + translation: { + instruction: 'Un traductor que puede traducir varios idiomas', + name: 'Traducción', + }, + professionalAnalyst: { + instruction: 'Extraiga información, identifique riesgos y extraiga información clave de informes largos en un solo memorándum', + name: 'Analista profesional', + }, + excelFormulaExpert: { + name: 'Experto en fórmulas de Excel', + instruction: 'Un chatbot que puede ayudar a los usuarios novatos a comprender, usar y crear fórmulas de Excel basadas en las instrucciones del usuario', + }, + travelPlanning: { + instruction: 'El Asistente de planificación de viajes es una herramienta inteligente diseñada para ayudar a los usuarios a planificar sus viajes sin esfuerzo', + name: 'Planificación de viajes', + }, + SQLSorcerer: { + instruction: 'Transforme el lenguaje cotidiano en consultas SQL', + name: 'Hechicero SQL', + }, + GitGud: { + name: 'Git gud', + instruction: 'Generar comandos de Git adecuados basados en acciones de control de versiones descritas por el usuario', + }, + meetingTakeaways: { + name: 'Conclusiones de la reunión', + instruction: 'Destilar las reuniones en resúmenes concisos que incluyan temas de discusión, conclusiones clave y elementos de acción', + }, + writingsPolisher: { + name: 'Pulidora de escritura', + instruction: 'Utiliza técnicas avanzadas de corrección de textos para mejorar tus escritos', + }, + }, + apply: 'Aplicar', + instruction: 'Instrucciones', + noDataLine2: 'La vista previa de orquestación se mostrará aquí.', + description: 'El generador de mensajes utiliza el modelo configurado para optimizar los mensajes para una mayor calidad y una mejor estructura. Escriba instrucciones claras y detalladas.', + generate: 'Generar', + title: 'Generador de avisos', + tryIt: 'Pruébalo', + overwriteMessage: 'La aplicación de este mensaje anulará la configuración existente.', + resTitle: 'Mensaje generado', + noDataLine1: 'Describa su caso de uso a la izquierda,', + overwriteTitle: '¿Anular la configuración existente?', + loading: 'Orquestando la aplicación para usted...', + instructionPlaceHolder: 'Escriba instrucciones claras y específicas.', + }, + warningMessage: { + timeoutExceeded: 'Los resultados no se muestran debido al tiempo de espera. Consulte los registros para obtener resultados completos.', + }, + noResult: 'La salida se mostrará aquí.', } export default translation diff --git a/web/i18n/fa-IR/app-debug.ts b/web/i18n/fa-IR/app-debug.ts index 5efbb9421b..333fb68620 100644 --- a/web/i18n/fa-IR/app-debug.ts +++ b/web/i18n/fa-IR/app-debug.ts @@ -197,6 +197,7 @@ const translation = { after: '', }, }, + contentEnableLabel: 'محتوای متوسط فعال شده است', }, generate: { title: 'تولید کننده دستورالعمل', @@ -478,6 +479,291 @@ const translation = { description: 'فعال‌سازی صوت به مدل اجازه می‌دهد فایل‌های صوتی را برای رونویسی و تجزیه و تحلیل پردازش کند.', }, }, + codegen: { + apply: 'درخواست', + resTitle: 'کد تولید شده', + generate: 'تولید', + loading: 'تولید کد...', + applyChanges: 'اعمال تغییرات', + generatedCodeTitle: 'کد تولید شده', + title: 'ژنراتور کد', + instruction: 'دستورالعمل', + instructionPlaceholder: 'توضیحات دقیق کدی را که می خواهید تولید کنید وارد کنید.', + overwriteConfirmMessage: 'این عمل کد موجود را بازنویسی می کند. آیا می خواهید ادامه دهید؟', + overwriteConfirmTitle: 'کد موجود را بازنویسی کنید؟', + noDataLine2: 'پیش نمایش کد در اینجا نشان داده می شود.', + noDataLine1: 'مورد استفاده خود را در سمت چپ شرح دهید،', + description: 'Code Generator از مدل های پیکربندی شده برای تولید کد با کیفیت بالا بر اساس دستورالعمل های شما استفاده می کند. لطفا دستورالعمل های واضح و دقیق ارائه دهید.', + }, + generate: { + template: { + pythonDebugger: { + name: 'اشکال زدایی پایتون', + instruction: 'رباتی که می تواند کد شما را بر اساس دستورالعمل شما تولید و اشکال زدایی کند', + }, + translation: { + name: 'ترجمه', + instruction: 'مترجمی که می تواند چندین زبان را ترجمه کند', + }, + professionalAnalyst: { + name: 'تحلیلگر حرفه ای', + instruction: 'استخراج بینش، شناسایی ریسک و تقطیر اطلاعات کلیدی از گزارش های طولانی در یک یادداشت', + }, + excelFormulaExpert: { + name: 'کارشناس فرمول اکسل', + instruction: 'یک ربات چت که می تواند به کاربران تازه کار کمک کند تا فرمول های اکسل را بر اساس دستورالعمل های کاربر درک، استفاده و ایجاد کنند', + }, + travelPlanning: { + name: 'برنامه ریزی سفر', + instruction: 'دستیار برنامه ریزی سفر ابزاری هوشمند است که برای کمک به کاربران در برنامه ریزی بدون زحمت سفرهای خود طراحی شده است', + }, + SQLSorcerer: { + name: 'جادوگر SQL', + instruction: 'تبدیل زبان روزمره به کوئری های SQL', + }, + GitGud: { + name: 'گیت گود', + instruction: 'فرمان های Git مناسب را بر اساس اکشن های کنترل نسخه توصیف شده توسط کاربر ایجاد کنید', + }, + meetingTakeaways: { + name: 'نکات مهم جلسه', + instruction: 'جلسات را به خلاصه های مختصر از جمله موضوعات بحث، نکات کلیدی و موارد اقدام تقطیر کنید', + }, + writingsPolisher: { + name: 'پولیش نوشتن', + instruction: 'از تکنیک های پیشرفته ویرایش کپی برای بهبود نوشته های خود استفاده کنید', + }, + }, + title: 'ژنراتور سریع', + resTitle: 'اعلان تولید شده', + overwriteTitle: 'پیکربندی موجود را لغو کنید؟', + generate: 'تولید', + noDataLine1: 'مورد استفاده خود را در سمت چپ شرح دهید،', + apply: 'درخواست', + instruction: 'دستورالعمل', + overwriteMessage: 'اعمال این اعلان پیکربندی موجود را لغو می کند.', + instructionPlaceHolder: 'دستورالعمل های واضح و مشخص بنویسید.', + tryIt: 'آن را امتحان کنید', + noDataLine2: 'پیش نمایش ارکستراسیون در اینجا نشان داده می شود.', + loading: 'هماهنگ کردن برنامه برای شما...', + description: 'Prompt Generator از مدل پیکربندی شده برای بهینه سازی درخواست ها برای کیفیت بالاتر و ساختار بهتر استفاده می کند. لطفا دستورالعمل های واضح و دقیق بنویسید.', + }, + resetConfig: { + title: 'بازنشانی را تأیید کنید؟', + message: 'بازنشانی تغییرات را دور می اندازد و آخرین پیکربندی منتشر شده را بازیابی می کند.', + }, + errorMessage: { + notSelectModel: 'لطفا یک مدل را انتخاب کنید', + waitForResponse: 'لطفا منتظر بمانید تا پاسخ به پیام قبلی کامل شود.', + queryRequired: 'درخواست متن الزامی است.', + waitForFileUpload: 'لطفا منتظر بمانید تا فایل/فایل ها آپلود شوند', + waitForImgUpload: 'لطفا منتظر بمانید تا تصویر آپلود شود', + waitForBatchResponse: 'لطفا منتظر بمانید تا پاسخ به کار دسته ای تکمیل شود.', + }, + warningMessage: { + timeoutExceeded: 'نتایج به دلیل مهلت زمانی نمایش داده نمی شوند. لطفا برای جمع آوری نتایج کامل به گزارش ها مراجعه کنید.', + }, + variableTable: { + key: 'کلید متغیر', + typeSelect: 'انتخاب', + action: 'اقدامات', + typeString: 'رشته', + name: 'نام فیلد ورودی کاربر', + type: 'نوع ورودی', + optional: 'اختیاری', + }, + varKeyError: {}, + otherError: { + promptNoBeEmpty: 'اعلان نمی تواند خالی باشد', + historyNoBeEmpty: 'سابقه مکالمه باید در اعلان تنظیم شود', + queryNoBeEmpty: 'پرس و جو باید در اعلان تنظیم شود', + }, + variableConfig: { + 'file': { + image: { + name: 'تصویر', + }, + audio: { + name: 'صوتی', + }, + document: { + name: 'سند', + }, + video: { + name: 'ویدئو', + }, + custom: { + name: 'انواع فایل های دیگر', + createPlaceholder: ' پسوند فایل، به عنوان مثال .doc', + description: 'انواع فایل های دیگر را مشخص کنید.', + }, + supportFileTypes: 'انواع فایل های پشتیبانی', + }, + 'errorMsg': { + optionRepeat: 'دارای گزینه های تکرار', + varNameCanBeRepeat: 'نام متغیر را نمی توان تکرار کرد', + labelNameRequired: 'نام برچسب الزامی است', + atLeastOneOption: 'حداقل یک گزینه مورد نیاز است', + }, + 'number': 'شماره', + 'hide': 'مخفی کردن', + 'both': 'هر دو', + 'single-file': 'تک فایل', + 'select': 'انتخاب', + 'inputPlaceholder': 'لطفا وارد کنید', + 'editModalTitle': 'ویرایش فیلد ورودی', + 'paragraph': 'پاراگراف', + 'fieldType': 'نوع فیلد', + 'maxLength': 'حداکثر طول', + 'varName': 'نام متغیر', + 'maxNumberOfUploads': 'حداکثر تعداد آپلود', + 'noDefaultValue': 'بدون مقدار پیش فرض', + 'addOption': 'افزودن گزینه', + 'stringTitle': 'گزینه های جعبه متن فرم', + 'options': 'گزینه', + 'selectDefaultValue': 'مقدار پیش فرض را انتخاب کنید', + 'content': 'محتوای', + 'multi-files': 'لیست فایل ها', + 'labelName': 'نام برچسب', + 'defaultValue': 'مقدار پیش فرض', + 'required': 'مورد نیاز', + 'uploadFileTypes': 'آپلود انواع فایل', + 'apiBasedVar': 'متغیر مبتنی بر API', + 'addModalTitle': 'افزودن فیلد ورودی', + 'string': 'متن کوتاه', + 'text-input': 'متن کوتاه', + 'localUpload': 'آپلود محلی', + }, + vision: { + visionSettings: { + url: 'آدرس', + high: 'بالا', + low: 'کم', + resolution: 'وضوح', + uploadLimit: 'محدودیت آپلود', + uploadMethod: 'روش آپلود', + localUpload: 'آپلود محلی', + title: 'تنظیمات بینایی', + both: 'هر دو', + }, + settings: 'تنظیمات', + name: 'چشم انداز', + onlySupportVisionModelTip: 'فقط از مدل های بینایی پشتیبانی می کند', + description: 'Enable Vision به مدل اجازه می دهد تا تصاویر را بگیرد و به سؤالات مربوط به آنها پاسخ دهد.', + }, + voice: { + voiceSettings: { + voice: 'صوتی', + autoPlayEnabled: 'در', + autoPlayDisabled: 'خاموش', + language: 'زبان', + title: 'تنظیمات صدا', + resolutionTooltip: 'زبان پشتیبانی از صدای متن به گفتار。', + autoPlay: 'پخش خودکار', + }, + settings: 'تنظیمات', + name: 'صوتی', + description: 'متن به گفتار به گفتار تنظیمات', + defaultDisplay: 'صدای پیش فرض', + }, + openingStatement: { + tooShort: 'حداقل 20 کلمه درخواست اولیه برای ایجاد یک سخنرانی آغازین برای مکالمه مورد نیاز است.', + writeOpener: 'ویرایش بازکن', + add: 'اضافه کردن', + title: 'افتتاحیه مکالمه', + openingQuestion: 'سوالات آغازین', + noDataPlaceHolder: 'شروع مکالمه با کاربر می تواند به هوش مصنوعی کمک کند تا در برنامه های مکالمه ارتباط نزدیک تری با آنها برقرار کند.', + }, + modelConfig: { + modeType: { + chat: 'چت', + completion: 'کامل', + }, + model: 'مدل', + title: 'مدل و پارامترها', + setTone: 'لحن پاسخ ها را تنظیم کنید', + }, + inputs: { + run: 'اجرا', + queryTitle: 'محتوای پرس و جو', + userInputField: 'فیلد ورودی کاربر', + previewTitle: 'پیش نمایش سریع', + title: 'اشکال زدایی و پیش نمایش', + queryPlaceholder: 'لطفا متن درخواست را وارد کنید.', + noPrompt: 'سعی کنید مقداری اعلان در ورودی پیش از اعلان بنویسید', + completionVarTip: 'مقدار متغیر را پر کنید، که هر بار که سوالی ارسال می شود، به طور خودکار در کلمات سریع جایگزین می شود.', + chatVarTip: 'مقدار متغیر را پر کنید، که هر بار که یک جلسه جدید شروع می شود، به طور خودکار در کلمه prompt جایگزین می شود', + noVar: 'مقدار متغیر را پر کنید، که هر بار که یک جلسه جدید شروع می شود، به طور خودکار در کلمه prompt جایگزین می شود.', + }, + datasetConfig: { + retrieveOneWay: { + title: 'بازیابی N-to-1', + description: 'بر اساس هدف کاربر و توضیحات دانش، عامل به طور مستقل بهترین دانش را برای پرس و جو انتخاب می کند. بهترین برای برنامه های کاربردی با دانش متمایز و محدود.', + }, + retrieveMultiWay: { + title: 'بازیابی چند مسیری', + description: 'بر اساس هدف کاربر، پرس و جوها را در تمام دانش انجام می دهد، متن مربوطه را از چند منبع بازیابی می کند و پس از رتبه بندی مجدد، بهترین نتایج را مطابق با پرس و جو کاربر انتخاب می کند.', + }, + top_k: 'K بالا', + knowledgeTip: 'برای افزودن دانش روی دکمه " کلیک کنید', + score_threshold: 'آستانه امتیاز', + settingTitle: 'تنظیمات بازیابی', + params: 'پارام ها', + embeddingModelRequired: 'یک مدل تعبیه پیکربندی شده مورد نیاز است', + rerankModelRequired: 'یک مدل Rerank پیکربندی شده مورد نیاز است', + score_thresholdTip: 'برای تنظیم آستانه شباهت برای فیلتر کردن تکه ها استفاده می شود.', + top_kTip: 'برای فیلتر کردن تکه هایی که بیشتر شبیه به سؤالات کاربر هستند استفاده می شود. این سیستم همچنین با توجه به max_tokens مدل انتخاب شده، مقدار Top K را به صورت پویا تنظیم می کند.', + retrieveChangeTip: 'اصلاح حالت نمایه و حالت بازیابی ممکن است بر کاربردهای مرتبط با این دانش تأثیر بگذارد.', + }, + assistantType: { + chatAssistant: { + description: 'ساخت یک دستیار مبتنی بر چت با استفاده از یک مدل زبان بزرگ', + name: 'دستیار پایه', + }, + agentAssistant: { + description: 'یک عامل هوشمند بسازید که بتواند به طور مستقل ابزارهایی را برای تکمیل تسک ها انتخاب کند.', + name: 'دستیار نماینده', + }, + name: 'نوع دستیار', + }, + agent: { + agentModeType: { + functionCall: 'فراخوانی تابع', + ReACT: 'واکنش', + }, + setting: { + maximumIterations: { + name: 'حداکثر تکرارها', + description: 'تعداد تکرارهایی را که یک دستیار عامل می تواند اجرا کند محدود کنید', + }, + name: 'تنظیمات نماینده', + description: 'تنظیمات Agent Assistant امکان تنظیم حالت عامل و ویژگی های پیشرفته مانند اعلان های داخلی را فراهم می کند که فقط در نوع Agent موجود است.', + }, + tools: { + enabled: 'فعال', + name: 'ابزار', + description: 'استفاده از ابزارها می تواند قابلیت های LLM مانند جستجو در اینترنت یا انجام محاسبات علمی را گسترش دهد', + }, + agentMode: 'حالت عامل', + nextIteration: 'تکرار بعدی', + promptPlaceholder: 'درخواست خود را اینجا بنویسید', + agentModeDes: 'نوع حالت استنتاج را برای عامل تنظیم کنید', + buildInPrompt: 'اعلان داخلی', + firstPrompt: 'اولین اعلان', + }, + result: 'متن خروجی', + completionSubTitle: 'اعلان پیشوند', + variableTitle: 'متغیرهای', + formattingChangedTitle: 'قالب بندی تغییر کرد', + chatSubTitle: 'دستورالعمل', + debugAsSingleModel: 'اشکال زدایی به عنوان مدل واحد', + publishAs: 'انتشار به عنوان', + duplicateModel: 'تکراری', + noResult: 'خروجی در اینجا نمایش داده می شود.', + debugAsMultipleModel: 'اشکال زدایی به عنوان چندین مدل', + formattingChangedText: 'با تغییر قالب بندی، ناحیه اشکال زدایی بازنشانی می شود، مطمئن هستید؟', + variableTip: 'کاربران متغیرها را در یک فرم پر می کنند و به طور خودکار متغیرها را در اعلان جایگزین می کنند.', + autoAddVar: 'متغیرهای تعریف نشده که در پیش اعلان ارجاع داده شده اند، آیا می خواهید آنها را به صورت ورودی کاربر اضافه کنید؟', } export default translation diff --git a/web/i18n/fr-FR/app-debug.ts b/web/i18n/fr-FR/app-debug.ts index 78294fbd8b..910113074b 100644 --- a/web/i18n/fr-FR/app-debug.ts +++ b/web/i18n/fr-FR/app-debug.ts @@ -197,6 +197,7 @@ const translation = { after: 'Sorry, but you didn\'t provide a text to translate. Could you please provide the text?', }, }, + contentEnableLabel: 'Activation du contenu modéré', }, fileUpload: { title: 'Téléchargement de fichier', @@ -241,6 +242,7 @@ const translation = { 'Veuillez attendre que la réponse à la tâche en lot soit terminée.', notSelectModel: 'Veuillez choisir un modèle', waitForImgUpload: 'Veuillez attendre que l\'image soit téléchargée', + waitForFileUpload: 'Veuillez patienter jusqu’à ce que le(s) fichier(s) soit/les fichiers à télécharger', }, chatSubTitle: 'Instructions', completionSubTitle: 'Indicatif de Prompt', @@ -305,6 +307,33 @@ const translation = { 'defaultValue': 'Valeur par défaut', 'noDefaultValue': 'Aucune valeur par défaut', 'selectDefaultValue': 'Sélectionner la valeur par défaut', + 'file': { + image: { + name: 'Image', + }, + audio: { + name: 'Audio', + }, + document: { + name: 'Document', + }, + video: { + name: 'Vidéo', + }, + custom: { + description: 'Spécifiez d’autres types de fichiers.', + name: 'Autres types de fichiers', + createPlaceholder: ' Extension de fichier, par exemple .doc', + }, + supportFileTypes: 'Types de fichiers de support', + }, + 'content': 'Contenu', + 'uploadFileTypes': 'Types de fichiers de téléchargement', + 'multi-files': 'Liste des fichiers', + 'both': 'Les deux', + 'maxNumberOfUploads': 'Nombre maximal de téléchargements', + 'localUpload': 'Téléchargement local', + 'single-file': 'En file indienne', }, vision: { name: 'Vision', @@ -324,6 +353,7 @@ const translation = { url: 'URL', uploadLimit: 'Limite de téléchargement', }, + onlySupportVisionModelTip: 'Ne prend en charge que les modèles de vision', }, voice: { name: 'Voix', @@ -395,6 +425,7 @@ const translation = { score_threshold: 'Seuil de Score', score_thresholdTip: 'Utilisé pour définir le seuil de similarité pour le filtrage des morceaux.', retrieveChangeTip: 'La modification du mode d\'indexation et du mode de récupération peut affecter les applications associées à cette Connaissance.', + embeddingModelRequired: 'Un modèle d’incorporation configuré est requis', }, debugAsSingleModel: 'Déboguer comme Modèle Unique', debugAsMultipleModel: 'Déboguer en tant que Modèles Multiples', @@ -436,6 +467,79 @@ const translation = { enabled: 'Activé', }, }, + codegen: { + noDataLine1: 'Décrivez votre cas d’utilisation sur la gauche,', + instruction: 'Instructions', + generate: 'Générer', + noDataLine2: 'L’aperçu du code s’affichera ici.', + resTitle: 'Code généré', + applyChanges: 'Appliquer les modifications', + overwriteConfirmTitle: 'Écraser le code existant ?', + description: 'Le générateur de code utilise des modèles configurés pour générer un code de haute qualité basé sur vos instructions. Veuillez fournir des instructions claires et détaillées.', + loading: 'Génération de code...', + overwriteConfirmMessage: 'Cette action remplacera le code existant. Voulez-vous continuer ?', + generatedCodeTitle: 'Code généré', + apply: 'Appliquer', + title: 'Générateur de code', + instructionPlaceholder: 'Entrez une description détaillée du code que vous souhaitez générer.', + }, + generate: { + template: { + pythonDebugger: { + name: 'Débogueur Python', + instruction: 'Un bot capable de générer et de déboguer votre code en fonction de vos instructions', + }, + translation: { + name: 'Traduction', + instruction: 'Un traducteur capable de traduire en plusieurs langues', + }, + professionalAnalyst: { + instruction: 'Extrayez des informations, identifiez les risques et distillez les informations clés des rapports longs dans un seul mémo', + name: 'Analyste professionnel', + }, + excelFormulaExpert: { + name: 'Expert en formules Excel', + instruction: 'Un chatbot qui peut aider les utilisateurs novices à comprendre, utiliser et créer des formules Excel basées sur les instructions de l’utilisateur', + }, + travelPlanning: { + instruction: 'L’assistant de planification de voyage est un outil intelligent conçu pour aider les utilisateurs à planifier sans effort leurs voyages', + name: 'Planification de voyage', + }, + SQLSorcerer: { + instruction: 'Transformez le langage quotidien en requêtes SQL', + name: 'Sorcier SQL', + }, + GitGud: { + name: 'Git gud', + instruction: 'Générer des commandes Git appropriées en fonction des actions de contrôle de version décrites par l’utilisateur', + }, + meetingTakeaways: { + name: 'Points à retenir de la réunion', + instruction: 'Distillez les réunions en résumés concis comprenant les sujets de discussion, les points clés à retenir et les actions à prendre', + }, + writingsPolisher: { + name: 'Polisseuse d’écriture', + instruction: 'Utilisez des techniques de révision avancées pour améliorer vos écrits', + }, + }, + instruction: 'Instructions', + generate: 'Générer', + tryIt: 'Essaie', + overwriteTitle: 'Remplacer la configuration existante ?', + noDataLine2: 'L’aperçu de l’orchestration s’affichera ici.', + overwriteMessage: 'L’application de cette invite remplacera la configuration existante.', + noDataLine1: 'Décrivez votre cas d’utilisation sur la gauche,', + instructionPlaceHolder: 'Rédigez des instructions claires et précises.', + title: 'Générateur d’invites', + apply: 'Appliquer', + resTitle: 'Invite générée', + loading: 'Orchestrer l’application pour vous...', + description: 'Le générateur d’invites utilise le modèle configuré pour optimiser les invites afin d’obtenir une meilleure qualité et une meilleure structure. Veuillez rédiger des instructions claires et détaillées.', + }, + warningMessage: { + timeoutExceeded: 'Les résultats ne s’affichent pas en raison d’un délai d’expiration. Veuillez vous référer aux journaux pour rassembler les résultats complets.', + }, + noResult: 'La sortie sera affichée ici.', } export default translation diff --git a/web/i18n/hi-IN/app-debug.ts b/web/i18n/hi-IN/app-debug.ts index ea9b20c500..04b50da9ed 100644 --- a/web/i18n/hi-IN/app-debug.ts +++ b/web/i18n/hi-IN/app-debug.ts @@ -213,6 +213,34 @@ const translation = { after: 'में कॉन्फ़िगर किए गए ओपनएआई एपीआई कुंजी की आवश्यकता होती है।', }, }, + contentEnableLabel: 'मध्य स्तर की सामग्री सक्षम की गई', + }, + fileUpload: { + numberLimit: 'मैक्स अपलोड करता है', + title: 'फ़ाइल अपलोड', + modalTitle: 'फ़ाइल अपलोड सेटिंग', + description: 'चैट इनपुट बॉक्स छवियों, दस्तावेजों और अन्य फ़ाइलों को अपलोड करने की अनुमति देता है।', + supportedTypes: 'फ़ाइल प्रकारों का समर्थन करें', + }, + imageUpload: { + supportedTypes: 'फ़ाइल प्रकारों का समर्थन करें', + modalTitle: 'छवि अपलोड सेटिंग', + title: 'छवि अपलोड', + description: 'छवियों को अपलोड करने की अनुमति दें।', + numberLimit: 'मैक्स अपलोड करता है', + }, + bar: { + manage: 'प्रबंधित करें', + enableText: 'विशेषताएँ सक्षम हैं', + empty: 'वेब ऐप उपयोगकर्ता अनुभव को बढ़ाने के लिए फ़ीचर सक्षम करें', + }, + documentUpload: { + title: 'दस्तावेज़', + description: 'डॉक्यूमेंट सक्षम करने से मॉडल को दस्तावेज़ प्राप्त करने और उनके बारे में प्रश्नों का उत्तर देने की अनुमति मिलेगी।', + }, + audioUpload: { + title: 'ऑडियो', + description: 'ऑडियो सक्षम करने से मॉडल को ऑडियो फ़ाइलों के ट्रांसक्रिप्शन और विश्लेषण के लिए प्रोसेस करने की अनुमति मिलेगी।', }, }, automatic: { @@ -251,6 +279,7 @@ const translation = { 'कृपया बैच कार्य की प्रतिक्रिया पूरी होने तक प्रतीक्षा करें।', notSelectModel: 'कृपया एक मॉडल चुनें', waitForImgUpload: 'कृपया छवि अपलोड होने तक प्रतीक्षा करें', + waitForFileUpload: 'कृपया फ़ाइल/फ़ाइलें अपलोड होने का इंतज़ार करें', }, chatSubTitle: 'निर्देश', completionSubTitle: 'प्रारंभिक प्रॉम्प्ट', @@ -322,6 +351,30 @@ const translation = { 'defaultValue': 'डिफ़ॉल्ट मान', 'noDefaultValue': 'कोई डिफ़ॉल्ट मान नहीं', 'selectDefaultValue': 'डिफ़ॉल्ट मान चुनें', + 'file': { + image: { + name: 'छवि', + }, + audio: { + name: 'ऑडियो', + }, + document: {}, + video: { + name: 'वीडियो', + }, + custom: { + description: 'अन्य फ़ाइल प्रकार निर्दिष्ट करें।', + name: 'अन्य फ़ाइल प्रकार', + createPlaceholder: 'फ़ाइल एक्सटेंशन, जैसे .doc', + }, + supportFileTypes: 'फ़ाइल प्रकारों का समर्थन करें', + }, + 'both': 'दोनों', + 'multi-files': 'फ़ाइल सूची', + 'single-file': 'एकल फ़ाइल', + 'maxNumberOfUploads': 'अधिकतम अपलोड संख्या', + 'uploadFileTypes': 'फ़ाइल प्रकार अपलोड करें', + 'localUpload': 'स्थानीय अपलोड', }, vision: { name: 'विजन', @@ -341,6 +394,7 @@ const translation = { url: 'यूआरएल', uploadLimit: 'अपलोड सीमा', }, + onlySupportVisionModelTip: 'केवल दृष्टि मॉडल का समर्थन करता है', }, voice: { name: 'वॉयस', @@ -423,6 +477,7 @@ const translation = { 'खंडों को फ़िल्टर करने के लिए समानता थ्रेशोल्ड सेट करने के लिए उपयोग किया जाता है।', retrieveChangeTip: 'सूचकांक मोड और पुनःप्राप्ति मोड को संशोधित करने से इस ज्ञान से जुड़े अनुप्रयोग प्रभावित हो सकते हैं।', + embeddingModelRequired: 'एक कॉन्फ़िगर किया गया एंबेडिंग मॉडल आवश्यक है', }, debugAsSingleModel: 'एकल मॉडल के रूप में डिबग करें', debugAsMultipleModel: 'एकाधिक मॉडलों के रूप में डिबग करें', @@ -495,6 +550,79 @@ const translation = { description: 'ऑडियो सक्षम करने से मॉडल ट्रांसक्रिप्शन और विश्लेषण के लिए ऑडियो फ़ाइलों को प्रोसेस कर सकेगा।', }, }, + codegen: { + title: 'कोड जनरेटर', + loading: 'कोड उत्पन्न कर रहा हूँ...', + noDataLine1: 'बाईं ओर अपने उपयोग के मामले का वर्णन करें,', + apply: 'अनुप्रयोग करें', + generate: 'जनरेट करें', + instruction: 'अनुदेश', + overwriteConfirmTitle: 'मौजूदा कोड को ओवरराइट करें?', + resTitle: 'जनरेटेड कोड', + applyChanges: 'परिवर्तन लागू करें', + noDataLine2: 'कोड पूर्वावलोकन यहाँ दिखाई देगा।', + generatedCodeTitle: 'जनरेटेड कोड', + overwriteConfirmMessage: 'यह क्रिया मौजूदा कोड को ओवरराइट कर देगी। क्या आप जारी रखना चाहते हैं?', + instructionPlaceholder: 'आप जिस कोड का निर्माण करना चाहते हैं उसका विस्तृत विवरण प्रदान करें।', + description: 'कोड जनरेटर आपके निर्देशों के आधार पर उच्च गुणवत्ता वाली कोड उत्पन्न करने के लिए कॉन्फ़िगर किए गए मॉडलों का उपयोग करता है। कृपया स्पष्ट और विस्तृत निर्देश प्रदान करें।', + }, + generate: { + template: { + pythonDebugger: { + name: 'पाइथन डिबगर', + instruction: 'एक बॉट जो आपके निर्देशों के आधार पर आपका कोड उत्पन्न और डिबग कर सकता है।', + }, + translation: { + name: 'अनुवाद', + instruction: 'एक अनुवादक जो कई भाषाओं का अनुवाद कर सकता है', + }, + professionalAnalyst: { + name: 'पेशेवर विश्लेषक', + instruction: 'दीर्घ रिपोर्ट से अंतर्दृष्टियाँ निकालें, जोखिम की पहचान करें और प्रमुख जानकारी को एकल ज्ञापन में संक्षेपित करें।', + }, + excelFormulaExpert: { + name: 'एक्सेल फॉर्मूला विशेषज्ञ', + instruction: 'एक चैटबॉट जो नए उपयोगकर्ताओं की मदद कर सकता है कि वे कैसे समझें, उपयोग करें और उपयोगकर्ता के निर्देशों के आधार पर Excel फॉर्मूले बनाएं।', + }, + travelPlanning: { + name: 'यात्रा की योजना बनाना', + instruction: 'यात्रा नियोजन सहायक एक बुद्धिमान उपकरण है जिसे उपयोगकर्ताओं को बिना किसी परेशानी के अपने यात्रा की योजना बनाने में मदद करने के लिए डिज़ाइन किया गया है', + }, + SQLSorcerer: { + instruction: 'प्रति दिन की भाषा को SQL क्वेरियों में बदलें', + name: 'SQL जादूगर', + }, + GitGud: { + name: 'अच्छा खेलो', + instruction: 'उपयोगकर्ता द्वारा वर्णित संस्करण नियंत्रण क्रियाओं के आधार पर उचित Git कमांड उत्पन्न करें', + }, + meetingTakeaways: { + name: 'बैठक के निष्कर्ष', + instruction: 'बैठकों को संक्षिप्त सारांशों में डिस्टिल करें, जिसमें चर्चा के विषय, मुख्य निष्कर्ष और कार्य के बिंदु शामिल हों।', + }, + writingsPolisher: { + name: 'लेखन पालिशर', + instruction: 'अपनी लेखन को सुधारने के लिए उन्नत संपादन तकनीकों का उपयोग करें', + }, + }, + tryIt: 'इसे आजमाओ', + generate: 'जनरेट करें', + instructionPlaceHolder: 'स्पष्ट और विशेष निर्देश लिखें।', + title: 'प्रॉम्प्ट जनरेटर', + apply: 'अनुप्रयोग करें', + noDataLine1: 'बाईं ओर अपने उपयोग केस का वर्णन करें,', + instruction: 'अनुदेश', + loading: 'आपके लिए एप्लिकेशन का आयोजन कर रहे हैं...', + overwriteTitle: 'मौजूदा कॉन्फ़िगरेशन को अधिलेखित करें?', + noDataLine2: 'यहाँ सम्प्रेषण पूर्वावलोकन दिखाया जाएगा।', + resTitle: 'जनित प्रॉम्प्ट', + overwriteMessage: 'इस प्रॉम्प्ट को लागू करने से मौजूदा कॉन्फ़िगरेशन को ओवरराइड कर दिया जाएगा।', + description: 'प्रॉम्प्ट जेनरेटर उच्च गुणवत्ता और बेहतर संरचना के लिए प्रॉम्प्ट्स को ऑप्टिमाइज़ करने के लिए कॉन्फ़िगर किए गए मॉडल का उपयोग करता है। कृपया स्पष्ट और विस्तृत निर्देश लिखें।', + }, + warningMessage: { + timeoutExceeded: 'परिणाम टाइमआउट के कारण प्रदर्शित नहीं किए गए हैं। कृपया संपूर्ण परिणामों को इकट्ठा करने के लिए लॉग्स का संदर्भ लें।', + }, + noResult: 'प्रदर्शन यहाँ होगा।', } export default translation diff --git a/web/i18n/it-IT/app-debug.ts b/web/i18n/it-IT/app-debug.ts index f79cccf6e7..9344c88976 100644 --- a/web/i18n/it-IT/app-debug.ts +++ b/web/i18n/it-IT/app-debug.ts @@ -215,6 +215,7 @@ const translation = { after: '', }, }, + contentEnableLabel: 'Abilitato il contenuto moderato', }, fileUpload: { title: 'Caricamento File', @@ -280,6 +281,7 @@ const translation = { 'Per favore attendi che la risposta all\'attività batch sia completata.', notSelectModel: 'Per favore scegli un modello', waitForImgUpload: 'Per favore attendi il caricamento dell\'immagine', + waitForFileUpload: 'Attendi il caricamento del file o dei file', }, chatSubTitle: 'Istruzioni', completionSubTitle: 'Prompt di prefisso', @@ -351,6 +353,32 @@ const translation = { 'defaultValue': 'Valore predefinito', 'noDefaultValue': 'Nessun valore predefinito', 'selectDefaultValue': 'Seleziona valore predefinito', + 'file': { + image: { + name: 'Immagine', + }, + audio: { + name: 'Audio', + }, + document: { + name: 'Documento', + }, + video: { + name: 'Video', + }, + custom: { + createPlaceholder: ' Estensione del file, ad esempio .doc', + description: 'Specificare altri tipi di file.', + name: 'Altri tipi di file', + }, + supportFileTypes: 'Tipi di file di supporto', + }, + 'single-file': 'File singolo', + 'uploadFileTypes': 'Caricare i tipi di file', + 'maxNumberOfUploads': 'Numero massimo di caricamenti', + 'multi-files': 'Elenco file', + 'both': 'Ambedue', + 'localUpload': 'Caricamento locale', }, vision: { name: 'Visione', @@ -371,6 +399,7 @@ const translation = { url: 'URL', uploadLimit: 'Limite di caricamento', }, + onlySupportVisionModelTip: 'Supporta solo i modelli di visione', }, voice: { name: 'Voce', @@ -451,6 +480,7 @@ const translation = { 'Usato per impostare la soglia di somiglianza per il filtraggio dei chunk.', retrieveChangeTip: 'Modificare la modalità di indicizzazione e la modalità di recupero può influenzare le applicazioni associate a questa Conoscenza.', + embeddingModelRequired: 'È necessario un modello di incorporamento configurato', }, debugAsSingleModel: 'Debug come modello singolo', debugAsMultipleModel: 'Debug come modelli multipli', @@ -497,6 +527,79 @@ const translation = { enabled: 'Abilitato', }, }, + codegen: { + noDataLine1: 'Descrivi il tuo caso d\'uso a sinistra,', + noDataLine2: 'L\'anteprima del codice verrà mostrata qui.', + generate: 'Generare', + resTitle: 'Codice generato', + overwriteConfirmTitle: 'Sovrascrivere il codice esistente?', + applyChanges: 'Applica modifiche', + title: 'Generatore di codice', + overwriteConfirmMessage: 'Questa azione sovrascriverà il codice esistente. Vuoi continuare?', + description: 'Il generatore di codice utilizza modelli configurati per generare codice di alta qualità in base alle istruzioni dell\'utente. Si prega di fornire istruzioni chiare e dettagliate.', + instruction: 'Disposizioni', + instructionPlaceholder: 'Inserisci una descrizione dettagliata del codice che desideri generare.', + generatedCodeTitle: 'Codice generato', + loading: 'Generazione del codice...', + apply: 'Applicare', + }, + generate: { + template: { + pythonDebugger: { + instruction: 'Un bot in grado di generare ed eseguire il debug del codice in base alle istruzioni', + name: 'Debugger Python', + }, + translation: { + instruction: 'Un traduttore in grado di tradurre in più lingue', + name: 'Traduzione', + }, + professionalAnalyst: { + name: 'Analista professionista', + instruction: 'Estrai informazioni, identifica i rischi e distilla le informazioni chiave da report lunghi in un unico memo', + }, + excelFormulaExpert: { + name: 'Esperto di formule per Excel', + instruction: 'Un chatbot che può aiutare gli utenti inesperti a comprendere, utilizzare e creare formule Excel basate sulle istruzioni dell\'utente', + }, + travelPlanning: { + name: 'Pianificazione del viaggio', + instruction: 'Il Travel Planning Assistant è uno strumento intelligente progettato per aiutare gli utenti a pianificare facilmente i loro viaggi', + }, + SQLSorcerer: { + name: 'Stregone SQL', + instruction: 'Trasforma il linguaggio di tutti i giorni in query SQL', + }, + GitGud: { + instruction: 'Generare comandi Git appropriati in base alle azioni di controllo della versione descritte dall\'utente', + name: 'Git gud', + }, + meetingTakeaways: { + name: 'Conclusioni sulle riunioni', + instruction: 'Distilla le riunioni in riassunti concisi che includono argomenti di discussione, punti chiave e punti d\'azione', + }, + writingsPolisher: { + name: 'Lucidatrice per scrittura', + instruction: 'Usa tecniche avanzate di copyediting per migliorare i tuoi scritti', + }, + }, + instruction: 'Disposizioni', + noDataLine1: 'Descrivi il tuo caso d\'uso a sinistra,', + title: 'Generatore di prompt', + instructionPlaceHolder: 'Scrivi istruzioni chiare e specifiche.', + loading: 'Orchestrare l\'applicazione per te...', + apply: 'Applicare', + overwriteMessage: 'L\'applicazione di questo prompt sovrascriverà la configurazione esistente.', + description: 'Il generatore di prompt utilizza il modello configurato per ottimizzare i prompt per una qualità superiore e una struttura migliore. Si prega di scrivere istruzioni chiare e dettagliate.', + overwriteTitle: 'Sovrascrivere la configurazione esistente?', + resTitle: 'Prompt generato', + generate: 'Generare', + noDataLine2: 'L\'anteprima dell\'orchestrazione verrà visualizzata qui.', + tryIt: 'Provalo', + }, + warningMessage: { + timeoutExceeded: 'I risultati non vengono visualizzati a causa del timeout. Si prega di fare riferimento ai registri per raccogliere risultati completi.', + }, + noResult: 'L\'output verrà visualizzato qui.', } export default translation diff --git a/web/i18n/ko-KR/app-debug.ts b/web/i18n/ko-KR/app-debug.ts index aade904a6b..8bf509ae12 100644 --- a/web/i18n/ko-KR/app-debug.ts +++ b/web/i18n/ko-KR/app-debug.ts @@ -197,6 +197,7 @@ const translation = { after: '에 OpenAI API 키가 설정되어 있어야 합니다.', }, }, + contentEnableLabel: '중간 콘텐츠 사용', }, fileUpload: { title: '파일 업로드', @@ -240,6 +241,7 @@ const translation = { waitForBatchResponse: '배치 작업에 대한 응답이 완료될 때까지 기다려 주세요.', notSelectModel: '모델을 선택해 주세요', waitForImgUpload: '이미지 업로드가 완료될 때까지 기다려 주세요', + waitForFileUpload: '파일이 업로드될 때까지 기다리십시오.', }, chatSubTitle: '단계', completionSubTitle: '접두사 프롬프트', @@ -301,6 +303,33 @@ const translation = { 'defaultValue': '기본값', 'noDefaultValue': '기본값 없음', 'selectDefaultValue': '기본값 선택', + 'file': { + image: { + name: '이미지', + }, + audio: { + name: '오디오', + }, + document: { + name: '문서', + }, + video: { + name: '비디오', + }, + custom: { + description: '다른 파일 형식을 지정합니다.', + name: '다른 파일 형식', + createPlaceholder: ' 파일 확장자(예: .doc', + }, + supportFileTypes: '지원 파일 형식', + }, + 'content': '콘텐츠', + 'single-file': '단일 파일', + 'both': '둘다', + 'multi-files': '파일 목록', + 'uploadFileTypes': '파일 형식 업로드', + 'maxNumberOfUploads': '최대 업로드 수', + 'localUpload': '로컬 업로드', }, vision: { name: '비전', @@ -320,6 +349,7 @@ const translation = { url: 'URL', uploadLimit: '업로드 제한', }, + onlySupportVisionModelTip: '비전 모델만 지원', }, voice: { name: '음성', @@ -388,6 +418,7 @@ const translation = { score_threshold: '점수 임계값', score_thresholdTip: '청크 필터링의 유사성 임계값을 설정하는 데 사용됩니다.', retrieveChangeTip: '인덱스 모드 및 리트리벌 모드를 변경하면 이 지식과 관련된 애플리케이션에 영향을 줄 수 있습니다.', + embeddingModelRequired: '구성된 임베딩 모델이 필요합니다.', }, debugAsSingleModel: '단일 모델로 디버그', debugAsMultipleModel: '다중 모델로 디버그', @@ -429,6 +460,79 @@ const translation = { enabled: '활성화됨', }, }, + codegen: { + instruction: '지시', + apply: '적용하다', + generatedCodeTitle: '생성된 코드', + title: '코드 생성기', + applyChanges: '변경 사항 적용', + resTitle: '생성된 코드', + noDataLine1: '왼쪽에 사용 사례를 설명하십시오.', + overwriteConfirmTitle: '기존 코드를 덮어쓰시겠습니까?', + generate: '창조하다', + loading: '코드 생성 중...', + overwriteConfirmMessage: '이 작업은 기존 코드를 덮어씁니다. 계속하시겠습니까?', + noDataLine2: '코드 미리 보기가 여기에 표시됩니다.', + instructionPlaceholder: '생성하려는 코드에 대한 자세한 설명을 입력합니다.', + description: '코드 생성기는 구성된 모델을 사용하여 지시에 따라 고품질 코드를 생성합니다. 명확하고 자세한 지침을 제공하십시오.', + }, + generate: { + template: { + pythonDebugger: { + name: '파이썬 디버거', + instruction: '지시에 따라 코드를 생성하고 디버깅할 수 있는 봇', + }, + translation: { + name: '번역', + instruction: '여러 언어를 번역할 수 있는 번역기', + }, + professionalAnalyst: { + name: '전문 분석가', + instruction: '인사이트를 추출하고, 위험을 식별하고, 긴 보고서에서 주요 정보를 단일 메모로 추출합니다.', + }, + excelFormulaExpert: { + name: 'Excel 수식 전문가', + instruction: '초보 사용자가 사용자 지시에 따라 Excel 수식을 이해, 사용 및 생성할 수 있도록 도와주는 챗봇', + }, + travelPlanning: { + name: '여행 계획', + instruction: '여행 계획 도우미는 사용자가 쉽게 여행을 계획할 수 있도록 설계된 지능형 도구입니다', + }, + SQLSorcerer: { + name: 'SQL 마법사', + instruction: '일상적인 언어를 SQL 쿼리로 변환', + }, + GitGud: { + name: '깃구드', + instruction: '사용자가 설명한 버전 제어 작업을 기반으로 적절한 Git 명령 생성', + }, + meetingTakeaways: { + name: '회의 요점', + instruction: '회의를 토론 주제, 핵심 내용 및 실행 항목을 포함한 간결한 요약으로 추출합니다.', + }, + writingsPolisher: { + name: '글쓰기 폴리셔', + instruction: '고급 카피에디팅 기술을 사용하여 글쓰기 향상', + }, + }, + apply: '적용하다', + instruction: '지시', + resTitle: '생성된 프롬프트', + generate: '창조하다', + tryIt: '사용해 보기', + title: '프롬프트 생성기', + overwriteTitle: '기존 구성을 재정의하시겠습니까?', + loading: '응용 프로그램 오케스트레이션...', + instructionPlaceHolder: '명확하고 구체적인 지침을 작성하십시오.', + noDataLine2: '오케스트레이션 미리 보기가 여기에 표시됩니다.', + overwriteMessage: '이 프롬프트를 적용하면 기존 구성이 재정의됩니다.', + noDataLine1: '왼쪽에 사용 사례를 설명하십시오.', + description: '프롬프트 생성기는 구성된 모델을 사용하여 더 높은 품질과 더 나은 구조를 위해 프롬프트를 최적화합니다. 명확하고 상세한 지침을 작성하십시오.', + }, + warningMessage: { + timeoutExceeded: '시간 초과로 인해 결과가 표시되지 않습니다. 전체 결과를 수집하려면 로그를 참조하십시오.', + }, + noResult: '출력이 여기에 표시됩니다.', } export default translation diff --git a/web/i18n/pl-PL/app-debug.ts b/web/i18n/pl-PL/app-debug.ts index 06e271fbbb..7e20b2d7e3 100644 --- a/web/i18n/pl-PL/app-debug.ts +++ b/web/i18n/pl-PL/app-debug.ts @@ -213,6 +213,7 @@ const translation = { after: '', }, }, + contentEnableLabel: 'Włączono moderowanie treści', }, fileUpload: { title: 'Przesyłanie plików', @@ -277,6 +278,7 @@ const translation = { waitForBatchResponse: 'Proszę czekać na odpowiedź na zadanie wsadowe.', notSelectModel: 'Proszę wybrać model', waitForImgUpload: 'Proszę czekać na przesłanie obrazu', + waitForFileUpload: 'Poczekaj na przesłanie pliku/plików', }, chatSubTitle: 'Instrukcje', completionSubTitle: 'Prefix Monitu', @@ -346,6 +348,33 @@ const translation = { 'defaultValue': 'Wartość domyślna', 'noDefaultValue': 'Brak wartości domyślnej', 'selectDefaultValue': 'Wybierz wartość domyślną', + 'file': { + image: { + name: 'Obraz', + }, + audio: { + name: 'Dźwięk', + }, + document: { + name: 'Dokument', + }, + video: { + name: 'Wideo', + }, + custom: { + description: 'Określ inne typy plików.', + createPlaceholder: ' Rozszerzenie pliku, np. .doc', + name: 'Inne typy plików', + }, + supportFileTypes: 'Obsługa typów plików', + }, + 'both': 'Obie', + 'localUpload': 'Przesyłanie lokalne', + 'uploadFileTypes': 'Typy przesyłanych plików', + 'maxNumberOfUploads': 'Maksymalna liczba przesyłanych plików', + 'single-file': 'Pojedynczy plik', + 'content': 'Zawartość', + 'multi-files': 'Lista plików', }, vision: { name: 'Wizja', @@ -366,6 +395,7 @@ const translation = { url: 'URL', uploadLimit: 'Limit przesyłania', }, + onlySupportVisionModelTip: 'Obsługuje tylko modele wizyjne', }, voice: { name: 'Głos', @@ -446,6 +476,7 @@ const translation = { 'Używany do ustawienia progu podobieństwa dla filtrowania fragmentów.', retrieveChangeTip: 'Modyfikacja trybu indeksowania i odzyskiwania może wpłynąć na aplikacje powiązane z tą Wiedzą.', + embeddingModelRequired: 'Wymagany jest skonfigurowany model osadzania', }, debugAsSingleModel: 'Debuguj jako pojedynczy model', debugAsMultipleModel: 'Debuguj jako wiele modeli', @@ -492,6 +523,79 @@ const translation = { enabled: 'Włączone', }, }, + codegen: { + generate: 'Stworzyć', + applyChanges: 'Stosowanie zmian', + loading: 'Generowanie kodu...', + generatedCodeTitle: 'Wygenerowany kod', + description: 'Generator kodów używa skonfigurowanych modeli do generowania wysokiej jakości kodu na podstawie Twoich instrukcji. Podaj jasne i szczegółowe instrukcje.', + resTitle: 'Wygenerowany kod', + title: 'Generator kodów', + overwriteConfirmMessage: 'Ta akcja spowoduje zastąpienie istniejącego kodu. Czy chcesz kontynuować?', + instruction: 'Instrukcje', + apply: 'Zastosować', + instructionPlaceholder: 'Wprowadź szczegółowy opis kodu, który chcesz wygenerować.', + noDataLine2: 'W tym miejscu zostanie wyświetlony podgląd kodu.', + noDataLine1: 'Opisz swój przypadek użycia po lewej stronie,', + overwriteConfirmTitle: 'Nadpisać istniejący kod?', + }, + generate: { + template: { + pythonDebugger: { + name: 'Debuger języka Python', + instruction: 'Bot, który może generować i debugować kod na podstawie instrukcji', + }, + translation: { + name: 'Tłumaczenie', + instruction: 'Tłumacz, który może tłumaczyć wiele języków', + }, + professionalAnalyst: { + instruction: 'Wyodrębniaj szczegółowe informacje, identyfikuj ryzyko i destyluj kluczowe informacje z długich raportów w jednej notatce', + name: 'Zawodowy analityk', + }, + excelFormulaExpert: { + name: 'Ekspert ds. formuł programu Excel', + instruction: 'Chatbot, który może pomóc początkującym użytkownikom zrozumieć, używać i tworzyć formuły Excela na podstawie instrukcji użytkownika', + }, + travelPlanning: { + name: 'Planowanie podróży', + instruction: 'Asystent planowania podróży to inteligentne narzędzie zaprojektowane, aby pomóc użytkownikom w łatwym planowaniu podróży', + }, + SQLSorcerer: { + instruction: 'Przekształć język potoczny w zapytania SQL', + name: 'Czarownik SQL', + }, + GitGud: { + instruction: 'Generowanie odpowiednich poleceń usługi Git na podstawie opisanych przez użytkownika akcji kontroli wersji', + name: 'Git gud', + }, + meetingTakeaways: { + name: 'Wnioski ze spotkania', + instruction: 'Podziel spotkania na zwięzłe podsumowania, w tym tematy dyskusji, kluczowe wnioski i działania', + }, + writingsPolisher: { + instruction: 'Korzystaj z zaawansowanych technik redakcyjnych, aby ulepszyć swoje teksty', + name: 'Polerka do pisania', + }, + }, + instructionPlaceHolder: 'Napisz jasne i konkretne instrukcje.', + instruction: 'Instrukcje', + generate: 'Stworzyć', + tryIt: 'Spróbuj', + overwriteMessage: 'Zastosowanie tego monitu spowoduje zastąpienie istniejącej konfiguracji.', + resTitle: 'Wygenerowany monit', + noDataLine1: 'Opisz swój przypadek użycia po lewej stronie,', + title: 'Generator podpowiedzi', + apply: 'Zastosować', + overwriteTitle: 'Nadpisać istniejącą konfigurację?', + loading: 'Orkiestracja aplikacji dla Ciebie...', + description: 'Generator podpowiedzi używa skonfigurowanego modelu do optymalizacji podpowiedzi w celu uzyskania wyższej jakości i lepszej struktury. Napisz jasne i szczegółowe instrukcje.', + noDataLine2: 'W tym miejscu zostanie wyświetlony podgląd orkiestracji.', + }, + warningMessage: { + timeoutExceeded: 'Wyniki nie są wyświetlane z powodu przekroczenia limitu czasu. Zapoznaj się z dziennikami, aby zebrać pełne wyniki.', + }, + noResult: 'W tym miejscu zostaną wyświetlone dane wyjściowe.', } export default translation diff --git a/web/i18n/pt-BR/app-debug.ts b/web/i18n/pt-BR/app-debug.ts index 5f8aabec65..fd158acdc8 100644 --- a/web/i18n/pt-BR/app-debug.ts +++ b/web/i18n/pt-BR/app-debug.ts @@ -197,6 +197,7 @@ const translation = { after: '', }, }, + contentEnableLabel: 'Conteúdo moderado habilitado', }, fileUpload: { title: 'Upload de Arquivo', @@ -258,6 +259,7 @@ const translation = { 'Aguarde a resposta à tarefa em lote ser concluída.', notSelectModel: 'Por favor, escolha um modelo', waitForImgUpload: 'Aguarde o upload da imagem', + waitForFileUpload: 'Aguarde o upload do arquivo / arquivos', }, chatSubTitle: 'Instruções', completionSubTitle: 'Prefixo da Solicitação', @@ -322,6 +324,33 @@ const translation = { 'defaultValue': 'Valor padrão', 'noDefaultValue': 'Nenhum valor padrão', 'selectDefaultValue': 'Selecionar valor padrão', + 'file': { + image: { + name: 'Imagem', + }, + audio: { + name: 'Áudio', + }, + document: { + name: 'Documento', + }, + video: { + name: 'Vídeo', + }, + custom: { + description: 'Especifique outros tipos de arquivo.', + name: 'Outros tipos de arquivo', + createPlaceholder: ' Extensão de arquivo, por exemplo, .doc', + }, + supportFileTypes: 'Tipos de arquivo de suporte', + }, + 'content': 'Conteúdo', + 'multi-files': 'Lista de arquivos', + 'single-file': 'Fila indiana', + 'maxNumberOfUploads': 'Número máximo de uploads', + 'uploadFileTypes': 'Carregar tipos de arquivo', + 'both': 'Ambos', + 'localUpload': 'Local Upload', }, vision: { name: 'Visão', @@ -341,6 +370,7 @@ const translation = { url: 'URL', uploadLimit: 'Limite de Upload', }, + onlySupportVisionModelTip: 'Suporta apenas modelos de visão', }, voice: { name: 'voz', @@ -412,6 +442,7 @@ const translation = { score_threshold: 'Limiar de Pontuação', score_thresholdTip: 'Usado para definir o limiar de similaridade para filtragem de trechos.', retrieveChangeTip: 'Modificar o modo de índice e o modo de recuperação pode afetar os aplicativos associados a este Conhecimento.', + embeddingModelRequired: 'É necessário um modelo de incorporação configurado', }, assistantType: { name: 'Tipo de Assistente', @@ -449,6 +480,83 @@ const translation = { enabled: 'Habilitado', }, }, + codegen: { + instruction: 'Instruções', + generatedCodeTitle: 'Código gerado', + noDataLine1: 'Descreva seu caso de uso à esquerda,', + loading: 'Gerando código...', + description: 'O Gerador de código usa modelos configurados para gerar código de alta qualidade com base em suas instruções. Por favor, forneça instruções claras e detalhadas.', + generate: 'Gerar', + resTitle: 'Código gerado', + title: 'Gerador de código', + overwriteConfirmTitle: 'Substituir o código existente?', + overwriteConfirmMessage: 'Essa ação substituirá o código existente. Você quer continuar?', + apply: 'Aplicar', + applyChanges: 'Aplicar alterações', + instructionPlaceholder: 'Insira uma descrição detalhada do código que você deseja gerar.', + noDataLine2: 'A visualização do código será exibida aqui.', + }, + generate: { + template: { + pythonDebugger: { + instruction: 'Um bot que pode gerar e depurar seu código com base em suas instruções', + name: 'Depurador Python', + }, + translation: { + name: 'Tradução', + instruction: 'Um tradutor que pode traduzir vários idiomas', + }, + professionalAnalyst: { + name: 'Analista profissional', + instruction: 'Extraia insights, identifique riscos e destile informações importantes de relatórios longos em um único memorando', + }, + excelFormulaExpert: { + name: 'Especialista em fórmulas do Excel', + instruction: 'Um chatbot que pode ajudar usuários iniciantes a entender, usar e criar fórmulas do Excel com base nas instruções do usuário', + }, + travelPlanning: { + name: 'Planejamento de viagens', + instruction: 'O Assistente de Planejamento de Viagens é uma ferramenta inteligente projetada para ajudar os usuários a planejar suas viagens sem esforço', + }, + SQLSorcerer: { + instruction: 'Transforme a linguagem cotidiana em consultas SQL', + name: 'Feiticeiro SQL', + }, + GitGud: { + instruction: 'Gerar comandos Git apropriados com base nas ações de controle de versão descritas pelo usuário', + name: 'Bom jogo', + }, + meetingTakeaways: { + name: 'Conclusões da reunião', + instruction: 'Destilar reuniões em resumos concisos, incluindo tópicos de discussão, principais conclusões e itens de ação', + }, + writingsPolisher: { + instruction: 'Use técnicas avançadas de edição de texto para melhorar seus escritos', + name: 'Polidor de escrita', + }, + }, + generate: 'Gerar', + overwriteMessage: 'A aplicação desse prompt substituirá a configuração existente.', + apply: 'Aplicar', + title: 'Gerador de Prompt', + description: 'O Gerador de Prompts usa o modelo configurado para otimizar prompts para maior qualidade e melhor estrutura. Por favor, escreva instruções claras e detalhadas.', + instructionPlaceHolder: 'Escreva instruções claras e específicas.', + noDataLine2: 'A visualização da orquestração será exibida aqui.', + tryIt: 'Experimente', + loading: 'Orquestrando o aplicativo para você...', + instruction: 'Instruções', + resTitle: 'Prompt gerado', + noDataLine1: 'Descreva seu caso de uso à esquerda,', + overwriteTitle: 'Substituir a configuração existente?', + }, + warningMessage: { + timeoutExceeded: 'Os resultados não são exibidos devido ao tempo limite. Consulte os logs para obter os resultados completos.', + }, + debugAsSingleModel: 'Depurar como modelo único', + noResult: 'A saída será exibida aqui.', + debugAsMultipleModel: 'Depurar como vários modelos', + publishAs: 'Publicar como', + duplicateModel: 'Duplicar', } export default translation diff --git a/web/i18n/ro-RO/app-debug.ts b/web/i18n/ro-RO/app-debug.ts index f6a10df1d2..d8b455e4e0 100644 --- a/web/i18n/ro-RO/app-debug.ts +++ b/web/i18n/ro-RO/app-debug.ts @@ -197,6 +197,7 @@ const translation = { after: '', }, }, + contentEnableLabel: 'Conținut moderat activat', }, fileUpload: { title: 'Încărcare fișier', @@ -258,6 +259,7 @@ const translation = { 'Vă rugăm să așteptați finalizarea sarcinii în lot.', notSelectModel: 'Vă rugăm să alegeți un model', waitForImgUpload: 'Vă rugăm să așteptați încărcarea imaginii', + waitForFileUpload: 'Vă rugăm să așteptați încărcarea fișierului / fișierelor', }, chatSubTitle: 'Instrucțiuni', completionSubTitle: 'Prefix prompt', @@ -322,6 +324,33 @@ const translation = { 'defaultValue': 'Valoare implicită', 'noDefaultValue': 'Fără valoare implicită', 'selectDefaultValue': 'Selectați valoarea implicită', + 'file': { + image: { + name: 'Imagine', + }, + audio: { + name: 'Audio', + }, + document: { + name: 'Document', + }, + video: { + name: 'Video', + }, + custom: { + createPlaceholder: ' Extensia fișierului, de exemplu .doc', + name: 'Alte tipuri de fișiere', + description: 'Specificați alte tipuri de fișiere.', + }, + supportFileTypes: 'Tipuri de fișiere de asistență', + }, + 'content': 'Conținut', + 'single-file': 'Un singur fișier', + 'multi-files': 'Lista de fișiere', + 'uploadFileTypes': 'Încărcați tipuri de fișiere', + 'localUpload': 'Încărcare locală', + 'maxNumberOfUploads': 'Numărul maxim de încărcări', + 'both': 'Ambii', }, vision: { name: 'Viziune', @@ -341,6 +370,7 @@ const translation = { url: 'URL', uploadLimit: 'Limită de încărcare', }, + onlySupportVisionModelTip: 'Acceptă doar modele vizuale', }, voice: { name: 'Voce', @@ -411,6 +441,7 @@ const translation = { score_threshold: 'Prag scor', score_thresholdTip: 'Utilizat pentru a seta pragul de similitudine pentru filtrarea bucăților.', retrieveChangeTip: 'Modificarea modului de indexare și a modului de recuperare poate afecta aplicațiile asociate cu aceste Cunoștințe.', + embeddingModelRequired: 'Este necesar un model de încorporare configurat', }, debugAsSingleModel: 'Depanare ca Model Unic', debugAsMultipleModel: 'Depanare ca Modele Multiple', @@ -452,6 +483,79 @@ const translation = { enabled: 'Activat', }, }, + codegen: { + overwriteConfirmTitle: 'Suprascrierea codului existent?', + resTitle: 'Cod generat', + instruction: 'Instrucţiuni', + description: 'Generatorul de cod utilizează modele configurate pentru a genera cod de înaltă calitate pe baza instrucțiunilor dvs. Vă rugăm să oferiți instrucțiuni clare și detaliate.', + generatedCodeTitle: 'Cod generat', + apply: 'Aplica', + noDataLine2: 'Previzualizarea codului va fi afișată aici.', + noDataLine1: 'Descrieți cazul de utilizare din stânga,', + instructionPlaceholder: 'Introduceți descrierea detaliată a codului pe care doriți să îl generați.', + generate: 'Genera', + title: 'Generator de coduri', + applyChanges: 'Aplicarea modificărilor', + overwriteConfirmMessage: 'Această acțiune va suprascrie codul existent. Vrei să continui?', + loading: 'Generarea codului...', + }, + generate: { + template: { + pythonDebugger: { + name: 'Depanator Python', + instruction: 'Un bot care vă poate genera și depana codul pe baza instrucțiunilor dvs.', + }, + translation: { + instruction: 'Un traducător care poate traduce mai multe limbi', + name: 'Traducere', + }, + professionalAnalyst: { + name: 'Analist profesionist', + instruction: 'Extrageți informații, identificați riscurile și distilați informațiile cheie din rapoartele lungi într-o singură notă', + }, + excelFormulaExpert: { + name: 'Expert în formule Excel', + instruction: 'Un chatbot care poate ajuta utilizatorii începători să înțeleagă, să utilizeze și să creeze formule Excel pe baza instrucțiunilor utilizatorului', + }, + travelPlanning: { + name: 'Planificarea călătoriei', + instruction: 'Asistentul de planificare a călătoriilor este un instrument inteligent conceput pentru a ajuta utilizatorii să-și planifice călătoriile fără efort', + }, + SQLSorcerer: { + name: 'Vrăjitor SQL', + instruction: 'Transformați limbajul de zi cu zi în interogări SQL', + }, + GitGud: { + instruction: 'Generați comenzi Git adecvate pe baza acțiunilor de control al versiunii descrise de utilizator', + name: 'Git gud', + }, + meetingTakeaways: { + instruction: 'Distilați întâlnirile în rezumate concise, inclusiv subiecte de discuție, concluzii cheie și elemente de acțiune', + name: 'Concluzii ale întâlnirilor', + }, + writingsPolisher: { + name: 'Șlefuitor de scris', + instruction: 'Utilizați tehnici avansate de editare pentru a vă îmbunătăți scrierile', + }, + }, + apply: 'Aplica', + generate: 'Genera', + resTitle: 'Solicitare generată', + tryIt: 'Încearcă-l', + overwriteTitle: 'Înlocuiți configurația existentă?', + description: 'Generatorul de solicitări utilizează modelul configurat pentru a optimiza solicitările pentru o calitate superioară și o structură mai bună. Vă rugăm să scrieți instrucțiuni clare și detaliate.', + instruction: 'Instrucţiuni', + loading: 'Orchestrarea aplicației pentru dvs....', + noDataLine1: 'Descrieți cazul de utilizare din stânga,', + title: 'Generator de solicitări', + instructionPlaceHolder: 'Scrieți instrucțiuni clare și specifice.', + noDataLine2: 'Previzualizarea orchestrației va fi afișată aici.', + overwriteMessage: 'Aplicarea acestei solicitări va înlocui configurația existentă.', + }, + warningMessage: { + timeoutExceeded: 'Rezultatele nu sunt afișate din cauza expirării. Vă rugăm să consultați jurnalele pentru a colecta rezultatele complete.', + }, + noResult: 'Ieșirea va fi afișată aici.', } export default translation diff --git a/web/i18n/ru-RU/app-debug.ts b/web/i18n/ru-RU/app-debug.ts index 1d45c90a43..0ff97c6cca 100644 --- a/web/i18n/ru-RU/app-debug.ts +++ b/web/i18n/ru-RU/app-debug.ts @@ -197,6 +197,7 @@ const translation = { after: '', }, }, + contentEnableLabel: 'Включен модерируемый контент', }, fileUpload: { title: 'Загрузка файлов', @@ -294,6 +295,7 @@ const translation = { 'Пожалуйста, дождитесь завершения ответа на пакетное задание.', notSelectModel: 'Пожалуйста, выберите модель', waitForImgUpload: 'Пожалуйста, дождитесь загрузки изображения', + waitForFileUpload: 'Пожалуйста, дождитесь загрузки файла/файлов', }, chatSubTitle: 'Инструкции', completionSubTitle: 'Префикс Промпта', @@ -359,6 +361,32 @@ const translation = { 'defaultValue': 'Значение по умолчанию', 'noDefaultValue': 'Без значения по умолчанию', 'selectDefaultValue': 'Выберите значение по умолчанию', + 'file': { + image: { + name: 'Образ', + }, + audio: { + name: 'Аудио', + }, + document: { + name: 'Документ', + }, + video: { + name: 'Видео', + }, + custom: { + createPlaceholder: ' Расширение файла, например .doc', + name: 'Другие типы файлов', + description: 'Укажите другие типы файлов.', + }, + supportFileTypes: 'Типы файлов поддержки', + }, + 'both': 'Оба', + 'localUpload': 'Локальная загрузка', + 'maxNumberOfUploads': 'Максимальное количество загрузок', + 'uploadFileTypes': 'Типы файлов загрузки', + 'single-file': 'Друг за другом', + 'multi-files': 'Список файлов', }, vision: { name: 'Зрение', @@ -378,6 +406,7 @@ const translation = { url: 'URL', uploadLimit: 'Лимит загрузки', }, + onlySupportVisionModelTip: 'Поддерживает только модели машинного зрения', }, voice: { name: 'Голос', @@ -449,6 +478,7 @@ const translation = { score_threshold: 'Порог оценки', score_thresholdTip: 'Используется для установки порога сходства для фильтрации фрагментов.', retrieveChangeTip: 'Изменение режима индексации и режима поиска может повлиять на приложения, связанные с этими знаниями.', + embeddingModelRequired: 'Требуется сконфигурированная модель встраивания', }, debugAsSingleModel: 'Отладка как одной модели', debugAsMultipleModel: 'Отладка как нескольких моделей', @@ -490,6 +520,26 @@ const translation = { enabled: 'Включено', }, }, + codegen: { + generate: 'Порождать', + title: 'Генератор кодов', + resTitle: 'Сгенерированный код', + generatedCodeTitle: 'Сгенерированный код', + applyChanges: 'Применение изменений', + loading: 'Генерация кода...', + noDataLine2: 'Предварительный просмотр кода будет показан здесь.', + instruction: 'Резолюция', + apply: 'Применять', + overwriteConfirmTitle: 'Перезаписать существующий код?', + overwriteConfirmMessage: 'Это действие перезапишет существующий код. Хотите продолжить?', + instructionPlaceholder: 'Введите подробное описание кода, который вы хотите сгенерировать.', + noDataLine1: 'Опишите свой вариант использования слева,', + description: 'Генератор кода использует настроенные модели для создания высококачественного кода на основе ваших инструкций. Пожалуйста, предоставьте четкие и подробные инструкции.', + }, + warningMessage: { + timeoutExceeded: 'Результаты не отображаются из-за тайм-аута. Пожалуйста, обратитесь к журналам для получения полных результатов.', + }, + noResult: 'Вывод будет отображаться здесь.', } export default translation diff --git a/web/i18n/sl-SI/app-debug.ts b/web/i18n/sl-SI/app-debug.ts index 597a8afa06..6e02a0d586 100644 --- a/web/i18n/sl-SI/app-debug.ts +++ b/web/i18n/sl-SI/app-debug.ts @@ -185,8 +185,19 @@ const translation = { content: { input: 'Moderiraj VSEBINO VNOSA', output: 'Moderiraj VSEBINO IZHODA', + preset: 'Prednastavljeni odgovori', + errorMessage: 'Prednastavljeni odgovori ne smejo biti prazni', + condition: 'Zmerna vsebina INPUT in OUTPUT je omogočena vsaj ena', + supportMarkdown: 'Podprt za Markdown', + fromApi: 'Prednastavljene odgovore vrne API', + placeholder: 'Prednastavljena vsebina odgovorov tukaj', + }, + openaiNotConfig: { + after: '', + before: 'Za moderiranje OpenAI potrebujete ključ OpenAI API, konfiguriran v', }, }, + contentEnableLabel: 'Omogočena zmerna vsebina', }, debug: { title: 'Odpravljanje napak', @@ -264,6 +275,291 @@ const translation = { description: 'Omogočitev zvoka bo omogočila modelu, da obdela zvočne datoteke za prepisovanje in analizo.', }, }, + codegen: { + instruction: 'Navodila', + title: 'Generator kode', + resTitle: 'Ustvarjena koda', + loading: 'Generiranje kode ...', + generatedCodeTitle: 'Ustvarjena koda', + noDataLine1: 'Na levi opišite primer uporabe,', + noDataLine2: 'Predogled kode bo prikazan tukaj.', + instructionPlaceholder: 'Vnesite podroben opis kode, ki jo želite ustvariti.', + apply: 'Uporabiti', + generate: 'Ustvariti', + overwriteConfirmTitle: 'Prepisati obstoječo kodo?', + applyChanges: 'Uporaba sprememb', + overwriteConfirmMessage: 'S tem dejanjem boste prepisali obstoječo kodo. Želite nadaljevati?', + description: 'Generator kode uporablja konfigurirane modele za ustvarjanje visokokakovostne kode na podlagi vaših navodil. Navedite jasna in podrobna navodila.', + }, + generate: { + template: { + pythonDebugger: { + name: 'Python razhroščevalnik', + instruction: 'Bot, ki lahko ustvari in razhrošči vašo kodo na podlagi vaših navodil', + }, + translation: { + name: 'Prevod', + instruction: 'Prevajalec, ki zna prevesti več jezikov', + }, + professionalAnalyst: { + name: 'Strokovni analitik', + instruction: 'Pridobite vpoglede, prepoznajte tveganja in destilirajte ključne informacije iz dolgih poročil v en sam zapisek', + }, + excelFormulaExpert: { + instruction: 'Chatbot, ki lahko začetnikom pomaga razumeti, uporabljati in ustvarjati Excelove formule na podlagi uporabniških navodil', + name: 'Strokovnjak za formule v Excelu', + }, + travelPlanning: { + instruction: 'Pomočnik za načrtovanje potovanj je inteligentno orodje, ki uporabnikom pomaga pri enostavnem načrtovanju potovanj', + name: 'Načrtovanje potovanj', + }, + SQLSorcerer: { + name: 'Čarovnik SQL', + instruction: 'Pretvorba vsakdanjega jezika v poizvedbe SQL', + }, + GitGud: { + instruction: 'Ustvarite ustrezne ukaze Git na podlagi dejanj nadzora različic, ki jih je opisal uporabnik', + name: 'Git gud', + }, + meetingTakeaways: { + name: 'Povzetki s srečanja', + instruction: 'Srečanja destilirajte v jedrnate povzetke, vključno s temami za razpravo, ključnimi ugotovitvami in dejanji', + }, + writingsPolisher: { + name: 'Pisanje polir', + instruction: 'Uporabite napredne tehnike urejanja besedil za izboljšanje svojega pisanja', + }, + }, + apply: 'Uporabiti', + generate: 'Ustvariti', + instructionPlaceHolder: 'Napišite jasna in specifična navodila.', + resTitle: 'Ustvarjen poziv', + noDataLine2: 'Predogled orkestracije bo prikazan tukaj.', + overwriteMessage: 'Če uporabite ta poziv, boste preglasili obstoječo konfiguracijo.', + overwriteTitle: 'Preglasiti obstoječo konfiguracijo?', + instruction: 'Navodila', + loading: 'Orkestriranje aplikacije za vas ...', + noDataLine1: 'Na levi opišite primer uporabe,', + title: 'Generator pozivov', + tryIt: 'Poskusite', + description: 'Generator pozivov uporablja konfiguriran model za optimizacijo pozivov za višjo kakovost in boljšo strukturo. Prosimo, napišite jasna in podrobna navodila.', + }, + resetConfig: { + title: 'Potrdite ponastavitev?', + message: 'Ponastavitev zavrže spremembe in obnovi zadnjo objavljeno konfiguracijo.', + }, + errorMessage: { + notSelectModel: 'Prosimo, izberite model', + waitForImgUpload: 'Prosimo, počakajte, da se slika naloži', + waitForResponse: 'Počakajte, da se odgovor na prejšnje sporočilo dokonča.', + waitForBatchResponse: 'Počakajte, da se konča odgovor na paketno nalogo.', + queryRequired: 'Besedilo zahteve je obvezno.', + waitForFileUpload: 'Prosimo, počakajte, da se datoteka/datoteke naložijo', + }, + warningMessage: { + timeoutExceeded: 'Rezultati niso prikazani zaradi časovne omejitve. Prosimo, glejte dnevnike, da zberete popolne rezultate.', + }, + variableTable: { + action: 'Dejanja', + optional: 'Neobvezno', + typeString: 'Niz', + typeSelect: 'Izbrati', + type: 'Vrsta vnosa', + key: 'Spremenljivi ključ', + name: 'Ime uporabniškega vnosnega polja', + }, + varKeyError: {}, + otherError: { + promptNoBeEmpty: 'Poziv ne more biti prazen', + historyNoBeEmpty: 'Zgodovina pogovorov mora biti nastavljena v pozivu', + queryNoBeEmpty: 'Poizvedba mora biti nastavljena v pozivu', + }, + variableConfig: { + 'file': { + image: { + name: 'Podoba', + }, + audio: { + name: 'Avdio', + }, + document: { + name: 'Dokument', + }, + video: { + name: 'Video', + }, + custom: { + description: 'Določite druge vrste datotek.', + name: 'Druge vrste datotek', + createPlaceholder: ' Pripona datoteke, npr. .doc', + }, + supportFileTypes: 'Podporne vrste datotek', + }, + 'errorMsg': { + varNameCanBeRepeat: 'Imena spremenljivke ni mogoče ponoviti', + atLeastOneOption: 'Potrebna je vsaj ena možnost', + optionRepeat: 'Ima možnosti ponavljanja', + labelNameRequired: 'Ime nalepke je obvezno', + }, + 'content': 'Vsebina', + 'number': 'Številka', + 'selectDefaultValue': 'Izbira privzete vrednosti', + 'maxNumberOfUploads': 'Največje število nalaganj', + 'localUpload': 'Lokalno nalaganje', + 'string': 'Kratko besedilo', + 'paragraph': 'Odstavek', + 'maxLength': 'Največja dolžina', + 'defaultValue': 'Privzeta vrednost', + 'apiBasedVar': 'Spremenljivka, ki temelji na API-ju', + 'stringTitle': 'Možnosti polja z besedilom obrazca', + 'varName': 'Ime spremenljivke', + 'text-input': 'Kratko besedilo', + 'uploadFileTypes': 'Nalaganje vrst datotek', + 'noDefaultValue': 'Ni privzete vrednosti', + 'addOption': 'Dodaj možnost', + 'select': 'Izbrati', + 'hide': 'Skriti', + 'both': 'Oba', + 'multi-files': 'Seznam datotek', + 'single-file': 'Ena datoteka', + 'options': 'Možnosti', + 'addModalTitle': 'Dodajanje vhodnega polja', + 'inputPlaceholder': 'Prosimo, vnesite', + 'fieldType': 'Vrsta polja', + 'editModalTitle': 'Uredi vnosno polje', + 'required': 'Zahteva', + 'labelName': 'Ime nalepke', + }, + vision: { + visionSettings: { + resolution: 'Resolucija', + uploadMethod: 'Način nalaganja', + high: 'Visok', + url: 'Spletni naslov', + localUpload: 'Lokalno nalaganje', + uploadLimit: 'Omejitev nalaganja', + title: 'Nastavitve vida', + both: 'Oba', + low: 'Nizek', + }, + name: 'Vid', + settings: 'Nastavitve', + description: 'Omogoči vid bo modelu omogočil, da posname slike in odgovarja na vprašanja o njih.', + onlySupportVisionModelTip: 'Podpira samo modele vida', + }, + voice: { + voiceSettings: { + voice: 'Glas', + language: 'Jezik', + autoPlayDisabled: 'Off', + autoPlayEnabled: 'Na', + resolutionTooltip: 'Jezik glasovne podpore za pretvorbo besedila v govor。', + title: 'Glasovne nastavitve', + autoPlay: 'Samodejno predvajanje', + }, + defaultDisplay: 'Privzeti glas', + name: 'Glas', + settings: 'Nastavitve', + description: 'Glasovne nastavitve za pretvorbo besedila v govor', + }, + openingStatement: { + openingQuestion: 'Uvodna vprašanja', + title: 'Odpiralec pogovorov', + tooShort: 'Za ustvarjanje uvodnih pripomb za pogovor je potrebnih vsaj 20 besed začetnega poziva.', + noDataPlaceHolder: 'Začetek pogovora z uporabnikom lahko AI pomaga vzpostaviti tesnejšo povezavo z njimi v pogovornih aplikacijah.', + add: 'Dodati', + writeOpener: 'Odpiralnik za urejanje', + }, + modelConfig: { + modeType: { + chat: 'Chat', + completion: 'Dokončati', + }, + title: 'Model in parametri', + model: 'Model', + setTone: 'Nastavitev tona odzivov', + }, + inputs: { + queryPlaceholder: 'Prosimo, vnesite besedilo zahteve.', + title: 'Odpravljanje napak in predogled', + chatVarTip: 'Izpolnite vrednost spremenljivke, ki bo samodejno nadomeščena v pozivni besedi vsakič, ko se začne nova seja', + queryTitle: 'Vsebina poizvedbe', + userInputField: 'Uporabniško polje za vnos', + run: 'TEČI', + noPrompt: 'Poskusite napisati nekaj poziva v vnos pred pozivom', + previewTitle: 'Takojšen predogled', + noVar: 'Izpolnite vrednost spremenljivke, ki bo samodejno nadomeščena v pozivni besedi vsakič, ko se začne nova seja.', + completionVarTip: 'Izpolnite vrednost spremenljivke, ki bo samodejno nadomeščena v pozivnih besedah vsakič, ko boste oddali vprašanje.', + }, + datasetConfig: { + retrieveOneWay: { + title: 'Pridobivanje N-na-1', + description: 'Na podlagi namena uporabnika in opisov znanja agent avtonomno izbere najboljše znanje za poizvedovanje. Najboljše za aplikacije z izrazitim, omejenim znanjem.', + }, + retrieveMultiWay: { + title: 'Pridobivanje več poti', + description: 'Na podlagi namena uporabnika poizvedbe v celotnem znanju, pridobijo ustrezno besedilo iz več virov in izberejo najboljše rezultate, ki se ujemajo z uporabniško poizvedbo po ponovnem razvrščanju.', + }, + params: 'Params', + embeddingModelRequired: 'Potreben je konfiguriran model vdelave', + settingTitle: 'Nastavitve pridobivanja', + rerankModelRequired: 'Potreben je konfiguriran model ponovnega razvrščanja', + knowledgeTip: 'Kliknite gumb " " za dodajanje znanja', + score_threshold: 'Prag ocenjevanja', + score_thresholdTip: 'Uporablja se za nastavitev praga podobnosti za filtriranje kosov.', + retrieveChangeTip: 'Spreminjanje kazalnega načina in načina pridobivanja lahko vpliva na aplikacije, povezane s tem znanjem.', + top_k: 'Vrh K', + top_kTip: 'Uporablja se za filtriranje kosov, ki so najbolj podobni vprašanjem uporabnikov. Sistem bo tudi dinamično prilagajal vrednost Top K, glede na max_tokens izbranega modela.', + }, + assistantType: { + chatAssistant: { + name: 'Osnovni pomočnik', + description: 'Ustvarjanje pomočnika za klepet z uporabo velikega jezikovnega modela', + }, + agentAssistant: { + name: 'Pomočnik agenta', + description: 'Zgradite inteligentnega agenta, ki lahko samostojno izbere orodja za dokončanje nalog', + }, + name: 'Vrsta pomočnika', + }, + agent: { + agentModeType: { + functionCall: 'Klicanje funkcij', + ReACT: 'Reagirajo', + }, + setting: { + maximumIterations: { + description: 'Omejitev števila ponovitev, ki jih lahko izvede pomočnik agenta', + name: 'Največje število ponovitev', + }, + description: 'Nastavitve pomočnika za agente omogočajo nastavitev načina agenta in naprednih funkcij, kot so vgrajeni pozivi, ki so na voljo samo v vrsti agenta.', + name: 'Nastavitve agenta', + }, + tools: { + enabled: 'Omogočeno', + name: 'Orodja', + description: 'Uporaba orodij lahko razširi zmogljivosti LLM, kot je iskanje po internetu ali izvajanje znanstvenih izračunov', + }, + agentMode: 'Način agenta', + promptPlaceholder: 'Tukaj napišite svoj poziv', + agentModeDes: 'Nastavitev vrste načina sklepanja za agenta', + firstPrompt: 'Prvi poziv', + nextIteration: 'Naslednja ponovitev', + buildInPrompt: 'Poziv za vgradnjo', + }, + chatSubTitle: 'Navodila', + variableTitle: 'Spremenljivke', + completionSubTitle: 'Poziv za predpono', + debugAsSingleModel: 'Odpravljanje napak kot en model', + noResult: 'Tukaj bo prikazan izhod.', + debugAsMultipleModel: 'Odpravljanje napak kot več modelov', + formattingChangedText: 'Spreminjanje oblikovanja bo ponastavilo območje za odpravljanje napak, ste prepričani?', + autoAddVar: 'Nedoločene spremenljivke, na katere se sklicuje vnaprejšnji poziv, ali jih želite dodati v obrazec za vnos uporabnika?', + formattingChangedTitle: 'Spremenjeno oblikovanje', + duplicateModel: 'Dvojnik', + publishAs: 'Objavi kot', + result: 'Izhodno besedilo', + variableTip: 'Uporabniki izpolnijo spremenljivke v obrazcu in samodejno zamenjajo spremenljivke v pozivu.', } export default translation diff --git a/web/i18n/th-TH/app-debug.ts b/web/i18n/th-TH/app-debug.ts index a2e939b5fe..303b91353a 100644 --- a/web/i18n/th-TH/app-debug.ts +++ b/web/i18n/th-TH/app-debug.ts @@ -27,7 +27,488 @@ const translation = { title: 'เสียง', description: 'การเปิดใช้งานเสียงจะทำให้โมเดลสามารถประมวลผลไฟล์เสียงเพื่อการถอดข้อความและการวิเคราะห์ได้', }, + groupChat: { + title: 'ปรับปรุงแชท', + description: 'เพิ่มการตั้งค่าก่อนการสนทนาสําหรับแอปสามารถปรับปรุงประสบการณ์ของผู้ใช้ได้', + }, + groupExperience: { + title: 'ปรับปรุงประสบการณ์', + }, + conversationOpener: { + description: 'ในแอปแชท ประโยคแรกที่ AI พูดกับผู้ใช้อย่างแข็งขันมักจะใช้เป็นการต้อนรับ', + title: 'ที่เปิดการสนทนา', + }, + suggestedQuestionsAfterAnswer: { + title: 'ติดตาม', + resDes: '3 ข้อเสนอแนะสําหรับผู้ใช้คําถามถัดไป', + tryToAsk: 'ลองถาม', + description: 'การตั้งค่าคําแนะนําคําถามถัดไปจะช่วยให้ผู้ใช้แชทได้ดีขึ้น', + }, + moreLikeThis: { + title: 'เพิ่มเติมเช่นนี้', + tip: 'การใช้คุณสมบัตินี้จะมีค่าใช้จ่ายโทเค็นเพิ่มเติม', + generateNumTip: 'จํานวนครั้งที่สร้างขึ้นแต่ละครั้ง', + description: 'สร้างข้อความหลายข้อความพร้อมกัน จากนั้นแก้ไขและสร้างต่อไป', + }, + speechToText: { + description: 'สามารถใช้การป้อนข้อมูลด้วยเสียงในการแชทได้', + title: 'คําพูดเป็นข้อความ', + resDes: 'เปิดใช้งานการป้อนข้อมูลด้วยเสียง', + }, + textToSpeech: { + title: 'ข้อความเป็นคําพูด', + resDes: 'เปิดใช้งานข้อความเป็นเสียง', + description: 'ข้อความการสนทนาสามารถแปลงเป็นคําพูดได้', + }, + citation: { + title: 'การอ้างอิงและการระบุแหล่งที่มา', + resDes: 'เปิดใช้งานการอ้างอิงและการระบุแหล่งที่มา', + description: 'แสดงเอกสารต้นฉบับและส่วนที่มาของเนื้อหาที่สร้างขึ้น', + }, + annotation: { + scoreThreshold: { + accurateMatch: 'การจับคู่ที่แม่นยํา', + description: 'ใช้เพื่อกําหนดเกณฑ์ความคล้ายคลึงกันสําหรับการตอบกลับคําอธิบายประกอบ', + easyMatch: 'จับคู่ง่าย', + title: 'เกณฑ์คะแนน', + }, + matchVariable: { + title: 'ตัวแปรการจับคู่', + choosePlaceholder: 'เลือกตัวแปรการจับคู่', + }, + removeConfirm: 'ลบคําอธิบายประกอบนี้ ?', + cacheManagement: 'คำ อธิบาย', + title: 'คําอธิบายประกอบ ตอบกลับ', + remove: 'ถอด', + resDes: 'เปิดใช้งานการตอบสนองคําอธิบายประกอบ', + add: 'เพิ่มคําอธิบายประกอบ', + edit: 'แก้ไขคําอธิบายประกอบ', + cached: 'มีคําอธิบายประกอบ', + description: 'คุณสามารถเพิ่มการตอบกลับคุณภาพสูงลงในแคชด้วยตนเองเพื่อจับคู่ลําดับความสําคัญกับคําถามของผู้ใช้ที่คล้ายกัน', + }, + dataSet: { + queryVariable: { + ok: 'ตกลง, ได้', + noVar: 'ไม่ใช่ตัวแปร', + choosePlaceholder: 'เลือกตัวแปรแบบสอบถาม', + tip: 'ตัวแปรนี้จะถูกใช้เป็นอินพุตแบบสอบถามสําหรับการดึงบริบท โดยรับข้อมูลบริบทที่เกี่ยวข้องกับอินพุตของตัวแปรนี้', + unableToQueryDataSetTip: 'ไม่สามารถสืบค้นความรู้ได้สําเร็จ โปรดเลือกตัวแปรการสืบค้นบริบทในส่วนบริบท', + noVarTip: 'โปรดสร้างตัวแปรภายใต้ส่วนตัวแปร', + title: 'ตัวแปรคิวรี', + contextVarNotEmpty: 'ตัวแปรการสืบค้นบริบทต้องไม่ว่างเปล่า', + deleteContextVarTip: 'ตัวแปรนี้ถูกตั้งค่าเป็นตัวแปรแบบสอบถามบริบท และการลบตัวแปรนี้จะส่งผลต่อการใช้ความรู้ตามปกติ หากคุณยังต้องการลบ โปรดเลือกใหม่ในส่วนบริบท', + unableToQueryDataSet: 'ไม่สามารถสืบค้นความรู้ได้', + }, + noDataSet: 'ไม่พบความรู้', + notSupportSelectMulti: 'ปัจจุบันรองรับความรู้เพียงหนึ่งความรู้', + selected: 'เลือกความรู้', + title: 'ความรู้', + toCreate: 'ไปที่สร้าง', + words: 'นิรุกติ', + textBlocks: 'บล็อกข้อความ', + noData: 'คุณสามารถนําเข้าความรู้เป็นบริบทได้', + selectTitle: 'เลือกข้อมูลอ้างอิง ความรู้', + }, + tools: { + modal: { + toolType: { + title: 'ประเภทเครื่องมือ', + placeholder: 'โปรดเลือกประเภทเครื่องมือ', + }, + name: { + title: 'ชื่อ', + placeholder: 'กรุณากรอกชื่อ', + }, + variableName: { + title: 'ชื่อตัวแปร', + placeholder: 'กรุณากรอกชื่อตัวแปร', + }, + title: 'เครื่องมือ', + }, + title: 'เครื่อง มือ', + tips: 'เครื่องมือมีวิธีการเรียก API มาตรฐาน โดยใช้อินพุตหรือตัวแปรของผู้ใช้เป็นพารามิเตอร์คําขอสําหรับการสืบค้นข้อมูลภายนอกตามบริบท', + }, + conversationHistory: { + editModal: { + userPrefix: 'คํานําหน้าผู้ใช้', + title: 'แก้ไขชื่อบทบาทการสนทนา', + assistantPrefix: 'คํานําหน้าผู้ช่วย', + }, + description: 'ตั้งชื่อคํานําหน้าสําหรับบทบาทการสนทนา', + learnMore: 'ศึกษาเพิ่มเติม', + title: 'ประวัติการสนทนา', + }, + toolbox: { + title: 'เครื่อง มือ', + }, + moderation: { + modal: { + provider: { + openaiTip: { + suffix: '.', + prefix: 'การกลั่นกรอง OpenAI ต้องใช้คีย์ OpenAI API ที่กําหนดค่าไว้ใน', + }, + keywords: 'คำ', + openai: 'การกลั่นกรอง OpenAI', + title: 'ผู้จัดหา', + }, + keywords: { + placeholder: 'หนึ่งบรรทัดต่อบรรทัดคั่นด้วยตัวแบ่งบรรทัด', + tip: 'หนึ่งบรรทัด คั่นด้วยตัวแบ่งบรรทัด สูงสุด 100 อักขระต่อบรรทัด', + line: 'สาย', + }, + content: { + output: 'เนื้อหา OUTPUT ปานกลาง', + errorMessage: 'การตอบกลับที่ตั้งไว้ล่วงหน้าต้องไม่ว่างเปล่า', + fromApi: 'การตอบกลับที่ตั้งไว้ล่วงหน้าจะถูกส่งคืนโดย API', + supportMarkdown: 'รองรับ Markdown', + placeholder: 'เนื้อหาตอบกลับที่ตั้งไว้ล่วงหน้าที่นี่', + condition: 'เปิดใช้งานเนื้อหา INPUT และ OUTPUT กลั่นกรองอย่างน้อยหนึ่งรายการ', + input: 'กลั่นกรองเนื้อหา INPUT', + preset: 'การตอบกลับที่ตั้งไว้ล่วงหน้า', + }, + openaiNotConfig: { + after: '', + before: 'การกลั่นกรอง OpenAI ต้องใช้คีย์ OpenAI API ที่กําหนดค่าไว้ใน', + }, + title: 'การตั้งค่าการกลั่นกรองเนื้อหา', + }, + contentEnableLabel: 'เปิดใช้งานเนื้อหากลั่นกรอง', + outputEnabled: 'ผลิตภัณฑ์', + title: 'การกลั่นกรองเนื้อหา', + allEnabled: 'อินพุต & เอาต์พุต', + inputEnabled: 'อินพุต', + description: 'รักษาความปลอดภัยเอาต์พุตโมเดลโดยใช้ API การกลั่นกรองหรือรักษารายการคําที่ละเอียดอ่อน', + }, }, + pageTitle: { + line1: 'พร้อมท์', + line2: 'วิศวกรรม', + }, + promptMode: { + advancedWarning: { + ok: 'ตกลง, ได้', + description: 'ในโหมดผู้เชี่ยวชาญ คุณสามารถแก้ไข PROMPT ทั้งหมดได้', + title: 'คุณได้เปลี่ยนเป็นโหมดผู้เชี่ยวชาญแล้ว และเมื่อคุณแก้ไข PROMPT แล้ว คุณจะไม่สามารถกลับสู่โหมดพื้นฐานได้', + learnMore: 'ศึกษาเพิ่มเติม', + }, + operation: { + addMessage: 'เพิ่มข้อความ', + }, + switchBack: 'สลับกลับ', + contextMissing: 'องค์ประกอบบริบทที่พลาดไปประสิทธิภาพของพรอมต์อาจไม่ดี', + simple: 'เปลี่ยนเป็นโหมดผู้เชี่ยวชาญเพื่อแก้ไข PROMPT ทั้งหมด', + advanced: 'แฟชั่นผู้เชี่ยวชาญ', + }, + operation: { + automatic: 'ผลิต', + applyConfig: 'ตีพิมพ์', + disagree: 'ไม่ชอบ', + userAction: 'ผู้ใช้', + stopResponding: 'หยุดการตอบสนอง', + cancelAgree: 'ยกเลิกถูกใจ', + addFeature: 'เพิ่มคุณสมบัติ', + cancelDisagree: 'ยกเลิกการไม่ชอบ', + agree: 'ชอบ', + resetConfig: 'รี เซ็ต', + debugConfig: 'ดีบัก', + }, + notSetAPIKey: { + settingBtn: 'ไปที่การตั้งค่า', + trailFinished: 'เส้นทางเสร็จสิ้น', + description: 'ยังไม่ได้ตั้งค่าคีย์ผู้ให้บริการ LLM และจําเป็นต้องตั้งค่าก่อนการดีบัก', + title: 'ไม่ได้ตั้งค่าคีย์ผู้ให้บริการ LLM', + }, + trailUseGPT4Info: { + description: 'ใช้ gpt-4 โปรดตั้งค่าคีย์ API', + title: 'ไม่รองรับ gpt-4 ในขณะนี้', + }, + codegen: { + applyChanges: 'ใช้การเปลี่ยนแปลง', + generate: 'ผลิต', + instructionPlaceholder: 'ป้อนคําอธิบายโดยละเอียดของรหัสที่คุณต้องการสร้าง', + noDataLine1: 'อธิบายกรณีการใช้งานของคุณทางด้านซ้าย', + title: 'เครื่องสร้างรหัส', + overwriteConfirmMessage: 'การดําเนินการนี้จะเขียนทับโค้ดที่มีอยู่ คุณต้องการดําเนินการต่อหรือไม่?', + loading: 'กําลังสร้างโค้ด...', + generatedCodeTitle: 'รหัสที่สร้างขึ้น', + apply: 'ใช้', + overwriteConfirmTitle: 'เขียนทับรหัสที่มีอยู่ใช่ไหม', + instruction: 'คำ แนะ นำ', + resTitle: 'รหัสที่สร้างขึ้น', + noDataLine2: 'ตัวอย่างโค้ดจะแสดงที่นี่', + description: 'ตัวสร้างโค้ดใช้โมเดลที่กําหนดค่าเพื่อสร้างโค้ดคุณภาพสูงตามคําแนะนําของคุณ โปรดให้คําแนะนําที่ชัดเจนและละเอียด', + }, + generate: { + template: { + pythonDebugger: { + name: 'ดีบักเกอร์ Python', + instruction: 'บอทที่สามารถสร้างและแก้ไขข้อบกพร่องโค้ดของคุณตามคําสั่งของคุณ', + }, + translation: { + instruction: 'นักแปลที่สามารถแปลได้หลายภาษา', + name: 'การแปล', + }, + professionalAnalyst: { + name: 'นักวิเคราะห์มืออาชีพ', + instruction: 'ดึงข้อมูลเชิงลึก ระบุความเสี่ยง และกลั่นกรองข้อมูลสําคัญจากรายงานขนาดยาวลงในบันทึกเดียว', + }, + excelFormulaExpert: { + name: 'ผู้เชี่ยวชาญด้านสูตร Excel', + instruction: 'แชทบอทที่สามารถช่วยให้ผู้ใช้มือใหม่เข้าใจ ใช้ และสร้างสูตร Excel ตามคําแนะนําของผู้ใช้', + }, + travelPlanning: { + name: 'การวางแผนการเดินทาง', + instruction: 'ผู้ช่วยวางแผนการเดินทางเป็นเครื่องมืออัจฉริยะที่ออกแบบมาเพื่อช่วยให้ผู้ใช้วางแผนการเดินทางได้อย่างง่ายดาย', + }, + SQLSorcerer: { + name: 'พ่อมด SQL', + instruction: 'แปลงภาษาในชีวิตประจําวันให้เป็นแบบสอบถาม SQL', + }, + GitGud: { + name: 'กิต gud', + instruction: 'สร้างคําสั่ง Git ที่เหมาะสมตามการดําเนินการควบคุมเวอร์ชันที่ผู้ใช้อธิบาย', + }, + meetingTakeaways: { + name: 'ประเด็นการประชุม', + instruction: 'กลั่นกรองการประชุมเป็นบทสรุปที่กระชับ รวมถึงหัวข้อการสนทนา ประเด็นสําคัญ และรายการปฏิบัติ', + }, + writingsPolisher: { + name: 'เครื่องขัดเขียน', + instruction: 'ใช้เทคนิคการแก้ไขคําโฆษณาขั้นสูงเพื่อปรับปรุงงานเขียนของคุณ', + }, + }, + generate: 'ผลิต', + instruction: 'คำ แนะ นำ', + apply: 'ใช้', + resTitle: 'พรอมต์ที่สร้างขึ้น', + title: 'เครื่องกําเนิดพร้อมท์', + noDataLine2: 'ตัวอย่างการประสานเสียงจะแสดงที่นี่', + tryIt: 'ลองดู', + overwriteTitle: 'แทนที่การกําหนดค่าที่มีอยู่ใช่ไหม', + noDataLine1: 'อธิบายกรณีการใช้งานของคุณทางด้านซ้าย', + instructionPlaceHolder: 'เขียนคําแนะนําที่ชัดเจนและเฉพาะเจาะจง', + overwriteMessage: 'การใช้พรอมต์นี้จะแทนที่การกําหนดค่าที่มีอยู่', + description: 'ตัวสร้างพรอมต์ใช้โมเดลที่กําหนดค่าเพื่อปรับพรอมต์ให้เหมาะสมเพื่อคุณภาพที่สูงขึ้นและโครงสร้างที่ดีขึ้น โปรดเขียนคําแนะนําที่ชัดเจนและละเอียด', + loading: 'กําลังประสานงานแอปพลิเคชันสําหรับคุณ...', + }, + resetConfig: { + title: 'ยืนยันการรีเซ็ต?', + message: 'รีเซ็ตจะละทิ้งการเปลี่ยนแปลง โดยคืนค่าการกําหนดค่าที่เผยแพร่ล่าสุด', + }, + errorMessage: { + waitForFileUpload: 'โปรดรอให้ไฟล์/ไฟล์อัปโหลด', + notSelectModel: 'โปรดเลือกรุ่น', + waitForBatchResponse: 'โปรดรอให้การตอบกลับงานแบทช์เสร็จสมบูรณ์', + waitForResponse: 'โปรดรอให้การตอบกลับข้อความก่อนหน้าเสร็จสมบูรณ์', + waitForImgUpload: 'โปรดรอให้ภาพอัปโหลด', + queryRequired: 'ต้องส่งข้อความคําขอ', + }, + warningMessage: { + timeoutExceeded: 'ผลลัพธ์จะไม่แสดงเนื่องจากหมดเวลา โปรดดูบันทึกเพื่อรวบรวมผลลัพธ์ที่สมบูรณ์', + }, + variableTable: { + optional: 'เสริม', + key: 'ปุ่มตัวแปร', + typeString: 'เชือก', + typeSelect: 'เลือก', + type: 'ประเภทอินพุต', + name: 'ชื่อฟิลด์ป้อนข้อมูลของผู้ใช้', + action: 'การดําเนินการ', + }, + varKeyError: {}, + otherError: { + queryNoBeEmpty: 'ต้องตั้งค่าคิวรีในพร้อมท์', + promptNoBeEmpty: 'พรอมต์ไม่สามารถว่างเปล่าได้', + historyNoBeEmpty: 'ต้องตั้งค่าประวัติการสนทนาในข้อความแจ้ง', + }, + variableConfig: { + 'file': { + image: { + name: 'ภาพ', + }, + audio: { + name: 'เสียง', + }, + document: { + name: 'เอกสาร', + }, + video: { + name: 'วีดิทัศน์', + }, + custom: { + description: 'ระบุประเภทไฟล์อื่นๆ', + name: 'ไฟล์ประเภทอื่น ๆ', + createPlaceholder: ' นามสกุลไฟล์ เช่น .doc', + }, + supportFileTypes: 'ประเภทไฟล์ที่รองรับ', + }, + 'errorMsg': { + atLeastOneOption: 'จําเป็นต้องมีอย่างน้อยหนึ่งตัวเลือก', + labelNameRequired: 'ต้องมีชื่อฉลาก', + optionRepeat: 'มีตัวเลือกการทําซ้ํา', + varNameCanBeRepeat: 'ไม่สามารถทําซ้ําชื่อตัวแปรได้', + }, + 'hide': 'ซ่อน', + 'required': 'ต้องระบุ', + 'number': 'เลข', + 'inputPlaceholder': 'กรุณาป้อน', + 'uploadFileTypes': 'อัปโหลดประเภทไฟล์', + 'content': 'เนื้อหา', + 'addOption': 'เพิ่มตัวเลือก', + 'labelName': 'ชื่อฉลาก', + 'options': 'ตัวเลือก', + 'stringTitle': 'ตัวเลือกกล่องข้อความฟอร์ม', + 'noDefaultValue': 'ไม่มีค่าเริ่มต้น', + 'varName': 'ชื่อตัวแปร', + 'defaultValue': 'ค่าเริ่มต้น', + 'fieldType': 'ชนิดฟิลด์', + 'selectDefaultValue': 'เลือกค่าเริ่มต้น', + 'string': 'ข้อความสั้น', + 'text-input': 'ข้อความสั้น', + 'multi-files': 'รายการไฟล์', + 'maxLength': 'ความยาวสูงสุด', + 'addModalTitle': 'เพิ่มฟิลด์อินพุต', + 'localUpload': 'อัปโหลดในเครื่อง', + 'single-file': 'ไฟล์เดียว', + 'select': 'เลือก', + 'maxNumberOfUploads': 'จํานวนการอัปโหลดสูงสุด', + 'editModalTitle': 'แก้ไขฟิลด์อินพุต', + 'apiBasedVar': 'ตัวแปรที่ใช้ API', + 'paragraph': 'วรรค', + 'both': 'ทั้งสอง', + }, + vision: { + visionSettings: { + resolution: 'มติ', + uploadMethod: 'วิธีการอัปโหลด', + localUpload: 'อัปโหลดในเครื่อง', + low: 'ต่ํา', + high: 'สูง', + title: 'การตั้งค่าวิสัยทัศน์', + uploadLimit: 'ขีดจํากัดการอัปโหลด', + both: 'ทั้งสอง', + url: 'URL', + }, + onlySupportVisionModelTip: 'รองรับเฉพาะโมเดลการมองเห็น', + name: 'การมองเห็น', + description: 'เปิดใช้งานวิสัยทัศน์จะช่วยให้โมเดลสามารถถ่ายภาพและตอบคําถามเกี่ยวกับภาพเหล่านั้นได้', + settings: 'การตั้งค่า', + }, + voice: { + voiceSettings: { + autoPlayEnabled: 'บน', + autoPlay: 'เล่นอัตโนมัติ', + voice: 'เสียง', + resolutionTooltip: 'ภาษาสนับสนุนเสียงแปลงข้อความเป็นคําพูด。', + autoPlayDisabled: 'ไป', + title: 'การตั้งค่าเสียง', + language: 'ภาษา', + }, + name: 'เสียง', + settings: 'การตั้งค่า', + description: 'การตั้งค่าเสียงข้อความเป็นคําพูด', + defaultDisplay: 'เสียงเริ่มต้น', + }, + openingStatement: { + tooShort: 'ต้องใช้ข้อความแจ้งเริ่มต้นอย่างน้อย 20 คําเพื่อสร้างคําพูดเปิดการสนทนา', + openingQuestion: 'คําถามเปิด', + writeOpener: 'ตัวเปิดแก้ไข', + add: 'เพิ่ม', + title: 'ที่เปิดการสนทนา', + noDataPlaceHolder: 'การเริ่มการสนทนากับผู้ใช้สามารถช่วยให้ AI สร้างความสัมพันธ์ที่ใกล้ชิดกับพวกเขาในแอปพลิเคชันการสนทนา', + }, + modelConfig: { + modeType: { + completion: 'สมบูรณ์', + chat: 'สนทนา', + }, + model: 'แบบ', + title: 'รุ่นและพารามิเตอร์', + setTone: 'กําหนดน้ําเสียงของการตอบกลับ', + }, + inputs: { + run: 'วิ่ง', + userInputField: 'ฟิลด์ป้อนข้อมูลของผู้ใช้', + queryPlaceholder: 'กรุณากรอกข้อความคําขอ', + queryTitle: 'เนื้อหาแบบสอบถาม', + title: 'ดีบัก & ดูตัวอย่าง', + noVar: 'กรอกค่าของตัวแปร ซึ่งจะถูกแทนที่โดยอัตโนมัติในคําพร้อมท์ทุกครั้งที่เริ่มเซสชันใหม่', + previewTitle: 'พร้อมท์ดูตัวอย่าง', + chatVarTip: 'กรอกค่าของตัวแปร ซึ่งจะถูกแทนที่โดยอัตโนมัติในคําพร้อมท์ทุกครั้งที่เริ่มเซสชันใหม่', + noPrompt: 'ลองเขียนข้อความแจ้งในการป้อนข้อมูลล่วงหน้า', + completionVarTip: 'กรอกค่าของตัวแปร ซึ่งจะถูกแทนที่โดยอัตโนมัติในคําพร้อมท์ทุกครั้งที่มีการส่งคําถาม', + }, + datasetConfig: { + retrieveOneWay: { + title: 'การดึงข้อมูล N-to-1', + description: 'เอเจนต์จะเลือกความรู้ที่ดีที่สุดสําหรับการสืบค้นด้วยตนเอง ดีที่สุดสําหรับการใช้งานที่มีความรู้ที่แตกต่างและจํากัด', + }, + retrieveMultiWay: { + title: 'การดึงข้อมูลหลายเส้นทาง', + description: 'ตามความตั้งใจของผู้ใช้ การสืบค้นในความรู้ทั้งหมด ดึงข้อความที่เกี่ยวข้องจากหลายแหล่ง และเลือกผลลัพธ์ที่ดีที่สุดที่ตรงกับการสืบค้นของผู้ใช้หลังจากจัดอันดับใหม่', + }, + score_thresholdTip: 'ใช้เพื่อกําหนดเกณฑ์ความคล้ายคลึงกันสําหรับการกรองกลุ่ม', + settingTitle: 'การตั้งค่าการดึงข้อมูล', + rerankModelRequired: 'จําเป็นต้องมีโมเดลจัดอันดับใหม่ที่กําหนดค่าไว้', + knowledgeTip: 'คลิกปุ่ม " " เพื่อเพิ่มความรู้', + embeddingModelRequired: 'จําเป็นต้องมีโมเดลการฝังที่กําหนดค่าไว้', + score_threshold: 'เกณฑ์คะแนน', + retrieveChangeTip: 'การปรับเปลี่ยนโหมดดัชนีและโหมดการดึงข้อมูลอาจส่งผลต่อแอปพลิเคชันที่เกี่ยวข้องกับความรู้นี้', + top_k: 'ท็อป K', + params: 'พารามิเตอร์', + top_kTip: 'ใช้เพื่อกรองกลุ่มที่คล้ายกับคําถามของผู้ใช้มากที่สุด ระบบจะปรับค่าของ Top K แบบไดนามิกตาม max_tokens ของรุ่นที่เลือก', + }, + assistantType: { + chatAssistant: { + name: 'ผู้ช่วยพื้นฐาน', + description: 'สร้างผู้ช่วยตามแชทโดยใช้โมเดลภาษาขนาดใหญ่', + }, + agentAssistant: { + name: 'ผู้ช่วยตัวแทน', + description: 'สร้างตัวแทนอัจฉริยะที่สามารถเลือกเครื่องมือเพื่อทํางานให้เสร็จได้โดยอัตโนมัติ', + }, + name: 'ประเภทผู้ช่วย', + }, + agent: { + agentModeType: { + functionCall: 'การเรียกฟังก์ชัน', + ReACT: 'ตอบสนอง', + }, + setting: { + maximumIterations: { + description: 'จํากัดจํานวนการทําซ้ําที่ผู้ช่วยตัวแทนสามารถดําเนินการได้', + name: 'การทําซ้ําสูงสุด', + }, + name: 'การตั้งค่าตัวแทน', + description: 'การตั้งค่าผู้ช่วยตัวแทนอนุญาตให้ตั้งค่าโหมดตัวแทนและคุณสมบัติขั้นสูง เช่น ข้อความแจ้งในตัว ซึ่งใช้ได้เฉพาะในประเภทตัวแทนเท่านั้น', + }, + tools: { + enabled: 'เปิด', + name: 'เครื่อง มือ', + description: 'การใช้เครื่องมือสามารถขยายขีดความสามารถของ LLM ได้ เช่น การค้นหาทางอินเทอร์เน็ตหรือการคํานวณทางวิทยาศาสตร์', + }, + agentMode: 'โหมดตัวแทน', + firstPrompt: 'พรอมต์แรก', + buildInPrompt: 'พรอมต์ในตัว', + promptPlaceholder: 'เขียนข้อความแจ้งของคุณที่นี่', + nextIteration: 'การทําซ้ําครั้งต่อไป', + agentModeDes: 'ตั้งค่าประเภทของโหมดการอนุมานสําหรับตัวแทน', + }, + orchestrate: 'ออเคสตร้า', + variableTitle: 'ตัว แปร', + noResult: 'ผลลัพธ์จะแสดงที่นี่', + formattingChangedText: 'การแก้ไขการจัดรูปแบบจะรีเซ็ตพื้นที่ดีบัก คุณแน่ใจหรือไม่?', + publishAs: 'เผยแพร่เป็น', + result: 'ข้อความที่ส่งออก', + formattingChangedTitle: 'การจัดรูปแบบเปลี่ยนไป', + completionSubTitle: 'พรอมต์คํานําหน้า', + chatSubTitle: 'คำ แนะ นำ', + debugAsMultipleModel: 'ดีบักเป็นหลายรุ่น', + variableTip: 'ผู้ใช้กรอกตัวแปรในแบบฟอร์ม แทนที่ตัวแปรในพรอมต์โดยอัตโนมัติ', + debugAsSingleModel: 'ดีบักเป็นโมเดลเดียว', + duplicateModel: 'สำเนา', + autoAddVar: 'ตัวแปรที่ไม่ได้กําหนดอ้างอิงในพรอมต์ล่วงหน้าคุณต้องการเพิ่มในแบบฟอร์มการป้อนข้อมูลของผู้ใช้หรือไม่?', } export default translation diff --git a/web/i18n/tr-TR/app-debug.ts b/web/i18n/tr-TR/app-debug.ts index c9a5f7b585..152a00e428 100644 --- a/web/i18n/tr-TR/app-debug.ts +++ b/web/i18n/tr-TR/app-debug.ts @@ -197,6 +197,7 @@ const translation = { after: '', }, }, + contentEnableLabel: 'Etkin modere içerik', }, fileUpload: { title: 'Dosya Yükleme', @@ -294,6 +295,7 @@ const translation = { 'Toplu görevin yanıtını tamamlamasını bekleyin.', notSelectModel: 'Lütfen bir model seçin', waitForImgUpload: 'Lütfen görüntünün yüklenmesini bekleyin', + waitForFileUpload: 'Lütfen dosyanın/dosyaların yüklenmesini bekleyin', }, chatSubTitle: 'Talimatlar', completionSubTitle: 'Ön Prompt', @@ -329,36 +331,64 @@ const translation = { queryNoBeEmpty: 'Sorgu prompt\'ta ayarlanmalıdır', }, variableConfig: { - addModalTitle: 'Giriş Alanı Ekle', - editModalTitle: 'Giriş Alanı Düzenle', - description: 'Değişken ayarı {{varName}}', - fieldType: 'Alan türü', - string: 'Kısa Metin', - textInput: 'Kısa Metin', - paragraph: 'Paragraf', - select: 'Seçim', - number: 'Numara', - notSet: 'Ayarlanmamış, ön promptta {{input}} yazmayı deneyin', - stringTitle: 'Form metin kutusu seçenekleri', - maxLength: 'En uzunluk', - options: 'Seçenekler', - addOption: 'Seçenek ekle', - apiBasedVar: 'API tabanlı Değişken', - varName: 'Değişken Adı', - labelName: 'Etiket Adı', - inputPlaceholder: 'Lütfen girin', - content: 'İçerik', - required: 'Gerekli', - errorMsg: { + 'addModalTitle': 'Giriş Alanı Ekle', + 'editModalTitle': 'Giriş Alanı Düzenle', + 'description': 'Değişken ayarı {{varName}}', + 'fieldType': 'Alan türü', + 'string': 'Kısa Metin', + 'textInput': 'Kısa Metin', + 'paragraph': 'Paragraf', + 'select': 'Seçim', + 'number': 'Numara', + 'notSet': 'Ayarlanmamış, ön promptta {{input}} yazmayı deneyin', + 'stringTitle': 'Form metin kutusu seçenekleri', + 'maxLength': 'En uzunluk', + 'options': 'Seçenekler', + 'addOption': 'Seçenek ekle', + 'apiBasedVar': 'API tabanlı Değişken', + 'varName': 'Değişken Adı', + 'labelName': 'Etiket Adı', + 'inputPlaceholder': 'Lütfen girin', + 'content': 'İçerik', + 'required': 'Gerekli', + 'errorMsg': { varNameRequired: 'Değişken adı gereklidir', labelNameRequired: 'Etiket adı gereklidir', varNameCanBeRepeat: 'Değişken adı tekrar edemez', atLeastOneOption: 'En az bir seçenek gereklidir', optionRepeat: 'Yinelenen seçenekler var', }, - defaultValue: 'Varsayılan değer', - noDefaultValue: 'Varsayılan değer yok', - selectDefaultValue: 'Varsayılan değer seç', + 'defaultValue': 'Varsayılan değer', + 'noDefaultValue': 'Varsayılan değer yok', + 'selectDefaultValue': 'Varsayılan değer seç', + 'file': { + image: { + name: 'Resim', + }, + audio: { + name: 'Ses', + }, + document: { + name: 'Belge', + }, + video: { + name: 'Video', + }, + custom: { + description: 'Diğer dosya türlerini belirtin.', + createPlaceholder: ' Dosya uzantısı, örneğin .doc', + name: 'Diğer dosya türleri', + }, + supportFileTypes: 'Destek Dosya Türleri', + }, + 'hide': 'Gizlemek', + 'uploadFileTypes': 'Dosya Türlerini Yükle', + 'localUpload': 'Yerel Yükleme', + 'single-file': 'Tek Dosya', + 'multi-files': 'Dosya Listesi', + 'text-input': 'Kısa Metin', + 'both': 'Her ikisi', + 'maxNumberOfUploads': 'Maksimum yükleme sayısı', }, vision: { name: 'Görüş', @@ -376,6 +406,7 @@ const translation = { url: 'URL', uploadLimit: 'Yükleme Limiti', }, + onlySupportVisionModelTip: 'Yalnızca görme modellerini destekler', }, voice: { name: 'Konuşma', @@ -445,6 +476,7 @@ const translation = { score_threshold: 'Skor Eşiği', score_thresholdTip: 'Parça filtreleme için benzerlik eşiğini ayarlamak için kullanılır.', retrieveChangeTip: 'Dizin modunu ve geri alım modunu değiştirmek, bu Bilgi ile ilişkili uygulamaları etkileyebilir.', + embeddingModelRequired: 'Yapılandırılmış bir Gömme Modeli gereklidir', }, debugAsSingleModel: 'Tek Model Olarak Hata Ayıkla', debugAsMultipleModel: 'Çoklu Model Olarak Hata Ayıkla', @@ -486,6 +518,26 @@ const translation = { enabled: 'Etkinleştirildi', }, }, + codegen: { + generatedCodeTitle: 'Oluşturulan Kod', + overwriteConfirmTitle: 'Mevcut kodun üzerine yazılsın mı?', + applyChanges: 'Değişiklikleri Uygula', + generate: 'Oluşturmak', + noDataLine2: 'Kod önizlemesi burada gösterilecektir.', + title: 'Kod Oluşturucu', + apply: 'Uygulamak', + instructionPlaceholder: 'Oluşturmak istediğiniz kodun ayrıntılı açıklamasını girin.', + description: 'Kod Oluşturucu, talimatlarınıza göre yüksek kaliteli kod oluşturmak için yapılandırılmış modelleri kullanır. Lütfen açık ve ayrıntılı talimatlar verin.', + resTitle: 'Oluşturulan Kod', + noDataLine1: 'Solda kullanım durumunuzu açıklayın,', + loading: 'Kod oluşturuluyor...', + instruction: 'Talimat -ları', + overwriteConfirmMessage: 'Bu eylem mevcut kodun üzerine yazacaktır. Devam etmek istiyor musunuz?', + }, + warningMessage: { + timeoutExceeded: 'Zaman aşımı nedeniyle sonuçlar görüntülenmez. Tam sonuçları almak için lütfen günlüklere bakın.', + }, + noResult: 'Çıktı burada görüntülenecektir.', } export default translation diff --git a/web/i18n/uk-UA/app-debug.ts b/web/i18n/uk-UA/app-debug.ts index fe6fefa801..4a9d77b761 100644 --- a/web/i18n/uk-UA/app-debug.ts +++ b/web/i18n/uk-UA/app-debug.ts @@ -161,11 +161,16 @@ const translation = { title: 'ІНСТРУМЕНТИ', // TOOLBOX (all caps to convey its section title nature) }, moderation: { - title: 'Модерація контенту', // Content moderation - description: 'Захистіть вивід моделі, використовуючи API модерації або список конфіденційних слів.', // Secure model output... - allEnabled: 'Вміст ВВЕДЕННЯ/ВИВЕДЕННЯ ввімкнено', // INPUT/OUTPUT Content Enabled - inputEnabled: 'Вміст ВВЕДЕННЯ ввімкнено', // INPUT Content Enabled - outputEnabled: 'Вміст ВИВЕДЕННЯ ввімкнено', // OUTPUT Content Enabled + // Content moderation + title: 'Модерація контенту', + // Secure model output... + description: 'Захистіть вивід моделі, використовуючи API модерації або список конфіденційних слів.', + // INPUT/OUTPUT Content Enabled + allEnabled: 'Вміст ВВЕДЕННЯ/ВИВЕДЕННЯ ввімкнено', + // INPUT Content Enabled + inputEnabled: 'Вміст ВВЕДЕННЯ ввімкнено', + // OUTPUT Content Enabled + outputEnabled: 'Вміст ВИВЕДЕННЯ ввімкнено', modal: { title: 'Налаштування модерації вмісту', // Content moderation settings provider: { @@ -197,6 +202,7 @@ const translation = { after: '', }, }, + contentEnableLabel: 'Увімкнено помірний контент', }, fileUpload: { title: 'Завантаження файлу', @@ -248,23 +254,37 @@ const translation = { message: 'Скидання призводить до скасування змін, відновлюючи останню опубліковану конфігурацію.', }, errorMessage: { - nameOfKeyRequired: 'назва ключа: {{key}} обов’язкова', // name of the key: {{key}} required - valueOfVarRequired: 'значення {{key}} не може бути порожнім', // {{key}} value can not be empty - queryRequired: 'Текст запиту обов’язковий.', // Request text is required. - waitForResponse: 'Будь ласка, зачекайте, доки буде завершено відповідь на попереднє повідомлення.', // Please wait for the response to the previous message to complete. - waitForBatchResponse: 'Будь ласка, дочекайтеся завершення відповіді на пакетне завдання.', // Please wait for the response to the batch task to complete. - notSelectModel: 'Будь ласка, виберіть модель', // Please choose a model - waitForImgUpload: 'Будь ласка, зачекайте, поки зображення завантажиться', // Please wait for the image to upload + // name of the key: {{key}} required + nameOfKeyRequired: 'назва ключа: {{key}} обов’язкова', + // {{key}} value can not be empty + valueOfVarRequired: 'значення {{key}} не може бути порожнім', + // Request text is required. + queryRequired: 'Текст запиту обов’язковий.', + // Please wait for the response to the previous message to complete. + waitForResponse: 'Будь ласка, зачекайте, доки буде завершено відповідь на попереднє повідомлення.', + // Please wait for the response to the batch task to complete. + waitForBatchResponse: 'Будь ласка, дочекайтеся завершення відповіді на пакетне завдання.', + // Please choose a model + notSelectModel: 'Будь ласка, виберіть модель', + // Please wait for the image to upload + waitForImgUpload: 'Будь ласка, зачекайте, поки зображення завантажиться', + waitForFileUpload: 'Будь ласка, зачекайте, поки файл/файли завантажаться', }, - chatSubTitle: 'Інструкції', // Instructions - completionSubTitle: 'Префікс команди', // Prefix Prompt + // Instructions + chatSubTitle: 'Інструкції', + // Prefix Prompt + completionSubTitle: 'Префікс команди', promptTip: 'Запити керують відповідями ШІ, надаючи інструкції та обмеження. Вставте змінні, як-от {{input}}. Цей запит не буде видно користувачам.', - formattingChangedTitle: 'Змінено форматування', // Formatting changed - formattingChangedText: 'Змінення форматування призведе до скидання області налагодження. Ви впевнені?', // Modifying the formatting will reset the debug area, are you sure? - variableTitle: 'Змінні', // Variables + // Formatting changed + formattingChangedTitle: 'Змінено форматування', + // Modifying the formatting will reset the debug area, are you sure? + formattingChangedText: 'Змінення форматування призведе до скидання області налагодження. Ви впевнені?', + // Variables + variableTitle: 'Змінні', variableTip: 'Користувачі заповнюють змінні у формі, автоматично замінюючи змінні в команді.', notSetVar: 'Змінні дозволяють користувачам вводити підказки або вступні зауваження під час заповнення форм. Ви можете спробувати ввести "{{input}}" у слова підказки.', - autoAddVar: 'На невизначені змінні, на які посилаються в попередньому запиті, є посилання. Ви хочете додати їх у форму вводу користувача?', // Undefined variables referenced in pre-prompt, are you want to add them in user input form? + // Undefined variables referenced in pre-prompt, are you want to add them in user input form? + autoAddVar: 'На невизначені змінні, на які посилаються в попередньому запиті, є посилання. Ви хочете додати їх у форму вводу користувача?', variableTable: { key: 'Ключ змінної', // Variable Key name: 'Назва поля для введення користувача', // User Input Field Name @@ -316,11 +336,40 @@ const translation = { 'defaultValue': 'Значення за замовчуванням', 'noDefaultValue': 'Без значення за замовчуванням', 'selectDefaultValue': 'Обрати значення за замовчуванням', + 'file': { + image: { + name: 'Образ', + }, + audio: { + name: 'Аудіо', + }, + document: { + name: 'Документ', + }, + video: { + name: 'Відео', + }, + custom: { + description: 'Укажіть інші типи файлів.', + createPlaceholder: ' Розширення файлу, наприклад .doc', + name: 'Інші типи файлів', + }, + supportFileTypes: 'Підтримка типів файлів', + }, + 'content': 'Вміст', + 'both': 'Як', + 'single-file': 'Один файл', + 'multi-files': 'Список файлів', + 'localUpload': 'Локальне завантаження', + 'uploadFileTypes': 'Типи файлів для завантаження', + 'maxNumberOfUploads': 'Максимальна кількість завантажень', }, vision: { - name: 'Зображення', // Vision + // Vision + name: 'Зображення', description: 'Увімкнення функції "Зображення" дозволить моделі приймати зображення та відповідати на запитання про них.', - settings: 'Налаштування', // Settings + // Settings + settings: 'Налаштування', visionSettings: { title: 'Налаштування зображень', // Vision Settings resolution: 'Роздільна здатність', // Resolution @@ -335,6 +384,7 @@ const translation = { url: 'URL-адреса', // URL uploadLimit: 'Ліміт завантаження', // Upload Limit }, + onlySupportVisionModelTip: 'Підтримує лише моделі зору', }, voice: { name: 'Голос', // Voice @@ -384,9 +434,11 @@ const translation = { queryPlaceholder: 'Будь ласка, введіть текст запиту', // Please enter the request text. run: 'ЗАПУСТИТИ', // RUN }, - result: 'Вихідний текст', // Output Text + // Output Text + result: 'Вихідний текст', datasetConfig: { - settingTitle: 'Налаштування пошуку', // Retrieval settings + // Retrieval settings + settingTitle: 'Налаштування пошуку', knowledgeTip: 'Клацніть кнопку “+”, щоб додати знання', retrieveOneWay: { title: 'Односторонній пошук', // N-to-1 retrieval @@ -396,18 +448,28 @@ const translation = { title: 'Багатосторонній пошук', // Multi-path retrieval description: 'На основі намірів користувача запитує по всіх Базах Знань, отримує релевантний текст із кількох джерел і вибирає найкращі результати, що відповідають запиту користувача, після переранжування. Необхідна конфігурація API моделі переранжування.', }, - rerankModelRequired: 'Необхідна модель переранжування', // Rerank model is required - params: 'Параметри', // Params - top_k: 'Найкращих K', // Top K + // Rerank model is required + rerankModelRequired: 'Необхідна модель переранжування', + // Params + params: 'Параметри', + // Top K + top_k: 'Найкращих K', top_kTip: 'Використовується для фільтрації фрагментів, найбільш схожих на запитання користувачів. Система також динамічно регулюватиме значення K у відповідності з max_tokens обраної моделі.', - score_threshold: 'Поріг оцінки', // Score Threshold + // Score Threshold + score_threshold: 'Поріг оцінки', score_thresholdTip: 'Використовується для встановлення порогу схожості для фільтрації фрагментів.', - retrieveChangeTip: 'Зміна режиму індексування та режиму отримання може вплинути на застосунки, пов’язані з цими знаннями.', // Modifying... + // Modifying... + retrieveChangeTip: 'Зміна режиму індексування та режиму отримання може вплинути на застосунки, пов’язані з цими знаннями.', + embeddingModelRequired: 'Потрібна налаштована модель вбудовування', }, - debugAsSingleModel: 'Налагодження як одна модель', // Debug as Single Model - debugAsMultipleModel: 'Налагодження як багато моделей', // Debug as Multiple Models - duplicateModel: 'Дублювання', // Duplicate - publishAs: 'Опублікувати як', // Publish as + // Debug as Single Model + debugAsSingleModel: 'Налагодження як одна модель', + // Debug as Multiple Models + debugAsMultipleModel: 'Налагодження як багато моделей', + // Duplicate + duplicateModel: 'Дублювання', + // Publish as + publishAs: 'Опублікувати як', assistantType: { name: 'Тип Асистента', // Assistant Type chatAssistant: { @@ -444,6 +506,79 @@ const translation = { enabled: 'Увімкнено', // Enabled }, }, + codegen: { + generatedCodeTitle: 'Згенерований код', + generate: 'Генерувати', + title: 'Генератор коду', + loading: 'Генерація коду...', + instruction: 'Інструкції', + applyChanges: 'Застосувати зміни', + resTitle: 'Згенерований код', + noDataLine2: 'Тут з\'явиться попередній перегляд коду.', + noDataLine1: 'Опишіть свій випадок використання зліва,', + apply: 'Застосовувати', + overwriteConfirmTitle: 'Перезаписати існуючий код?', + overwriteConfirmMessage: 'Ця дія перезапише існуючий код. Хочете продовжити?', + instructionPlaceholder: 'Введіть детальний опис коду, який ви хочете згенерувати.', + description: 'Генератор коду використовує налаштовані моделі для генерації високоякісного коду на основі ваших інструкцій. Будь ласка, надайте чіткі та детальні інструкції.', + }, + generate: { + template: { + pythonDebugger: { + name: 'Налагоджувач Python', + instruction: 'Бот, який може генерувати та налагоджувати ваш код на основі ваших інструкцій', + }, + translation: { + name: 'Переклад', + instruction: 'Перекладач, який може перекладати кількома мовами', + }, + professionalAnalyst: { + name: 'Професійний аналітик', + instruction: 'Отримуйте аналітичні дані, виявляйте ризики та перетворюйте ключову інформацію з довгих звітів в єдину записку', + }, + excelFormulaExpert: { + name: 'Експерт з формул Excel', + instruction: 'Чат-бот, який може допомогти користувачам-початківцям розуміти, використовувати та створювати формули Excel на основі інструкцій користувача', + }, + travelPlanning: { + name: 'Планування подорожей', + instruction: 'Помічник із планування подорожей — це інтелектуальний інструмент, розроблений, щоб допомогти користувачам без зусиль планувати свої поїздки', + }, + SQLSorcerer: { + name: 'SQL чаклун', + instruction: 'Перетворюйте повсякденну мову на SQL-запити', + }, + GitGud: { + name: 'Git gud', + instruction: 'Генеруйте відповідні команди Git на основі описаних користувачем дій контролю версій', + }, + meetingTakeaways: { + name: 'Підсумки зустрічі', + instruction: 'Перетворіть зустрічі на стислі підсумки, включаючи теми для обговорення, ключові висновки та пункти дій', + }, + writingsPolisher: { + name: 'Письменницька полірувальна машина', + instruction: 'Використовуйте передові методи редагування тексту, щоб покращити свої тексти', + }, + }, + instruction: 'Інструкції', + generate: 'Генерувати', + apply: 'Застосовувати', + tryIt: 'Спробуйте', + overwriteTitle: 'Змінити існуючу конфігурацію?', + instructionPlaceHolder: 'Пишіть чіткі та конкретні інструкції.', + loading: 'Оркестрування програми для вас...', + noDataLine1: 'Опишіть свій випадок використання зліва,', + resTitle: 'Згенерований запит', + title: 'Генератор підказок', + noDataLine2: 'Тут буде показано попередній перегляд оркестровки.', + overwriteMessage: 'Застосування цього рядка замінить існуючу конфігурацію.', + description: 'Генератор підказок використовує налаштовану модель для оптимізації запитів для кращої якості та кращої структури. Напишіть, будь ласка, зрозумілу та детальну інструкцію.', + }, + warningMessage: { + timeoutExceeded: 'Результати не відображаються через тайм-аут. Будь ласка, зверніться до журналів, щоб отримати повні результати.', + }, + noResult: 'Тут буде відображено вихідні дані.', } export default translation diff --git a/web/i18n/vi-VN/app-debug.ts b/web/i18n/vi-VN/app-debug.ts index 381b766306..8882d4af38 100644 --- a/web/i18n/vi-VN/app-debug.ts +++ b/web/i18n/vi-VN/app-debug.ts @@ -197,6 +197,7 @@ const translation = { after: '', }, }, + contentEnableLabel: 'Đã bật nội dung kiểm duyệt', }, fileUpload: { title: 'Tải lên tệp', @@ -255,6 +256,7 @@ const translation = { waitForBatchResponse: 'Vui lòng đợi phản hồi của tác vụ hàng loạt để hoàn thành.', notSelectModel: 'Vui lòng chọn một mô hình', waitForImgUpload: 'Vui lòng đợi hình ảnh được tải lên', + waitForFileUpload: 'Vui lòng đợi tệp / tệp tải lên', }, chatSubTitle: 'Hướng dẫn', completionSubTitle: 'Tiền tố lời nhắc', @@ -316,6 +318,33 @@ const translation = { 'defaultValue': 'Giá trị mặc định', 'noDefaultValue': 'Không có giá trị mặc định', 'selectDefaultValue': 'Chọn giá trị mặc định', + 'file': { + image: { + name: 'Ảnh', + }, + audio: { + name: 'Âm thanh', + }, + document: { + name: 'Tài liệu', + }, + video: { + name: 'Video', + }, + custom: { + description: 'Chỉ định các loại tệp khác.', + name: 'Các loại tệp khác', + createPlaceholder: ' Phần mở rộng tệp, ví dụ: .doc', + }, + supportFileTypes: 'Các loại tệp hỗ trợ', + }, + 'both': 'Cả hai', + 'uploadFileTypes': 'Tải lên các loại tệp', + 'localUpload': 'Tải lên cục bộ', + 'single-file': 'Tệp đơn', + 'content': 'Nội dung', + 'multi-files': 'Danh sách tập tin', + 'maxNumberOfUploads': 'Số lượt tải lên tối đa', }, vision: { name: 'Thị giác', @@ -335,6 +364,7 @@ const translation = { url: 'URL', uploadLimit: 'Giới hạn tải lên', }, + onlySupportVisionModelTip: 'Chỉ hỗ trợ các mô hình thị giác', }, voice: { name: 'Giọng nói', @@ -403,6 +433,7 @@ const translation = { score_threshold: 'Ngưỡng điểm', score_thresholdTip: 'Sử dụng để thiết lập ngưỡng tương đồng cho việc lọc các phần.', retrieveChangeTip: 'Thay đổi chế độ chỉ mục và chế độ truy xuất có thể ảnh hưởng đến các ứng dụng liên quan đến kiến thức này.', + embeddingModelRequired: 'Cần có Mô hình nhúng được định cấu hình', }, debugAsSingleModel: 'Gỡ lỗi như một mô hình', debugAsMultipleModel: 'Gỡ lỗi như nhiều mô hình', @@ -444,6 +475,79 @@ const translation = { enabled: 'Đã kích hoạt', }, }, + codegen: { + generate: 'Đẻ ra', + instruction: 'Chỉ thị', + generatedCodeTitle: 'Mã được tạo', + loading: 'Đang tạo mã...', + title: 'Trình tạo mã', + instructionPlaceholder: 'Nhập mô tả chi tiết về mã bạn muốn tạo.', + overwriteConfirmMessage: 'Hành động này sẽ ghi đè lên mã hiện có. Bạn có muốn tiếp tục không?', + description: 'Trình tạo mã sử dụng các mô hình đã định cấu hình để tạo mã chất lượng cao dựa trên hướng dẫn của bạn. Vui lòng cung cấp hướng dẫn rõ ràng và chi tiết.', + resTitle: 'Mã được tạo', + apply: 'Áp dụng', + overwriteConfirmTitle: 'Ghi đè mã hiện có?', + applyChanges: 'Áp dụng thay đổi', + noDataLine1: 'Mô tả trường hợp sử dụng của bạn ở bên trái,', + noDataLine2: 'Bản xem trước mã sẽ hiển thị ở đây.', + }, + generate: { + template: { + pythonDebugger: { + instruction: 'Một bot có thể tạo và gỡ lỗi mã của bạn dựa trên hướng dẫn của bạn', + name: 'Trình gỡ lỗi Python', + }, + translation: { + name: 'Dịch', + instruction: 'Một dịch giả có thể dịch nhiều ngôn ngữ', + }, + professionalAnalyst: { + name: 'Chuyên viên phân tích chuyên nghiệp', + instruction: 'Trích xuất thông tin chi tiết, xác định rủi ro và chắt lọc thông tin quan trọng từ các báo cáo dài thành một bản ghi nhớ duy nhất', + }, + excelFormulaExpert: { + name: 'Chuyên gia công thức Excel', + instruction: 'Một chatbot có thể giúp người dùng mới hiểu, sử dụng và tạo công thức Excel dựa trên hướng dẫn của người dùng', + }, + travelPlanning: { + instruction: 'Trợ lý lập kế hoạch du lịch là một công cụ thông minh được thiết kế để giúp người dùng dễ dàng lên kế hoạch cho các chuyến đi của họ', + name: 'Lập kế hoạch du lịch', + }, + SQLSorcerer: { + instruction: 'Chuyển đổi ngôn ngữ hàng ngày thành truy vấn SQL', + name: 'SQL sorcerer', + }, + GitGud: { + name: 'Git gud', + instruction: 'Tạo các lệnh Git thích hợp dựa trên các hành động kiểm soát phiên bản được người dùng mô tả', + }, + meetingTakeaways: { + name: 'Bài học rút ra trong cuộc họp', + instruction: 'Chắt lọc các cuộc họp thành các bản tóm tắt ngắn gọn bao gồm các chủ đề thảo luận, bài học chính và các mục hành động', + }, + writingsPolisher: { + name: 'Máy đánh bóng viết', + instruction: 'Sử dụng các kỹ thuật chỉnh sửa nội dung nâng cao để cải thiện bài viết của bạn', + }, + }, + generate: 'Đẻ ra', + tryIt: 'Dùng thử', + noDataLine2: 'Bản xem trước Orchestration sẽ hiển thị ở đây.', + apply: 'Áp dụng', + instruction: 'Chỉ thị', + title: 'Trình tạo nhắc nhở', + resTitle: 'Lời nhắc được tạo', + loading: 'Sắp xếp ứng dụng cho bạn...', + noDataLine1: 'Mô tả trường hợp sử dụng của bạn ở bên trái,', + description: 'Trình tạo lời nhắc sử dụng mô hình được định cấu hình để tối ưu hóa lời nhắc cho chất lượng cao hơn và cấu trúc tốt hơn. Vui lòng viết hướng dẫn rõ ràng và chi tiết.', + overwriteMessage: 'Áp dụng lời nhắc này sẽ ghi đè cấu hình hiện có.', + overwriteTitle: 'Ghi đè cấu hình hiện có?', + instructionPlaceHolder: 'Viết hướng dẫn rõ ràng và cụ thể.', + }, + warningMessage: { + timeoutExceeded: 'Kết quả không được hiển thị do hết thời gian chờ. Vui lòng tham khảo nhật ký để thu thập kết quả đầy đủ.', + }, + noResult: 'Đầu ra sẽ được hiển thị ở đây.', } export default translation diff --git a/web/i18n/zh-Hant/app-debug.ts b/web/i18n/zh-Hant/app-debug.ts index 434bc830a5..5309f03da3 100644 --- a/web/i18n/zh-Hant/app-debug.ts +++ b/web/i18n/zh-Hant/app-debug.ts @@ -197,6 +197,7 @@ const translation = { after: '中配置 OpenAI API 金鑰。', }, }, + contentEnableLabel: '啟用了中等內容', }, fileUpload: { title: '檔案上傳', @@ -238,6 +239,7 @@ const translation = { waitForBatchResponse: '請等待批次任務完成', notSelectModel: '請選擇模型', waitForImgUpload: '請等待圖片上傳完成', + waitForFileUpload: '請等待檔上傳', }, chatSubTitle: '提示詞', completionSubTitle: '字首提示詞', @@ -302,6 +304,33 @@ const translation = { 'defaultValue': '預設值', 'noDefaultValue': '無預設值', 'selectDefaultValue': '選擇預設值', + 'file': { + image: { + name: '圖像', + }, + audio: { + name: '音訊', + }, + document: { + name: '公文', + }, + video: { + name: '視頻', + }, + custom: { + name: '其他文件類型', + description: '指定其他檔案類型。', + createPlaceholder: '檔擴展名,例如 .doc', + }, + supportFileTypes: '支援檔案類型', + }, + 'both': '雙', + 'uploadFileTypes': '上傳檔類型', + 'multi-files': '檔案清單', + 'content': '內容', + 'localUpload': '本地上傳', + 'single-file': '單個檔', + 'maxNumberOfUploads': '最大上傳次數', }, vision: { name: '視覺', @@ -321,6 +350,7 @@ const translation = { url: 'URL', uploadLimit: '上傳數量限制', }, + onlySupportVisionModelTip: '僅支持視覺模型', }, voice: { name: '音色', @@ -390,6 +420,7 @@ const translation = { score_threshold: 'Score 閾值', score_thresholdTip: '用於設定文字片段篩選的相似度閾值。', retrieveChangeTip: '修改索引模式和檢索模式可能會影響與該知識庫關聯的應用程式。', + embeddingModelRequired: '需要配置的嵌入模型', }, debugAsSingleModel: '單一模型進行除錯', debugAsMultipleModel: '多個模型進行除錯', @@ -431,6 +462,79 @@ const translation = { enabled: '啟用', }, }, + codegen: { + resTitle: '生成的代碼', + apply: '應用', + overwriteConfirmMessage: '此作將覆蓋現有代碼。你想繼續嗎?', + instruction: '指示', + instructionPlaceholder: '輸入要生成的代碼的詳細說明。', + generate: '生成', + noDataLine2: '代碼預覽將在此處顯示。', + applyChanges: '應用更改', + noDataLine1: '在左側描述您的用例,', + overwriteConfirmTitle: '覆蓋現有代碼?', + title: '代碼生成器', + generatedCodeTitle: '生成的代碼', + loading: '產生代碼...', + description: '代碼生成器使用配置的模型根據您的指令生成高質量的代碼。請提供清晰詳細的說明。', + }, + generate: { + template: { + pythonDebugger: { + instruction: '可以根據您的指令生成和調試代碼的機器人', + name: 'Python 調試器', + }, + translation: { + name: '譯本', + instruction: '可以翻譯多種語言的翻譯器', + }, + professionalAnalyst: { + instruction: '提取見解、識別風險並將長報告中的關鍵資訊提煉成單個備忘錄', + name: '專業分析師', + }, + excelFormulaExpert: { + name: 'Excel公式專家', + instruction: '一個聊天機器人,可以説明新手使用者根據使用者指令理解、使用和創建Excel公式', + }, + travelPlanning: { + instruction: '旅行計劃助手是一款智慧工具,旨在説明用戶輕鬆計劃他們的旅行', + name: '旅行計劃', + }, + SQLSorcerer: { + instruction: '將日常語言轉換為 SQL 查詢', + name: 'SQL 巫師', + }, + GitGud: { + instruction: '根據使用者描述的版本控制作生成適當的 Git 命令', + name: '吉特古德', + }, + meetingTakeaways: { + name: '會議要點', + instruction: '將會議提煉成簡潔的摘要,包括討論主題、關鍵要點和行動專案', + }, + writingsPolisher: { + instruction: '使用先進的文案編輯技術來改進您的寫作', + name: '書寫拋光機', + }, + }, + overwriteMessage: '應用此提示將覆蓋現有配置。', + tryIt: '試試看', + noDataLine1: '在左側描述您的用例,', + instruction: '指示', + description: '提示生成器使用配置的模型來優化提示,以獲得更高的品質和更好的結構。請寫出清晰詳細的說明。', + generate: '生成', + apply: '應用', + instructionPlaceHolder: '寫出清晰具體的說明。', + overwriteTitle: '覆蓋現有配置?', + title: '提示生成器', + loading: '為您編排應用程式...', + noDataLine2: '業務流程預覽將在此處顯示。', + resTitle: '生成的提示', + }, + warningMessage: { + timeoutExceeded: '由於超時,不顯示結果。請參閱日誌以收集完整結果。', + }, + noResult: '輸出將顯示在此處。', } export default translation From 146d87009860e8474a6a360e76b8e83a3dddb15e Mon Sep 17 00:00:00 2001 From: Yongtao Huang Date: Mon, 4 Aug 2025 14:37:36 +0800 Subject: [PATCH 130/415] Fix: avoid Flask route conflict by merging `DocumentDetailApi` and `DocumentDeleteApi` (#23333) Co-authored-by: autofix-ci[bot] <114827586+autofix-ci[bot]@users.noreply.github.com> --- .../console/datasets/datasets_document.py | 51 +++++++------- .../service_api/dataset/document.py | 69 +++++++++---------- 2 files changed, 57 insertions(+), 63 deletions(-) diff --git a/api/controllers/console/datasets/datasets_document.py b/api/controllers/console/datasets/datasets_document.py index b6e91dd98e..4e0955bd43 100644 --- a/api/controllers/console/datasets/datasets_document.py +++ b/api/controllers/console/datasets/datasets_document.py @@ -642,7 +642,7 @@ class DocumentIndexingStatusApi(DocumentResource): return marshal(document_dict, document_status_fields) -class DocumentDetailApi(DocumentResource): +class DocumentApi(DocumentResource): METADATA_CHOICES = {"all", "only", "without"} @setup_required @@ -730,6 +730,28 @@ class DocumentDetailApi(DocumentResource): return response, 200 + @setup_required + @login_required + @account_initialization_required + @cloud_edition_billing_rate_limit_check("knowledge") + def delete(self, dataset_id, document_id): + dataset_id = str(dataset_id) + document_id = str(document_id) + dataset = DatasetService.get_dataset(dataset_id) + if dataset is None: + raise NotFound("Dataset not found.") + # check user's model setting + DatasetService.check_dataset_model_setting(dataset) + + document = self.get_document(dataset_id, document_id) + + try: + DocumentService.delete_document(document) + except services.errors.document.DocumentIndexingError: + raise DocumentIndexingError("Cannot delete document during indexing.") + + return {"result": "success"}, 204 + class DocumentProcessingApi(DocumentResource): @setup_required @@ -768,30 +790,6 @@ class DocumentProcessingApi(DocumentResource): return {"result": "success"}, 200 -class DocumentDeleteApi(DocumentResource): - @setup_required - @login_required - @account_initialization_required - @cloud_edition_billing_rate_limit_check("knowledge") - def delete(self, dataset_id, document_id): - dataset_id = str(dataset_id) - document_id = str(document_id) - dataset = DatasetService.get_dataset(dataset_id) - if dataset is None: - raise NotFound("Dataset not found.") - # check user's model setting - DatasetService.check_dataset_model_setting(dataset) - - document = self.get_document(dataset_id, document_id) - - try: - DocumentService.delete_document(document) - except services.errors.document.DocumentIndexingError: - raise DocumentIndexingError("Cannot delete document during indexing.") - - return {"result": "success"}, 204 - - class DocumentMetadataApi(DocumentResource): @setup_required @login_required @@ -1037,11 +1035,10 @@ api.add_resource( api.add_resource(DocumentBatchIndexingEstimateApi, "/datasets//batch//indexing-estimate") api.add_resource(DocumentBatchIndexingStatusApi, "/datasets//batch//indexing-status") api.add_resource(DocumentIndexingStatusApi, "/datasets//documents//indexing-status") -api.add_resource(DocumentDetailApi, "/datasets//documents/") +api.add_resource(DocumentApi, "/datasets//documents/") api.add_resource( DocumentProcessingApi, "/datasets//documents//processing/" ) -api.add_resource(DocumentDeleteApi, "/datasets//documents/") api.add_resource(DocumentMetadataApi, "/datasets//documents//metadata") api.add_resource(DocumentStatusApi, "/datasets//documents/status//batch") api.add_resource(DocumentPauseApi, "/datasets//documents//processing/pause") diff --git a/api/controllers/service_api/dataset/document.py b/api/controllers/service_api/dataset/document.py index ac85c0b38d..77600aa18c 100644 --- a/api/controllers/service_api/dataset/document.py +++ b/api/controllers/service_api/dataset/document.py @@ -358,39 +358,6 @@ class DocumentUpdateByFileApi(DatasetApiResource): return documents_and_batch_fields, 200 -class DocumentDeleteApi(DatasetApiResource): - @cloud_edition_billing_rate_limit_check("knowledge", "dataset") - def delete(self, tenant_id, dataset_id, document_id): - """Delete document.""" - document_id = str(document_id) - dataset_id = str(dataset_id) - tenant_id = str(tenant_id) - - # get dataset info - dataset = db.session.query(Dataset).where(Dataset.tenant_id == tenant_id, Dataset.id == dataset_id).first() - - if not dataset: - raise ValueError("Dataset does not exist.") - - document = DocumentService.get_document(dataset.id, document_id) - - # 404 if document not found - if document is None: - raise NotFound("Document Not Exists.") - - # 403 if document is archived - if DocumentService.check_archived(document): - raise ArchivedDocumentImmutableError() - - try: - # delete document - DocumentService.delete_document(document) - except services.errors.document.DocumentIndexingError: - raise DocumentIndexingError("Cannot delete document during indexing.") - - return 204 - - class DocumentListApi(DatasetApiResource): def get(self, tenant_id, dataset_id): dataset_id = str(dataset_id) @@ -473,7 +440,7 @@ class DocumentIndexingStatusApi(DatasetApiResource): return data -class DocumentDetailApi(DatasetApiResource): +class DocumentApi(DatasetApiResource): METADATA_CHOICES = {"all", "only", "without"} def get(self, tenant_id, dataset_id, document_id): @@ -567,6 +534,37 @@ class DocumentDetailApi(DatasetApiResource): return response + @cloud_edition_billing_rate_limit_check("knowledge", "dataset") + def delete(self, tenant_id, dataset_id, document_id): + """Delete document.""" + document_id = str(document_id) + dataset_id = str(dataset_id) + tenant_id = str(tenant_id) + + # get dataset info + dataset = db.session.query(Dataset).where(Dataset.tenant_id == tenant_id, Dataset.id == dataset_id).first() + + if not dataset: + raise ValueError("Dataset does not exist.") + + document = DocumentService.get_document(dataset.id, document_id) + + # 404 if document not found + if document is None: + raise NotFound("Document Not Exists.") + + # 403 if document is archived + if DocumentService.check_archived(document): + raise ArchivedDocumentImmutableError() + + try: + # delete document + DocumentService.delete_document(document) + except services.errors.document.DocumentIndexingError: + raise DocumentIndexingError("Cannot delete document during indexing.") + + return 204 + api.add_resource( DocumentAddByTextApi, @@ -588,7 +586,6 @@ api.add_resource( "/datasets//documents//update_by_file", "/datasets//documents//update-by-file", ) -api.add_resource(DocumentDeleteApi, "/datasets//documents/") +api.add_resource(DocumentApi, "/datasets//documents/") api.add_resource(DocumentListApi, "/datasets//documents") api.add_resource(DocumentIndexingStatusApi, "/datasets//documents//indexing-status") -api.add_resource(DocumentDetailApi, "/datasets//documents/") From 8041808b53610275283bf6cbc0a44f8a163da254 Mon Sep 17 00:00:00 2001 From: Tianyi Jing Date: Mon, 4 Aug 2025 14:39:54 +0800 Subject: [PATCH 131/415] fix: diplay all helpfields (#23348) Signed-off-by: jingfelix --- .../base/form/components/base/base-field.tsx | 17 +++++++++++++++++ .../plugin-auth/authorize/api-key-modal.tsx | 17 +---------------- .../authorize/oauth-client-settings.tsx | 17 ----------------- 3 files changed, 18 insertions(+), 33 deletions(-) diff --git a/web/app/components/base/form/components/base/base-field.tsx b/web/app/components/base/form/components/base/base-field.tsx index 8120fad6b0..00a1f9b2da 100644 --- a/web/app/components/base/form/components/base/base-field.tsx +++ b/web/app/components/base/form/components/base/base-field.tsx @@ -3,6 +3,7 @@ import { memo, useMemo, } from 'react' +import { RiExternalLinkLine } from '@remixicon/react' import type { AnyFieldApi } from '@tanstack/react-form' import { useStore } from '@tanstack/react-form' import cn from '@/utils/classnames' @@ -200,6 +201,22 @@ const BaseField = ({
    ) } + { + formSchema.url && ( + + + {renderI18nObject(formSchema?.help as any)} + + { + + } + + ) + }
    ) diff --git a/web/app/components/plugins/plugin-auth/authorize/api-key-modal.tsx b/web/app/components/plugins/plugin-auth/authorize/api-key-modal.tsx index d582c660b6..21946c4b51 100644 --- a/web/app/components/plugins/plugin-auth/authorize/api-key-modal.tsx +++ b/web/app/components/plugins/plugin-auth/authorize/api-key-modal.tsx @@ -6,7 +6,6 @@ import { useState, } from 'react' import { useTranslation } from 'react-i18next' -import { RiExternalLinkLine } from '@remixicon/react' import { Lock01 } from '@/app/components/base/icons/src/vender/solid/security' import Modal from '@/app/components/base/modal/modal' import { CredentialTypeEnum } from '../types' @@ -21,7 +20,6 @@ import { useGetPluginCredentialSchemaHook, useUpdatePluginCredentialHook, } from '../hooks/use-credential' -import { useRenderI18nObject } from '@/hooks/use-i18n' export type ApiKeyModalProps = { pluginPayload: PluginPayload @@ -64,8 +62,6 @@ const ApiKeyModal = ({ acc[schema.name] = schema.default return acc }, {} as Record) - const helpField = formSchemas.find(schema => schema.url && schema.help) - const renderI18nObject = useRenderI18nObject() const { mutateAsync: addPluginCredential } = useAddPluginCredentialHook(pluginPayload) const { mutateAsync: updatePluginCredential } = useUpdatePluginCredentialHook(pluginPayload) const formRef = useRef(null) @@ -125,18 +121,7 @@ const ApiKeyModal = ({ onClose={onClose} onCancel={onClose} footerSlot={ - helpField && ( - - - {renderI18nObject(helpField?.help as any)} - - - - ) + (
    ) } bottomSlot={
    diff --git a/web/app/components/plugins/plugin-auth/authorize/oauth-client-settings.tsx b/web/app/components/plugins/plugin-auth/authorize/oauth-client-settings.tsx index 14c7ed957f..c10b06166b 100644 --- a/web/app/components/plugins/plugin-auth/authorize/oauth-client-settings.tsx +++ b/web/app/components/plugins/plugin-auth/authorize/oauth-client-settings.tsx @@ -4,7 +4,6 @@ import { useRef, useState, } from 'react' -import { RiExternalLinkLine } from '@remixicon/react' import { useForm, useStore, @@ -24,7 +23,6 @@ import type { } from '@/app/components/base/form/types' import { useToastContext } from '@/app/components/base/toast' import Button from '@/app/components/base/button' -import { useRenderI18nObject } from '@/hooks/use-i18n' type OAuthClientSettingsProps = { pluginPayload: PluginPayload @@ -129,8 +127,6 @@ const OAuthClientSettings = ({ defaultValues: editValues || defaultValues, }) const __oauth_client__ = useStore(form.store, s => s.values.__oauth_client__) - const helpField = schemas.find(schema => schema.url && schema.help) - const renderI18nObject = useRenderI18nObject() return ( - { - helpField && __oauth_client__ === 'custom' && ( - - - {renderI18nObject(helpField?.help as any)} - - - - )} ) From 60c7663a807353d07ad24ed6759e35ea5c642dba Mon Sep 17 00:00:00 2001 From: NeatGuyCoding <15627489+NeatGuyCoding@users.noreply.github.com> Date: Mon, 4 Aug 2025 19:27:36 +0800 Subject: [PATCH 132/415] Feat add testcontainers test (#23269) --- .github/workflows/api-tests.yml | 3 + api/pyproject.toml | 1 + .../__init__.py | 0 .../conftest.py | 328 ++++++++++++++++ .../factories/__init__.py | 0 .../factories/test_storage_key_loader.py | 371 ++++++++++++++++++ .../workflow/__init__.py | 0 .../workflow/nodes/__init__.py | 0 .../workflow/nodes/code_executor/__init__.py | 0 .../nodes/code_executor/test_code_executor.py | 11 + .../code_executor/test_code_javascript.py | 47 +++ .../nodes/code_executor/test_code_jinja2.py | 42 ++ .../nodes/code_executor/test_code_python3.py | 47 +++ .../nodes/code_executor/test_utils.py | 115 ++++++ api/uv.lock | 32 ++ dev/pytest/pytest_all_tests.sh | 3 + dev/pytest/pytest_testcontainers.sh | 7 + 17 files changed, 1007 insertions(+) create mode 100644 api/tests/test_containers_integration_tests/__init__.py create mode 100644 api/tests/test_containers_integration_tests/conftest.py create mode 100644 api/tests/test_containers_integration_tests/factories/__init__.py create mode 100644 api/tests/test_containers_integration_tests/factories/test_storage_key_loader.py create mode 100644 api/tests/test_containers_integration_tests/workflow/__init__.py create mode 100644 api/tests/test_containers_integration_tests/workflow/nodes/__init__.py create mode 100644 api/tests/test_containers_integration_tests/workflow/nodes/code_executor/__init__.py create mode 100644 api/tests/test_containers_integration_tests/workflow/nodes/code_executor/test_code_executor.py create mode 100644 api/tests/test_containers_integration_tests/workflow/nodes/code_executor/test_code_javascript.py create mode 100644 api/tests/test_containers_integration_tests/workflow/nodes/code_executor/test_code_jinja2.py create mode 100644 api/tests/test_containers_integration_tests/workflow/nodes/code_executor/test_code_python3.py create mode 100644 api/tests/test_containers_integration_tests/workflow/nodes/code_executor/test_utils.py create mode 100755 dev/pytest/pytest_testcontainers.sh diff --git a/.github/workflows/api-tests.yml b/.github/workflows/api-tests.yml index a5a5071fae..9c3daddbfc 100644 --- a/.github/workflows/api-tests.yml +++ b/.github/workflows/api-tests.yml @@ -99,3 +99,6 @@ jobs: - name: Run Tool run: uv run --project api bash dev/pytest/pytest_tools.sh + + - name: Run TestContainers + run: uv run --project api bash dev/pytest/pytest_testcontainers.sh diff --git a/api/pyproject.toml b/api/pyproject.toml index be42b509ed..d8f663ef8d 100644 --- a/api/pyproject.toml +++ b/api/pyproject.toml @@ -114,6 +114,7 @@ dev = [ "pytest-cov~=4.1.0", "pytest-env~=1.1.3", "pytest-mock~=3.14.0", + "testcontainers~=4.10.0", "types-aiofiles~=24.1.0", "types-beautifulsoup4~=4.12.0", "types-cachetools~=5.5.0", diff --git a/api/tests/test_containers_integration_tests/__init__.py b/api/tests/test_containers_integration_tests/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/api/tests/test_containers_integration_tests/conftest.py b/api/tests/test_containers_integration_tests/conftest.py new file mode 100644 index 0000000000..0369a5cbd0 --- /dev/null +++ b/api/tests/test_containers_integration_tests/conftest.py @@ -0,0 +1,328 @@ +""" +TestContainers-based integration test configuration for Dify API. + +This module provides containerized test infrastructure using TestContainers library +to spin up real database and service instances for integration testing. This approach +ensures tests run against actual service implementations rather than mocks, providing +more reliable and realistic test scenarios. +""" + +import logging +import os +from collections.abc import Generator +from typing import Optional + +import pytest +from flask import Flask +from flask.testing import FlaskClient +from sqlalchemy.orm import Session +from testcontainers.core.container import DockerContainer +from testcontainers.core.waiting_utils import wait_for_logs +from testcontainers.postgres import PostgresContainer +from testcontainers.redis import RedisContainer + +from app_factory import create_app +from models import db + +# Configure logging for test containers +logging.basicConfig(level=logging.INFO, format="%(asctime)s - %(name)s - %(levelname)s - %(message)s") +logger = logging.getLogger(__name__) + + +class DifyTestContainers: + """ + Manages all test containers required for Dify integration tests. + + This class provides a centralized way to manage multiple containers + needed for comprehensive integration testing, including databases, + caches, and search engines. + """ + + def __init__(self): + """Initialize container management with default configurations.""" + self.postgres: Optional[PostgresContainer] = None + self.redis: Optional[RedisContainer] = None + self.dify_sandbox: Optional[DockerContainer] = None + self._containers_started = False + logger.info("DifyTestContainers initialized - ready to manage test containers") + + def start_containers_with_env(self) -> None: + """ + Start all required containers for integration testing. + + This method initializes and starts PostgreSQL, Redis + containers with appropriate configurations for Dify testing. Containers + are started in dependency order to ensure proper initialization. + """ + if self._containers_started: + logger.info("Containers already started - skipping container startup") + return + + logger.info("Starting test containers for Dify integration tests...") + + # Start PostgreSQL container for main application database + # PostgreSQL is used for storing user data, workflows, and application state + logger.info("Initializing PostgreSQL container...") + self.postgres = PostgresContainer( + image="postgres:16-alpine", + ) + self.postgres.start() + db_host = self.postgres.get_container_host_ip() + db_port = self.postgres.get_exposed_port(5432) + os.environ["DB_HOST"] = db_host + os.environ["DB_PORT"] = str(db_port) + os.environ["DB_USERNAME"] = self.postgres.username + os.environ["DB_PASSWORD"] = self.postgres.password + os.environ["DB_DATABASE"] = self.postgres.dbname + logger.info( + "PostgreSQL container started successfully - Host: %s, Port: %s User: %s, Database: %s", + db_host, + db_port, + self.postgres.username, + self.postgres.dbname, + ) + + # Wait for PostgreSQL to be ready + logger.info("Waiting for PostgreSQL to be ready to accept connections...") + wait_for_logs(self.postgres, "is ready to accept connections", timeout=30) + logger.info("PostgreSQL container is ready and accepting connections") + + # Install uuid-ossp extension for UUID generation + logger.info("Installing uuid-ossp extension...") + try: + import psycopg2 + + conn = psycopg2.connect( + host=db_host, + port=db_port, + user=self.postgres.username, + password=self.postgres.password, + database=self.postgres.dbname, + ) + conn.autocommit = True + cursor = conn.cursor() + cursor.execute('CREATE EXTENSION IF NOT EXISTS "uuid-ossp";') + cursor.close() + conn.close() + logger.info("uuid-ossp extension installed successfully") + except Exception as e: + logger.warning("Failed to install uuid-ossp extension: %s", e) + + # Set up storage environment variables + os.environ["STORAGE_TYPE"] = "opendal" + os.environ["OPENDAL_SCHEME"] = "fs" + os.environ["OPENDAL_FS_ROOT"] = "storage" + + # Start Redis container for caching and session management + # Redis is used for storing session data, cache entries, and temporary data + logger.info("Initializing Redis container...") + self.redis = RedisContainer(image="redis:latest", port=6379) + self.redis.start() + redis_host = self.redis.get_container_host_ip() + redis_port = self.redis.get_exposed_port(6379) + os.environ["REDIS_HOST"] = redis_host + os.environ["REDIS_PORT"] = str(redis_port) + logger.info("Redis container started successfully - Host: %s, Port: %s", redis_host, redis_port) + + # Wait for Redis to be ready + logger.info("Waiting for Redis to be ready to accept connections...") + wait_for_logs(self.redis, "Ready to accept connections", timeout=30) + logger.info("Redis container is ready and accepting connections") + + # Start Dify Sandbox container for code execution environment + # Dify Sandbox provides a secure environment for executing user code + logger.info("Initializing Dify Sandbox container...") + self.dify_sandbox = DockerContainer(image="langgenius/dify-sandbox:latest") + self.dify_sandbox.with_exposed_ports(8194) + self.dify_sandbox.env = { + "API_KEY": "test_api_key", + } + self.dify_sandbox.start() + sandbox_host = self.dify_sandbox.get_container_host_ip() + sandbox_port = self.dify_sandbox.get_exposed_port(8194) + os.environ["CODE_EXECUTION_ENDPOINT"] = f"http://{sandbox_host}:{sandbox_port}" + os.environ["CODE_EXECUTION_API_KEY"] = "test_api_key" + logger.info("Dify Sandbox container started successfully - Host: %s, Port: %s", sandbox_host, sandbox_port) + + # Wait for Dify Sandbox to be ready + logger.info("Waiting for Dify Sandbox to be ready to accept connections...") + wait_for_logs(self.dify_sandbox, "config init success", timeout=60) + logger.info("Dify Sandbox container is ready and accepting connections") + + self._containers_started = True + logger.info("All test containers started successfully") + + def stop_containers(self) -> None: + """ + Stop and clean up all test containers. + + This method ensures proper cleanup of all containers to prevent + resource leaks and conflicts between test runs. + """ + if not self._containers_started: + logger.info("No containers to stop - containers were not started") + return + + logger.info("Stopping and cleaning up test containers...") + containers = [self.redis, self.postgres, self.dify_sandbox] + for container in containers: + if container: + try: + container_name = container.image + logger.info("Stopping container: %s", container_name) + container.stop() + logger.info("Successfully stopped container: %s", container_name) + except Exception as e: + # Log error but don't fail the test cleanup + logger.warning("Failed to stop container %s: %s", container, e) + + self._containers_started = False + logger.info("All test containers stopped and cleaned up successfully") + + +# Global container manager instance +_container_manager = DifyTestContainers() + + +def _create_app_with_containers() -> Flask: + """ + Create Flask application configured to use test containers. + + This function creates a Flask application instance that is configured + to connect to the test containers instead of the default development + or production databases. + + Returns: + Flask: Configured Flask application for containerized testing + """ + logger.info("Creating Flask application with test container configuration...") + + # Re-create the config after environment variables have been set + from configs import dify_config + + # Force re-creation of config with new environment variables + dify_config.__dict__.clear() + dify_config.__init__() + + # Create and configure the Flask application + logger.info("Initializing Flask application...") + app = create_app() + logger.info("Flask application created successfully") + + # Initialize database schema + logger.info("Creating database schema...") + with app.app_context(): + db.create_all() + logger.info("Database schema created successfully") + + logger.info("Flask application configured and ready for testing") + return app + + +@pytest.fixture(scope="session") +def set_up_containers_and_env() -> Generator[DifyTestContainers, None, None]: + """ + Session-scoped fixture to manage test containers. + + This fixture ensures containers are started once per test session + and properly cleaned up when all tests are complete. This approach + improves test performance by reusing containers across multiple tests. + + Yields: + DifyTestContainers: Container manager instance + """ + logger.info("=== Starting test session container management ===") + _container_manager.start_containers_with_env() + logger.info("Test containers ready for session") + yield _container_manager + logger.info("=== Cleaning up test session containers ===") + _container_manager.stop_containers() + logger.info("Test session container cleanup completed") + + +@pytest.fixture(scope="session") +def flask_app_with_containers(set_up_containers_and_env) -> Flask: + """ + Session-scoped Flask application fixture using test containers. + + This fixture provides a Flask application instance that is configured + to use the test containers for all database and service connections. + + Args: + containers: Container manager fixture + + Returns: + Flask: Configured Flask application + """ + logger.info("=== Creating session-scoped Flask application ===") + app = _create_app_with_containers() + logger.info("Session-scoped Flask application created successfully") + return app + + +@pytest.fixture +def flask_req_ctx_with_containers(flask_app_with_containers) -> Generator[None, None, None]: + """ + Request context fixture for containerized Flask application. + + This fixture provides a Flask request context for tests that need + to interact with the Flask application within a request scope. + + Args: + flask_app_with_containers: Flask application fixture + + Yields: + None: Request context is active during yield + """ + logger.debug("Creating Flask request context...") + with flask_app_with_containers.test_request_context(): + logger.debug("Flask request context active") + yield + logger.debug("Flask request context closed") + + +@pytest.fixture +def test_client_with_containers(flask_app_with_containers) -> Generator[FlaskClient, None, None]: + """ + Test client fixture for containerized Flask application. + + This fixture provides a Flask test client that can be used to make + HTTP requests to the containerized application for integration testing. + + Args: + flask_app_with_containers: Flask application fixture + + Yields: + FlaskClient: Test client instance + """ + logger.debug("Creating Flask test client...") + with flask_app_with_containers.test_client() as client: + logger.debug("Flask test client ready") + yield client + logger.debug("Flask test client closed") + + +@pytest.fixture +def db_session_with_containers(flask_app_with_containers) -> Generator[Session, None, None]: + """ + Database session fixture for containerized testing. + + This fixture provides a SQLAlchemy database session that is connected + to the test PostgreSQL container, allowing tests to interact with + the database directly. + + Args: + flask_app_with_containers: Flask application fixture + + Yields: + Session: Database session instance + """ + logger.debug("Creating database session...") + with flask_app_with_containers.app_context(): + session = db.session() + logger.debug("Database session created and ready") + try: + yield session + finally: + session.close() + logger.debug("Database session closed") diff --git a/api/tests/test_containers_integration_tests/factories/__init__.py b/api/tests/test_containers_integration_tests/factories/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/api/tests/test_containers_integration_tests/factories/test_storage_key_loader.py b/api/tests/test_containers_integration_tests/factories/test_storage_key_loader.py new file mode 100644 index 0000000000..d6e14f3f54 --- /dev/null +++ b/api/tests/test_containers_integration_tests/factories/test_storage_key_loader.py @@ -0,0 +1,371 @@ +import unittest +from datetime import UTC, datetime +from typing import Optional +from unittest.mock import patch +from uuid import uuid4 + +import pytest +from sqlalchemy.orm import Session + +from core.file import File, FileTransferMethod, FileType +from extensions.ext_database import db +from factories.file_factory import StorageKeyLoader +from models import ToolFile, UploadFile +from models.enums import CreatorUserRole + + +@pytest.mark.usefixtures("flask_req_ctx_with_containers") +class TestStorageKeyLoader(unittest.TestCase): + """ + Integration tests for StorageKeyLoader class. + + Tests the batched loading of storage keys from the database for files + with different transfer methods: LOCAL_FILE, REMOTE_URL, and TOOL_FILE. + """ + + def setUp(self): + """Set up test data before each test method.""" + self.session = db.session() + self.tenant_id = str(uuid4()) + self.user_id = str(uuid4()) + self.conversation_id = str(uuid4()) + + # Create test data that will be cleaned up after each test + self.test_upload_files = [] + self.test_tool_files = [] + + # Create StorageKeyLoader instance + self.loader = StorageKeyLoader(self.session, self.tenant_id) + + def tearDown(self): + """Clean up test data after each test method.""" + self.session.rollback() + + def _create_upload_file( + self, file_id: Optional[str] = None, storage_key: Optional[str] = None, tenant_id: Optional[str] = None + ) -> UploadFile: + """Helper method to create an UploadFile record for testing.""" + if file_id is None: + file_id = str(uuid4()) + if storage_key is None: + storage_key = f"test_storage_key_{uuid4()}" + if tenant_id is None: + tenant_id = self.tenant_id + + upload_file = UploadFile( + tenant_id=tenant_id, + storage_type="local", + key=storage_key, + name="test_file.txt", + size=1024, + extension=".txt", + mime_type="text/plain", + created_by_role=CreatorUserRole.ACCOUNT, + created_by=self.user_id, + created_at=datetime.now(UTC), + used=False, + ) + upload_file.id = file_id + + self.session.add(upload_file) + self.session.flush() + self.test_upload_files.append(upload_file) + + return upload_file + + def _create_tool_file( + self, file_id: Optional[str] = None, file_key: Optional[str] = None, tenant_id: Optional[str] = None + ) -> ToolFile: + """Helper method to create a ToolFile record for testing.""" + if file_id is None: + file_id = str(uuid4()) + if file_key is None: + file_key = f"test_file_key_{uuid4()}" + if tenant_id is None: + tenant_id = self.tenant_id + + tool_file = ToolFile() + tool_file.id = file_id + tool_file.user_id = self.user_id + tool_file.tenant_id = tenant_id + tool_file.conversation_id = self.conversation_id + tool_file.file_key = file_key + tool_file.mimetype = "text/plain" + tool_file.original_url = "http://example.com/file.txt" + tool_file.name = "test_tool_file.txt" + tool_file.size = 2048 + + self.session.add(tool_file) + self.session.flush() + self.test_tool_files.append(tool_file) + + return tool_file + + def _create_file( + self, related_id: str, transfer_method: FileTransferMethod, tenant_id: Optional[str] = None + ) -> File: + """Helper method to create a File object for testing.""" + if tenant_id is None: + tenant_id = self.tenant_id + + # Set related_id for LOCAL_FILE and TOOL_FILE transfer methods + file_related_id = None + remote_url = None + + if transfer_method in (FileTransferMethod.LOCAL_FILE, FileTransferMethod.TOOL_FILE): + file_related_id = related_id + elif transfer_method == FileTransferMethod.REMOTE_URL: + remote_url = "https://example.com/test_file.txt" + file_related_id = related_id + + return File( + id=str(uuid4()), # Generate new UUID for File.id + tenant_id=tenant_id, + type=FileType.DOCUMENT, + transfer_method=transfer_method, + related_id=file_related_id, + remote_url=remote_url, + filename="test_file.txt", + extension=".txt", + mime_type="text/plain", + size=1024, + storage_key="initial_key", + ) + + def test_load_storage_keys_local_file(self): + """Test loading storage keys for LOCAL_FILE transfer method.""" + # Create test data + upload_file = self._create_upload_file() + file = self._create_file(related_id=upload_file.id, transfer_method=FileTransferMethod.LOCAL_FILE) + + # Load storage keys + self.loader.load_storage_keys([file]) + + # Verify storage key was loaded correctly + assert file._storage_key == upload_file.key + + def test_load_storage_keys_remote_url(self): + """Test loading storage keys for REMOTE_URL transfer method.""" + # Create test data + upload_file = self._create_upload_file() + file = self._create_file(related_id=upload_file.id, transfer_method=FileTransferMethod.REMOTE_URL) + + # Load storage keys + self.loader.load_storage_keys([file]) + + # Verify storage key was loaded correctly + assert file._storage_key == upload_file.key + + def test_load_storage_keys_tool_file(self): + """Test loading storage keys for TOOL_FILE transfer method.""" + # Create test data + tool_file = self._create_tool_file() + file = self._create_file(related_id=tool_file.id, transfer_method=FileTransferMethod.TOOL_FILE) + + # Load storage keys + self.loader.load_storage_keys([file]) + + # Verify storage key was loaded correctly + assert file._storage_key == tool_file.file_key + + def test_load_storage_keys_mixed_methods(self): + """Test batch loading with mixed transfer methods.""" + # Create test data for different transfer methods + upload_file1 = self._create_upload_file() + upload_file2 = self._create_upload_file() + tool_file = self._create_tool_file() + + file1 = self._create_file(related_id=upload_file1.id, transfer_method=FileTransferMethod.LOCAL_FILE) + file2 = self._create_file(related_id=upload_file2.id, transfer_method=FileTransferMethod.REMOTE_URL) + file3 = self._create_file(related_id=tool_file.id, transfer_method=FileTransferMethod.TOOL_FILE) + + files = [file1, file2, file3] + + # Load storage keys + self.loader.load_storage_keys(files) + + # Verify all storage keys were loaded correctly + assert file1._storage_key == upload_file1.key + assert file2._storage_key == upload_file2.key + assert file3._storage_key == tool_file.file_key + + def test_load_storage_keys_empty_list(self): + """Test with empty file list.""" + # Should not raise any exceptions + self.loader.load_storage_keys([]) + + def test_load_storage_keys_tenant_mismatch(self): + """Test tenant_id validation.""" + # Create file with different tenant_id + upload_file = self._create_upload_file() + file = self._create_file( + related_id=upload_file.id, transfer_method=FileTransferMethod.LOCAL_FILE, tenant_id=str(uuid4()) + ) + + # Should raise ValueError for tenant mismatch + with pytest.raises(ValueError) as context: + self.loader.load_storage_keys([file]) + + assert "invalid file, expected tenant_id" in str(context.value) + + def test_load_storage_keys_missing_file_id(self): + """Test with None file.related_id.""" + # Create a file with valid parameters first, then manually set related_id to None + file = self._create_file(related_id=str(uuid4()), transfer_method=FileTransferMethod.LOCAL_FILE) + file.related_id = None + + # Should raise ValueError for None file related_id + with pytest.raises(ValueError) as context: + self.loader.load_storage_keys([file]) + + assert str(context.value) == "file id should not be None." + + def test_load_storage_keys_nonexistent_upload_file_records(self): + """Test with missing UploadFile database records.""" + # Create file with non-existent upload file id + non_existent_id = str(uuid4()) + file = self._create_file(related_id=non_existent_id, transfer_method=FileTransferMethod.LOCAL_FILE) + + # Should raise ValueError for missing record + with pytest.raises(ValueError): + self.loader.load_storage_keys([file]) + + def test_load_storage_keys_nonexistent_tool_file_records(self): + """Test with missing ToolFile database records.""" + # Create file with non-existent tool file id + non_existent_id = str(uuid4()) + file = self._create_file(related_id=non_existent_id, transfer_method=FileTransferMethod.TOOL_FILE) + + # Should raise ValueError for missing record + with pytest.raises(ValueError): + self.loader.load_storage_keys([file]) + + def test_load_storage_keys_invalid_uuid(self): + """Test with invalid UUID format.""" + # Create a file with valid parameters first, then manually set invalid related_id + file = self._create_file(related_id=str(uuid4()), transfer_method=FileTransferMethod.LOCAL_FILE) + file.related_id = "invalid-uuid-format" + + # Should raise ValueError for invalid UUID + with pytest.raises(ValueError): + self.loader.load_storage_keys([file]) + + def test_load_storage_keys_batch_efficiency(self): + """Test batched operations use efficient queries.""" + # Create multiple files of different types + upload_files = [self._create_upload_file() for _ in range(3)] + tool_files = [self._create_tool_file() for _ in range(2)] + + files = [] + files.extend( + [self._create_file(related_id=uf.id, transfer_method=FileTransferMethod.LOCAL_FILE) for uf in upload_files] + ) + files.extend( + [self._create_file(related_id=tf.id, transfer_method=FileTransferMethod.TOOL_FILE) for tf in tool_files] + ) + + # Mock the session to count queries + with patch.object(self.session, "scalars", wraps=self.session.scalars) as mock_scalars: + self.loader.load_storage_keys(files) + + # Should make exactly 2 queries (one for upload_files, one for tool_files) + assert mock_scalars.call_count == 2 + + # Verify all storage keys were loaded correctly + for i, file in enumerate(files[:3]): + assert file._storage_key == upload_files[i].key + for i, file in enumerate(files[3:]): + assert file._storage_key == tool_files[i].file_key + + def test_load_storage_keys_tenant_isolation(self): + """Test that tenant isolation works correctly.""" + # Create files for different tenants + other_tenant_id = str(uuid4()) + + # Create upload file for current tenant + upload_file_current = self._create_upload_file() + file_current = self._create_file( + related_id=upload_file_current.id, transfer_method=FileTransferMethod.LOCAL_FILE + ) + + # Create upload file for other tenant (but don't add to cleanup list) + upload_file_other = UploadFile( + tenant_id=other_tenant_id, + storage_type="local", + key="other_tenant_key", + name="other_file.txt", + size=1024, + extension=".txt", + mime_type="text/plain", + created_by_role=CreatorUserRole.ACCOUNT, + created_by=self.user_id, + created_at=datetime.now(UTC), + used=False, + ) + upload_file_other.id = str(uuid4()) + self.session.add(upload_file_other) + self.session.flush() + + # Create file for other tenant but try to load with current tenant's loader + file_other = self._create_file( + related_id=upload_file_other.id, transfer_method=FileTransferMethod.LOCAL_FILE, tenant_id=other_tenant_id + ) + + # Should raise ValueError due to tenant mismatch + with pytest.raises(ValueError) as context: + self.loader.load_storage_keys([file_other]) + + assert "invalid file, expected tenant_id" in str(context.value) + + # Current tenant's file should still work + self.loader.load_storage_keys([file_current]) + assert file_current._storage_key == upload_file_current.key + + def test_load_storage_keys_mixed_tenant_batch(self): + """Test batch with mixed tenant files (should fail on first mismatch).""" + # Create files for current tenant + upload_file_current = self._create_upload_file() + file_current = self._create_file( + related_id=upload_file_current.id, transfer_method=FileTransferMethod.LOCAL_FILE + ) + + # Create file for different tenant + other_tenant_id = str(uuid4()) + file_other = self._create_file( + related_id=str(uuid4()), transfer_method=FileTransferMethod.LOCAL_FILE, tenant_id=other_tenant_id + ) + + # Should raise ValueError on tenant mismatch + with pytest.raises(ValueError) as context: + self.loader.load_storage_keys([file_current, file_other]) + + assert "invalid file, expected tenant_id" in str(context.value) + + def test_load_storage_keys_duplicate_file_ids(self): + """Test handling of duplicate file IDs in the batch.""" + # Create upload file + upload_file = self._create_upload_file() + + # Create two File objects with same related_id + file1 = self._create_file(related_id=upload_file.id, transfer_method=FileTransferMethod.LOCAL_FILE) + file2 = self._create_file(related_id=upload_file.id, transfer_method=FileTransferMethod.LOCAL_FILE) + + # Should handle duplicates gracefully + self.loader.load_storage_keys([file1, file2]) + + # Both files should have the same storage key + assert file1._storage_key == upload_file.key + assert file2._storage_key == upload_file.key + + def test_load_storage_keys_session_isolation(self): + """Test that the loader uses the provided session correctly.""" + # Create test data + upload_file = self._create_upload_file() + file = self._create_file(related_id=upload_file.id, transfer_method=FileTransferMethod.LOCAL_FILE) + + # Create loader with different session (same underlying connection) + + with Session(bind=db.engine) as other_session: + other_loader = StorageKeyLoader(other_session, self.tenant_id) + with pytest.raises(ValueError): + other_loader.load_storage_keys([file]) diff --git a/api/tests/test_containers_integration_tests/workflow/__init__.py b/api/tests/test_containers_integration_tests/workflow/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/api/tests/test_containers_integration_tests/workflow/nodes/__init__.py b/api/tests/test_containers_integration_tests/workflow/nodes/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/api/tests/test_containers_integration_tests/workflow/nodes/code_executor/__init__.py b/api/tests/test_containers_integration_tests/workflow/nodes/code_executor/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/api/tests/test_containers_integration_tests/workflow/nodes/code_executor/test_code_executor.py b/api/tests/test_containers_integration_tests/workflow/nodes/code_executor/test_code_executor.py new file mode 100644 index 0000000000..487178ff58 --- /dev/null +++ b/api/tests/test_containers_integration_tests/workflow/nodes/code_executor/test_code_executor.py @@ -0,0 +1,11 @@ +import pytest + +from core.helper.code_executor.code_executor import CodeExecutionError, CodeExecutor + +CODE_LANGUAGE = "unsupported_language" + + +def test_unsupported_with_code_template(): + with pytest.raises(CodeExecutionError) as e: + CodeExecutor.execute_workflow_code_template(language=CODE_LANGUAGE, code="", inputs={}) + assert str(e.value) == f"Unsupported language {CODE_LANGUAGE}" diff --git a/api/tests/test_containers_integration_tests/workflow/nodes/code_executor/test_code_javascript.py b/api/tests/test_containers_integration_tests/workflow/nodes/code_executor/test_code_javascript.py new file mode 100644 index 0000000000..19a41b6186 --- /dev/null +++ b/api/tests/test_containers_integration_tests/workflow/nodes/code_executor/test_code_javascript.py @@ -0,0 +1,47 @@ +from textwrap import dedent + +from .test_utils import CodeExecutorTestMixin + + +class TestJavaScriptCodeExecutor(CodeExecutorTestMixin): + """Test class for JavaScript code executor functionality.""" + + def test_javascript_plain(self, flask_app_with_containers): + """Test basic JavaScript code execution with console.log output""" + CodeExecutor, CodeLanguage = self.code_executor_imports + + code = 'console.log("Hello World")' + result_message = CodeExecutor.execute_code(language=CodeLanguage.JAVASCRIPT, preload="", code=code) + assert result_message == "Hello World\n" + + def test_javascript_json(self, flask_app_with_containers): + """Test JavaScript code execution with JSON output""" + CodeExecutor, CodeLanguage = self.code_executor_imports + + code = dedent(""" + obj = {'Hello': 'World'} + console.log(JSON.stringify(obj)) + """) + result = CodeExecutor.execute_code(language=CodeLanguage.JAVASCRIPT, preload="", code=code) + assert result == '{"Hello":"World"}\n' + + def test_javascript_with_code_template(self, flask_app_with_containers): + """Test JavaScript workflow code template execution with inputs""" + CodeExecutor, CodeLanguage = self.code_executor_imports + JavascriptCodeProvider, _ = self.javascript_imports + + result = CodeExecutor.execute_workflow_code_template( + language=CodeLanguage.JAVASCRIPT, + code=JavascriptCodeProvider.get_default_code(), + inputs={"arg1": "Hello", "arg2": "World"}, + ) + assert result == {"result": "HelloWorld"} + + def test_javascript_get_runner_script(self, flask_app_with_containers): + """Test JavaScript template transformer runner script generation""" + _, NodeJsTemplateTransformer = self.javascript_imports + + runner_script = NodeJsTemplateTransformer.get_runner_script() + assert runner_script.count(NodeJsTemplateTransformer._code_placeholder) == 1 + assert runner_script.count(NodeJsTemplateTransformer._inputs_placeholder) == 1 + assert runner_script.count(NodeJsTemplateTransformer._result_tag) == 2 diff --git a/api/tests/test_containers_integration_tests/workflow/nodes/code_executor/test_code_jinja2.py b/api/tests/test_containers_integration_tests/workflow/nodes/code_executor/test_code_jinja2.py new file mode 100644 index 0000000000..c764801170 --- /dev/null +++ b/api/tests/test_containers_integration_tests/workflow/nodes/code_executor/test_code_jinja2.py @@ -0,0 +1,42 @@ +import base64 + +from .test_utils import CodeExecutorTestMixin + + +class TestJinja2CodeExecutor(CodeExecutorTestMixin): + """Test class for Jinja2 code executor functionality.""" + + def test_jinja2(self, flask_app_with_containers): + """Test basic Jinja2 template execution with variable substitution""" + CodeExecutor, CodeLanguage = self.code_executor_imports + _, Jinja2TemplateTransformer = self.jinja2_imports + + template = "Hello {{template}}" + inputs = base64.b64encode(b'{"template": "World"}').decode("utf-8") + code = ( + Jinja2TemplateTransformer.get_runner_script() + .replace(Jinja2TemplateTransformer._code_placeholder, template) + .replace(Jinja2TemplateTransformer._inputs_placeholder, inputs) + ) + result = CodeExecutor.execute_code( + language=CodeLanguage.JINJA2, preload=Jinja2TemplateTransformer.get_preload_script(), code=code + ) + assert result == "<>Hello World<>\n" + + def test_jinja2_with_code_template(self, flask_app_with_containers): + """Test Jinja2 workflow code template execution with inputs""" + CodeExecutor, CodeLanguage = self.code_executor_imports + + result = CodeExecutor.execute_workflow_code_template( + language=CodeLanguage.JINJA2, code="Hello {{template}}", inputs={"template": "World"} + ) + assert result == {"result": "Hello World"} + + def test_jinja2_get_runner_script(self, flask_app_with_containers): + """Test Jinja2 template transformer runner script generation""" + _, Jinja2TemplateTransformer = self.jinja2_imports + + runner_script = Jinja2TemplateTransformer.get_runner_script() + assert runner_script.count(Jinja2TemplateTransformer._code_placeholder) == 1 + assert runner_script.count(Jinja2TemplateTransformer._inputs_placeholder) == 1 + assert runner_script.count(Jinja2TemplateTransformer._result_tag) == 2 diff --git a/api/tests/test_containers_integration_tests/workflow/nodes/code_executor/test_code_python3.py b/api/tests/test_containers_integration_tests/workflow/nodes/code_executor/test_code_python3.py new file mode 100644 index 0000000000..6d93df2472 --- /dev/null +++ b/api/tests/test_containers_integration_tests/workflow/nodes/code_executor/test_code_python3.py @@ -0,0 +1,47 @@ +from textwrap import dedent + +from .test_utils import CodeExecutorTestMixin + + +class TestPython3CodeExecutor(CodeExecutorTestMixin): + """Test class for Python3 code executor functionality.""" + + def test_python3_plain(self, flask_app_with_containers): + """Test basic Python3 code execution with print output""" + CodeExecutor, CodeLanguage = self.code_executor_imports + + code = 'print("Hello World")' + result = CodeExecutor.execute_code(language=CodeLanguage.PYTHON3, preload="", code=code) + assert result == "Hello World\n" + + def test_python3_json(self, flask_app_with_containers): + """Test Python3 code execution with JSON output""" + CodeExecutor, CodeLanguage = self.code_executor_imports + + code = dedent(""" + import json + print(json.dumps({'Hello': 'World'})) + """) + result = CodeExecutor.execute_code(language=CodeLanguage.PYTHON3, preload="", code=code) + assert result == '{"Hello": "World"}\n' + + def test_python3_with_code_template(self, flask_app_with_containers): + """Test Python3 workflow code template execution with inputs""" + CodeExecutor, CodeLanguage = self.code_executor_imports + Python3CodeProvider, _ = self.python3_imports + + result = CodeExecutor.execute_workflow_code_template( + language=CodeLanguage.PYTHON3, + code=Python3CodeProvider.get_default_code(), + inputs={"arg1": "Hello", "arg2": "World"}, + ) + assert result == {"result": "HelloWorld"} + + def test_python3_get_runner_script(self, flask_app_with_containers): + """Test Python3 template transformer runner script generation""" + _, Python3TemplateTransformer = self.python3_imports + + runner_script = Python3TemplateTransformer.get_runner_script() + assert runner_script.count(Python3TemplateTransformer._code_placeholder) == 1 + assert runner_script.count(Python3TemplateTransformer._inputs_placeholder) == 1 + assert runner_script.count(Python3TemplateTransformer._result_tag) == 2 diff --git a/api/tests/test_containers_integration_tests/workflow/nodes/code_executor/test_utils.py b/api/tests/test_containers_integration_tests/workflow/nodes/code_executor/test_utils.py new file mode 100644 index 0000000000..35a095b049 --- /dev/null +++ b/api/tests/test_containers_integration_tests/workflow/nodes/code_executor/test_utils.py @@ -0,0 +1,115 @@ +""" +Test utilities for code executor integration tests. + +This module provides lazy import functions to avoid module loading issues +that occur when modules are imported before the flask_app_with_containers fixture +has set up the proper environment variables and configuration. +""" + +import importlib +from typing import TYPE_CHECKING + +if TYPE_CHECKING: + pass + + +def force_reload_code_executor(): + """ + Force reload the code_executor module to reinitialize code_execution_endpoint_url. + + This function should be called after setting up environment variables + to ensure the code_execution_endpoint_url is initialized with the correct value. + """ + try: + import core.helper.code_executor.code_executor + + importlib.reload(core.helper.code_executor.code_executor) + except Exception as e: + # Log the error but don't fail the test + print(f"Warning: Failed to reload code_executor module: {e}") + + +def get_code_executor_imports(): + """ + Lazy import function for core CodeExecutor classes. + + Returns: + tuple: (CodeExecutor, CodeLanguage) classes + """ + from core.helper.code_executor.code_executor import CodeExecutor, CodeLanguage + + return CodeExecutor, CodeLanguage + + +def get_javascript_imports(): + """ + Lazy import function for JavaScript-specific modules. + + Returns: + tuple: (JavascriptCodeProvider, NodeJsTemplateTransformer) classes + """ + from core.helper.code_executor.javascript.javascript_code_provider import JavascriptCodeProvider + from core.helper.code_executor.javascript.javascript_transformer import NodeJsTemplateTransformer + + return JavascriptCodeProvider, NodeJsTemplateTransformer + + +def get_python3_imports(): + """ + Lazy import function for Python3-specific modules. + + Returns: + tuple: (Python3CodeProvider, Python3TemplateTransformer) classes + """ + from core.helper.code_executor.python3.python3_code_provider import Python3CodeProvider + from core.helper.code_executor.python3.python3_transformer import Python3TemplateTransformer + + return Python3CodeProvider, Python3TemplateTransformer + + +def get_jinja2_imports(): + """ + Lazy import function for Jinja2-specific modules. + + Returns: + tuple: (None, Jinja2TemplateTransformer) classes + """ + from core.helper.code_executor.jinja2.jinja2_transformer import Jinja2TemplateTransformer + + return None, Jinja2TemplateTransformer + + +class CodeExecutorTestMixin: + """ + Mixin class providing lazy import methods for code executor tests. + + This mixin helps avoid module loading issues by deferring imports + until after the flask_app_with_containers fixture has set up the environment. + """ + + def setup_method(self): + """ + Setup method called before each test method. + Force reload the code_executor module to ensure fresh initialization. + """ + force_reload_code_executor() + + @property + def code_executor_imports(self): + """Property to get CodeExecutor and CodeLanguage classes.""" + return get_code_executor_imports() + + @property + def javascript_imports(self): + """Property to get JavaScript-specific classes.""" + return get_javascript_imports() + + @property + def python3_imports(self): + """Property to get Python3-specific classes.""" + return get_python3_imports() + + @property + def jinja2_imports(self): + """Property to get Jinja2-specific classes.""" + return get_jinja2_imports() diff --git a/api/uv.lock b/api/uv.lock index 0bce38812e..4dced728ac 100644 --- a/api/uv.lock +++ b/api/uv.lock @@ -1318,6 +1318,7 @@ dev = [ { name = "pytest-mock" }, { name = "ruff" }, { name = "scipy-stubs" }, + { name = "testcontainers" }, { name = "types-aiofiles" }, { name = "types-beautifulsoup4" }, { name = "types-cachetools" }, @@ -1500,6 +1501,7 @@ dev = [ { name = "pytest-mock", specifier = "~=3.14.0" }, { name = "ruff", specifier = "~=0.12.3" }, { name = "scipy-stubs", specifier = ">=1.15.3.0" }, + { name = "testcontainers", specifier = "~=4.10.0" }, { name = "types-aiofiles", specifier = "~=24.1.0" }, { name = "types-beautifulsoup4", specifier = "~=4.12.0" }, { name = "types-cachetools", specifier = "~=5.5.0" }, @@ -1600,6 +1602,20 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/12/b3/231ffd4ab1fc9d679809f356cebee130ac7daa00d6d6f3206dd4fd137e9e/distro-1.9.0-py3-none-any.whl", hash = "sha256:7bffd925d65168f85027d8da9af6bddab658135b840670a223589bc0c8ef02b2", size = 20277, upload-time = "2023-12-24T09:54:30.421Z" }, ] +[[package]] +name = "docker" +version = "7.1.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "pywin32", marker = "sys_platform == 'win32'" }, + { name = "requests" }, + { name = "urllib3" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/91/9b/4a2ea29aeba62471211598dac5d96825bb49348fa07e906ea930394a83ce/docker-7.1.0.tar.gz", hash = "sha256:ad8c70e6e3f8926cb8a92619b832b4ea5299e2831c14284663184e200546fa6c", size = 117834, upload-time = "2024-05-23T11:13:57.216Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/e3/26/57c6fb270950d476074c087527a558ccb6f4436657314bfb6cdf484114c4/docker-7.1.0-py3-none-any.whl", hash = "sha256:c96b93b7f0a746f9e77d325bcfb87422a3d8bd4f03136ae8a85b37f1898d5fc0", size = 147774, upload-time = "2024-05-23T11:13:55.01Z" }, +] + [[package]] name = "docstring-parser" version = "0.16" @@ -5468,6 +5484,22 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/e5/30/643397144bfbfec6f6ef821f36f33e57d35946c44a2352d3c9f0ae847619/tenacity-9.1.2-py3-none-any.whl", hash = "sha256:f77bf36710d8b73a50b2dd155c97b870017ad21afe6ab300326b0371b3b05138", size = 28248, upload-time = "2025-04-02T08:25:07.678Z" }, ] +[[package]] +name = "testcontainers" +version = "4.10.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "docker" }, + { name = "python-dotenv" }, + { name = "typing-extensions" }, + { name = "urllib3" }, + { name = "wrapt" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/a1/49/9c618aff1c50121d183cdfbc3a4a5cf2727a2cde1893efe6ca55c7009196/testcontainers-4.10.0.tar.gz", hash = "sha256:03f85c3e505d8b4edeb192c72a961cebbcba0dd94344ae778b4a159cb6dcf8d3", size = 63327, upload-time = "2025-04-02T16:13:27.582Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/1c/0a/824b0c1ecf224802125279c3effff2e25ed785ed046e67da6e53d928de4c/testcontainers-4.10.0-py3-none-any.whl", hash = "sha256:31ed1a81238c7e131a2a29df6db8f23717d892b592fa5a1977fd0dcd0c23fc23", size = 107414, upload-time = "2025-04-02T16:13:25.785Z" }, +] + [[package]] name = "tidb-vector" version = "0.0.9" diff --git a/dev/pytest/pytest_all_tests.sh b/dev/pytest/pytest_all_tests.sh index 30898b4fcf..9123b2f8ad 100755 --- a/dev/pytest/pytest_all_tests.sh +++ b/dev/pytest/pytest_all_tests.sh @@ -15,3 +15,6 @@ dev/pytest/pytest_workflow.sh # Unit tests dev/pytest/pytest_unit_tests.sh + +# TestContainers tests +dev/pytest/pytest_testcontainers.sh diff --git a/dev/pytest/pytest_testcontainers.sh b/dev/pytest/pytest_testcontainers.sh new file mode 100755 index 0000000000..e55a436138 --- /dev/null +++ b/dev/pytest/pytest_testcontainers.sh @@ -0,0 +1,7 @@ +#!/bin/bash +set -x + +SCRIPT_DIR="$(dirname "$(realpath "$0")")" +cd "$SCRIPT_DIR/../.." + +pytest api/tests/test_containers_integration_tests From a724f356722ec6049be7017c0aa5e43ca43f8617 Mon Sep 17 00:00:00 2001 From: NeatGuyCoding <15627489+NeatGuyCoding@users.noreply.github.com> Date: Mon, 4 Aug 2025 20:20:43 +0800 Subject: [PATCH 133/415] fix: fetchAppWithTags may return empty when apps is over 100 (#23350) --- api/fields/app_fields.py | 8 ++++--- .../(appDetailLayout)/[appId]/layout-main.tsx | 14 ++---------- web/service/apps.ts | 22 +++++-------------- 3 files changed, 13 insertions(+), 31 deletions(-) diff --git a/api/fields/app_fields.py b/api/fields/app_fields.py index b6d85e0e24..1a5fcabf97 100644 --- a/api/fields/app_fields.py +++ b/api/fields/app_fields.py @@ -59,6 +59,8 @@ model_config_fields = { "updated_at": TimestampField, } +tag_fields = {"id": fields.String, "name": fields.String, "type": fields.String} + app_detail_fields = { "id": fields.String, "name": fields.String, @@ -77,6 +79,7 @@ app_detail_fields = { "updated_by": fields.String, "updated_at": TimestampField, "access_mode": fields.String, + "tags": fields.List(fields.Nested(tag_fields)), } prompt_config_fields = { @@ -92,8 +95,6 @@ model_config_partial_fields = { "updated_at": TimestampField, } -tag_fields = {"id": fields.String, "name": fields.String, "type": fields.String} - app_partial_fields = { "id": fields.String, "name": fields.String, @@ -185,7 +186,6 @@ app_detail_fields_with_site = { "enable_api": fields.Boolean, "model_config": fields.Nested(model_config_fields, attribute="app_model_config", allow_null=True), "workflow": fields.Nested(workflow_partial_fields, allow_null=True), - "site": fields.Nested(site_fields), "api_base_url": fields.String, "use_icon_as_answer_icon": fields.Boolean, "max_active_requests": fields.Integer, @@ -195,6 +195,8 @@ app_detail_fields_with_site = { "updated_at": TimestampField, "deleted_tools": fields.List(fields.Nested(deleted_tool_fields)), "access_mode": fields.String, + "tags": fields.List(fields.Nested(tag_fields)), + "site": fields.Nested(site_fields), } diff --git a/web/app/(commonLayout)/app/(appDetailLayout)/[appId]/layout-main.tsx b/web/app/(commonLayout)/app/(appDetailLayout)/[appId]/layout-main.tsx index 47d5be29dd..6d337e3c47 100644 --- a/web/app/(commonLayout)/app/(appDetailLayout)/[appId]/layout-main.tsx +++ b/web/app/(commonLayout)/app/(appDetailLayout)/[appId]/layout-main.tsx @@ -20,7 +20,7 @@ import cn from '@/utils/classnames' import { useStore } from '@/app/components/app/store' import AppSideBar from '@/app/components/app-sidebar' import type { NavIcon } from '@/app/components/app-sidebar/navLink' -import { fetchAppDetail, fetchAppWithTags } from '@/service/apps' +import { fetchAppDetailDirect } from '@/service/apps' import { useAppContext } from '@/context/app-context' import Loading from '@/app/components/base/loading' import useBreakpoints, { MediaType } from '@/hooks/use-breakpoints' @@ -118,17 +118,7 @@ const AppDetailLayout: FC = (props) => { useEffect(() => { setAppDetail() setIsLoadingAppDetail(true) - fetchAppDetail({ url: '/apps', id: appId }).then(async (res) => { - if (!res.tags || res.tags.length === 0) { - try { - const appWithTags = await fetchAppWithTags(appId) - if (appWithTags?.tags) - res.tags = appWithTags.tags - } - catch (error) { - // Fallback failed, continue with empty tags - } - } + fetchAppDetailDirect({ url: '/apps', id: appId }).then((res: App) => { setAppDetailRes(res) }).catch((e: any) => { if (e.status === 404) diff --git a/web/service/apps.ts b/web/service/apps.ts index 3fdcf44667..1d7b0bccdb 100644 --- a/web/service/apps.ts +++ b/web/service/apps.ts @@ -9,7 +9,12 @@ export const fetchAppList: Fetcher(url, { params }) } -export const fetchAppDetail = ({ url, id }: { url: string; id: string }) => { +export const fetchAppDetail: Fetcher = ({ url, id }) => { + return get(`${url}/${id}`) +} + +// Direct API call function for non-SWR usage +export const fetchAppDetailDirect = async ({ url, id }: { url: string; id: string }): Promise => { return get(`${url}/${id}`) } @@ -60,21 +65,6 @@ export const deleteApp: Fetcher = (appID) => { return del(`apps/${appID}`) } -export const fetchAppWithTags = async (appID: string) => { - try { - const appListResponse = await fetchAppList({ - url: '/apps', - params: { page: 1, limit: 100 }, - }) - const appWithTags = appListResponse.data.find(app => app.id === appID) - return appWithTags || null - } - catch (error) { - console.warn('Failed to fetch app with tags:', error) - return null - } -} - export const updateAppSiteStatus: Fetcher }> = ({ url, body }) => { return post(url, { body }) } From d8584dc03a69c19c5479bd898182e3cad2c06d55 Mon Sep 17 00:00:00 2001 From: yyh <92089059+lyzno1@users.noreply.github.com> Date: Tue, 5 Aug 2025 10:19:47 +0800 Subject: [PATCH 134/415] feat: enhance document list navigation and sorting functionality (#23383) --- .../document-detail-navigation-fix.test.tsx | 305 ++++++++++++++++++ web/__tests__/document-list-sorting.test.tsx | 83 +++++ web/__tests__/navigation-utils.test.ts | 290 +++++++++++++++++ .../datasets/documents/detail/index.tsx | 7 +- .../components/datasets/documents/list.tsx | 104 ++++-- web/models/common.ts | 12 + web/service/knowledge/use-document.ts | 4 +- web/utils/navigation.ts | 189 +++++++++++ 8 files changed, 966 insertions(+), 28 deletions(-) create mode 100644 web/__tests__/document-detail-navigation-fix.test.tsx create mode 100644 web/__tests__/document-list-sorting.test.tsx create mode 100644 web/__tests__/navigation-utils.test.ts create mode 100644 web/utils/navigation.ts diff --git a/web/__tests__/document-detail-navigation-fix.test.tsx b/web/__tests__/document-detail-navigation-fix.test.tsx new file mode 100644 index 0000000000..200ed09ea9 --- /dev/null +++ b/web/__tests__/document-detail-navigation-fix.test.tsx @@ -0,0 +1,305 @@ +/** + * Document Detail Navigation Fix Verification Test + * + * This test specifically validates that the backToPrev function in the document detail + * component correctly preserves pagination and filter states. + */ + +import { fireEvent, render, screen } from '@testing-library/react' +import { useRouter } from 'next/navigation' +import { useDocumentDetail, useDocumentMetadata } from '@/service/knowledge/use-document' + +// Mock Next.js router +const mockPush = jest.fn() +jest.mock('next/navigation', () => ({ + useRouter: jest.fn(() => ({ + push: mockPush, + })), +})) + +// Mock the document service hooks +jest.mock('@/service/knowledge/use-document', () => ({ + useDocumentDetail: jest.fn(), + useDocumentMetadata: jest.fn(), + useInvalidDocumentList: jest.fn(() => jest.fn()), +})) + +// Mock other dependencies +jest.mock('@/context/dataset-detail', () => ({ + useDatasetDetailContext: jest.fn(() => [null]), +})) + +jest.mock('@/service/use-base', () => ({ + useInvalid: jest.fn(() => jest.fn()), +})) + +jest.mock('@/service/knowledge/use-segment', () => ({ + useSegmentListKey: jest.fn(), + useChildSegmentListKey: jest.fn(), +})) + +// Create a minimal version of the DocumentDetail component that includes our fix +const DocumentDetailWithFix = ({ datasetId, documentId }: { datasetId: string; documentId: string }) => { + const router = useRouter() + + // This is the FIXED implementation from detail/index.tsx + const backToPrev = () => { + // Preserve pagination and filter states when navigating back + const searchParams = new URLSearchParams(window.location.search) + const queryString = searchParams.toString() + const separator = queryString ? '?' : '' + const backPath = `/datasets/${datasetId}/documents${separator}${queryString}` + router.push(backPath) + } + + return ( +
    + +
    + Dataset: {datasetId}, Document: {documentId} +
    +
    + ) +} + +describe('Document Detail Navigation Fix Verification', () => { + beforeEach(() => { + jest.clearAllMocks() + + // Mock successful API responses + ;(useDocumentDetail as jest.Mock).mockReturnValue({ + data: { + id: 'doc-123', + name: 'Test Document', + display_status: 'available', + enabled: true, + archived: false, + }, + error: null, + }) + + ;(useDocumentMetadata as jest.Mock).mockReturnValue({ + data: null, + error: null, + }) + }) + + describe('Query Parameter Preservation', () => { + test('preserves pagination state (page 3, limit 25)', () => { + // Simulate user coming from page 3 with 25 items per page + Object.defineProperty(window, 'location', { + value: { + search: '?page=3&limit=25', + }, + writable: true, + }) + + render() + + // User clicks back button + fireEvent.click(screen.getByTestId('back-button-fixed')) + + // Should preserve the pagination state + expect(mockPush).toHaveBeenCalledWith('/datasets/dataset-123/documents?page=3&limit=25') + + console.log('✅ Pagination state preserved: page=3&limit=25') + }) + + test('preserves search keyword and filters', () => { + // Simulate user with search and filters applied + Object.defineProperty(window, 'location', { + value: { + search: '?page=2&limit=10&keyword=API%20documentation&status=active', + }, + writable: true, + }) + + render() + + fireEvent.click(screen.getByTestId('back-button-fixed')) + + // Should preserve all query parameters + expect(mockPush).toHaveBeenCalledWith('/datasets/dataset-123/documents?page=2&limit=10&keyword=API+documentation&status=active') + + console.log('✅ Search and filters preserved') + }) + + test('handles complex query parameters with special characters', () => { + // Test with complex query string including encoded characters + Object.defineProperty(window, 'location', { + value: { + search: '?page=1&limit=50&keyword=test%20%26%20debug&sort=name&order=desc&filter=%7B%22type%22%3A%22pdf%22%7D', + }, + writable: true, + }) + + render() + + fireEvent.click(screen.getByTestId('back-button-fixed')) + + // URLSearchParams will normalize the encoding, but preserve all parameters + const expectedCall = mockPush.mock.calls[0][0] + expect(expectedCall).toMatch(/^\/datasets\/dataset-123\/documents\?/) + expect(expectedCall).toMatch(/page=1/) + expect(expectedCall).toMatch(/limit=50/) + expect(expectedCall).toMatch(/keyword=test/) + expect(expectedCall).toMatch(/sort=name/) + expect(expectedCall).toMatch(/order=desc/) + + console.log('✅ Complex query parameters handled:', expectedCall) + }) + + test('handles empty query parameters gracefully', () => { + // No query parameters in URL + Object.defineProperty(window, 'location', { + value: { + search: '', + }, + writable: true, + }) + + render() + + fireEvent.click(screen.getByTestId('back-button-fixed')) + + // Should navigate to clean documents URL + expect(mockPush).toHaveBeenCalledWith('/datasets/dataset-123/documents') + + console.log('✅ Empty parameters handled gracefully') + }) + }) + + describe('Different Dataset IDs', () => { + test('works with different dataset identifiers', () => { + Object.defineProperty(window, 'location', { + value: { + search: '?page=5&limit=10', + }, + writable: true, + }) + + // Test with different dataset ID format + render() + + fireEvent.click(screen.getByTestId('back-button-fixed')) + + expect(mockPush).toHaveBeenCalledWith('/datasets/ds-prod-2024-001/documents?page=5&limit=10') + + console.log('✅ Works with different dataset ID formats') + }) + }) + + describe('Real User Scenarios', () => { + test('scenario: user searches, goes to page 3, views document, clicks back', () => { + // User searched for "API" and navigated to page 3 + Object.defineProperty(window, 'location', { + value: { + search: '?keyword=API&page=3&limit=10', + }, + writable: true, + }) + + render() + + // User decides to go back to continue browsing + fireEvent.click(screen.getByTestId('back-button-fixed')) + + // Should return to page 3 of API search results + expect(mockPush).toHaveBeenCalledWith('/datasets/main-dataset/documents?keyword=API&page=3&limit=10') + + console.log('✅ Real user scenario: search + pagination preserved') + }) + + test('scenario: user applies multiple filters, goes to document, returns', () => { + // User has applied multiple filters and is on page 2 + Object.defineProperty(window, 'location', { + value: { + search: '?page=2&limit=25&status=active&type=pdf&sort=created_at&order=desc', + }, + writable: true, + }) + + render() + + fireEvent.click(screen.getByTestId('back-button-fixed')) + + // All filters should be preserved + expect(mockPush).toHaveBeenCalledWith('/datasets/filtered-dataset/documents?page=2&limit=25&status=active&type=pdf&sort=created_at&order=desc') + + console.log('✅ Complex filtering scenario preserved') + }) + }) + + describe('Error Handling and Edge Cases', () => { + test('handles malformed query parameters gracefully', () => { + // Test with potentially problematic query string + Object.defineProperty(window, 'location', { + value: { + search: '?page=invalid&limit=&keyword=test&=emptykey&malformed', + }, + writable: true, + }) + + render() + + // Should not throw errors + expect(() => { + fireEvent.click(screen.getByTestId('back-button-fixed')) + }).not.toThrow() + + // Should still attempt navigation (URLSearchParams will clean up the parameters) + expect(mockPush).toHaveBeenCalled() + const navigationPath = mockPush.mock.calls[0][0] + expect(navigationPath).toMatch(/^\/datasets\/dataset-123\/documents/) + + console.log('✅ Malformed parameters handled gracefully:', navigationPath) + }) + + test('handles very long query strings', () => { + // Test with a very long query string + const longKeyword = 'a'.repeat(1000) + Object.defineProperty(window, 'location', { + value: { + search: `?page=1&keyword=${longKeyword}`, + }, + writable: true, + }) + + render() + + expect(() => { + fireEvent.click(screen.getByTestId('back-button-fixed')) + }).not.toThrow() + + expect(mockPush).toHaveBeenCalled() + + console.log('✅ Long query strings handled') + }) + }) + + describe('Performance Verification', () => { + test('navigation function executes quickly', () => { + Object.defineProperty(window, 'location', { + value: { + search: '?page=1&limit=10&keyword=test', + }, + writable: true, + }) + + render() + + const startTime = performance.now() + fireEvent.click(screen.getByTestId('back-button-fixed')) + const endTime = performance.now() + + const executionTime = endTime - startTime + + // Should execute in less than 10ms + expect(executionTime).toBeLessThan(10) + + console.log(`⚡ Navigation execution time: ${executionTime.toFixed(2)}ms`) + }) + }) +}) diff --git a/web/__tests__/document-list-sorting.test.tsx b/web/__tests__/document-list-sorting.test.tsx new file mode 100644 index 0000000000..1510dbec23 --- /dev/null +++ b/web/__tests__/document-list-sorting.test.tsx @@ -0,0 +1,83 @@ +/** + * Document List Sorting Tests + */ + +describe('Document List Sorting', () => { + const mockDocuments = [ + { id: '1', name: 'Beta.pdf', word_count: 500, hit_count: 10, created_at: 1699123456 }, + { id: '2', name: 'Alpha.txt', word_count: 200, hit_count: 25, created_at: 1699123400 }, + { id: '3', name: 'Gamma.docx', word_count: 800, hit_count: 5, created_at: 1699123500 }, + ] + + const sortDocuments = (docs: any[], field: string, order: 'asc' | 'desc') => { + return [...docs].sort((a, b) => { + let aValue: any + let bValue: any + + switch (field) { + case 'name': + aValue = a.name?.toLowerCase() || '' + bValue = b.name?.toLowerCase() || '' + break + case 'word_count': + aValue = a.word_count || 0 + bValue = b.word_count || 0 + break + case 'hit_count': + aValue = a.hit_count || 0 + bValue = b.hit_count || 0 + break + case 'created_at': + aValue = a.created_at + bValue = b.created_at + break + default: + return 0 + } + + if (field === 'name') { + const result = aValue.localeCompare(bValue) + return order === 'asc' ? result : -result + } + else { + const result = aValue - bValue + return order === 'asc' ? result : -result + } + }) + } + + test('sorts by name descending (default for UI consistency)', () => { + const sorted = sortDocuments(mockDocuments, 'name', 'desc') + expect(sorted.map(doc => doc.name)).toEqual(['Gamma.docx', 'Beta.pdf', 'Alpha.txt']) + }) + + test('sorts by name ascending (after toggle)', () => { + const sorted = sortDocuments(mockDocuments, 'name', 'asc') + expect(sorted.map(doc => doc.name)).toEqual(['Alpha.txt', 'Beta.pdf', 'Gamma.docx']) + }) + + test('sorts by word_count descending', () => { + const sorted = sortDocuments(mockDocuments, 'word_count', 'desc') + expect(sorted.map(doc => doc.word_count)).toEqual([800, 500, 200]) + }) + + test('sorts by hit_count descending', () => { + const sorted = sortDocuments(mockDocuments, 'hit_count', 'desc') + expect(sorted.map(doc => doc.hit_count)).toEqual([25, 10, 5]) + }) + + test('sorts by created_at descending (newest first)', () => { + const sorted = sortDocuments(mockDocuments, 'created_at', 'desc') + expect(sorted.map(doc => doc.created_at)).toEqual([1699123500, 1699123456, 1699123400]) + }) + + test('handles empty values correctly', () => { + const docsWithEmpty = [ + { id: '1', name: 'Test', word_count: 100, hit_count: 5, created_at: 1699123456 }, + { id: '2', name: 'Empty', word_count: 0, hit_count: 0, created_at: 1699123400 }, + ] + + const sorted = sortDocuments(docsWithEmpty, 'word_count', 'desc') + expect(sorted.map(doc => doc.word_count)).toEqual([100, 0]) + }) +}) diff --git a/web/__tests__/navigation-utils.test.ts b/web/__tests__/navigation-utils.test.ts new file mode 100644 index 0000000000..9a388505d6 --- /dev/null +++ b/web/__tests__/navigation-utils.test.ts @@ -0,0 +1,290 @@ +/** + * Navigation Utilities Test + * + * Tests for the navigation utility functions to ensure they handle + * query parameter preservation correctly across different scenarios. + */ + +import { + createBackNavigation, + createNavigationPath, + createNavigationPathWithParams, + datasetNavigation, + extractQueryParams, + mergeQueryParams, +} from '@/utils/navigation' + +// Mock router for testing +const mockPush = jest.fn() +const mockRouter = { push: mockPush } + +describe('Navigation Utilities', () => { + beforeEach(() => { + jest.clearAllMocks() + }) + + describe('createNavigationPath', () => { + test('preserves query parameters by default', () => { + Object.defineProperty(window, 'location', { + value: { search: '?page=3&limit=10&keyword=test' }, + writable: true, + }) + + const path = createNavigationPath('/datasets/123/documents') + expect(path).toBe('/datasets/123/documents?page=3&limit=10&keyword=test') + }) + + test('returns clean path when preserveParams is false', () => { + Object.defineProperty(window, 'location', { + value: { search: '?page=3&limit=10' }, + writable: true, + }) + + const path = createNavigationPath('/datasets/123/documents', false) + expect(path).toBe('/datasets/123/documents') + }) + + test('handles empty query parameters', () => { + Object.defineProperty(window, 'location', { + value: { search: '' }, + writable: true, + }) + + const path = createNavigationPath('/datasets/123/documents') + expect(path).toBe('/datasets/123/documents') + }) + + test('handles errors gracefully', () => { + // Mock window.location to throw an error + Object.defineProperty(window, 'location', { + get: () => { + throw new Error('Location access denied') + }, + configurable: true, + }) + + const consoleSpy = jest.spyOn(console, 'warn').mockImplementation() + const path = createNavigationPath('/datasets/123/documents') + + expect(path).toBe('/datasets/123/documents') + expect(consoleSpy).toHaveBeenCalledWith('Failed to preserve query parameters:', expect.any(Error)) + + consoleSpy.mockRestore() + }) + }) + + describe('createBackNavigation', () => { + test('creates function that navigates with preserved params', () => { + Object.defineProperty(window, 'location', { + value: { search: '?page=2&limit=25' }, + writable: true, + }) + + const backFn = createBackNavigation(mockRouter, '/datasets/123/documents') + backFn() + + expect(mockPush).toHaveBeenCalledWith('/datasets/123/documents?page=2&limit=25') + }) + + test('creates function that navigates without params when specified', () => { + Object.defineProperty(window, 'location', { + value: { search: '?page=2&limit=25' }, + writable: true, + }) + + const backFn = createBackNavigation(mockRouter, '/datasets/123/documents', false) + backFn() + + expect(mockPush).toHaveBeenCalledWith('/datasets/123/documents') + }) + }) + + describe('extractQueryParams', () => { + test('extracts specified parameters', () => { + Object.defineProperty(window, 'location', { + value: { search: '?page=3&limit=10&keyword=test&other=value' }, + writable: true, + }) + + const params = extractQueryParams(['page', 'limit', 'keyword']) + expect(params).toEqual({ + page: '3', + limit: '10', + keyword: 'test', + }) + }) + + test('handles missing parameters', () => { + Object.defineProperty(window, 'location', { + value: { search: '?page=3' }, + writable: true, + }) + + const params = extractQueryParams(['page', 'limit', 'missing']) + expect(params).toEqual({ + page: '3', + }) + }) + + test('handles errors gracefully', () => { + Object.defineProperty(window, 'location', { + get: () => { + throw new Error('Location access denied') + }, + configurable: true, + }) + + const consoleSpy = jest.spyOn(console, 'warn').mockImplementation() + const params = extractQueryParams(['page', 'limit']) + + expect(params).toEqual({}) + expect(consoleSpy).toHaveBeenCalledWith('Failed to extract query parameters:', expect.any(Error)) + + consoleSpy.mockRestore() + }) + }) + + describe('createNavigationPathWithParams', () => { + test('creates path with specified parameters', () => { + const path = createNavigationPathWithParams('/datasets/123/documents', { + page: 1, + limit: 25, + keyword: 'search term', + }) + + expect(path).toBe('/datasets/123/documents?page=1&limit=25&keyword=search+term') + }) + + test('filters out empty values', () => { + const path = createNavigationPathWithParams('/datasets/123/documents', { + page: 1, + limit: '', + keyword: 'test', + empty: null, + undefined, + }) + + expect(path).toBe('/datasets/123/documents?page=1&keyword=test') + }) + + test('handles errors gracefully', () => { + // Mock URLSearchParams to throw an error + const originalURLSearchParams = globalThis.URLSearchParams + globalThis.URLSearchParams = jest.fn(() => { + throw new Error('URLSearchParams error') + }) as any + + const consoleSpy = jest.spyOn(console, 'warn').mockImplementation() + const path = createNavigationPathWithParams('/datasets/123/documents', { page: 1 }) + + expect(path).toBe('/datasets/123/documents') + expect(consoleSpy).toHaveBeenCalledWith('Failed to create navigation path with params:', expect.any(Error)) + + consoleSpy.mockRestore() + globalThis.URLSearchParams = originalURLSearchParams + }) + }) + + describe('mergeQueryParams', () => { + test('merges new params with existing ones', () => { + Object.defineProperty(window, 'location', { + value: { search: '?page=3&limit=10' }, + writable: true, + }) + + const merged = mergeQueryParams({ keyword: 'test', page: '1' }) + const result = merged.toString() + + expect(result).toContain('page=1') // overridden + expect(result).toContain('limit=10') // preserved + expect(result).toContain('keyword=test') // added + }) + + test('removes parameters when value is null', () => { + Object.defineProperty(window, 'location', { + value: { search: '?page=3&limit=10&keyword=test' }, + writable: true, + }) + + const merged = mergeQueryParams({ keyword: null, filter: 'active' }) + const result = merged.toString() + + expect(result).toContain('page=3') + expect(result).toContain('limit=10') + expect(result).not.toContain('keyword') + expect(result).toContain('filter=active') + }) + + test('creates fresh params when preserveExisting is false', () => { + Object.defineProperty(window, 'location', { + value: { search: '?page=3&limit=10' }, + writable: true, + }) + + const merged = mergeQueryParams({ keyword: 'test' }, false) + const result = merged.toString() + + expect(result).toBe('keyword=test') + }) + }) + + describe('datasetNavigation', () => { + test('backToDocuments creates correct navigation function', () => { + Object.defineProperty(window, 'location', { + value: { search: '?page=2&limit=25' }, + writable: true, + }) + + const backFn = datasetNavigation.backToDocuments(mockRouter, 'dataset-123') + backFn() + + expect(mockPush).toHaveBeenCalledWith('/datasets/dataset-123/documents?page=2&limit=25') + }) + + test('toDocumentDetail creates correct navigation function', () => { + const detailFn = datasetNavigation.toDocumentDetail(mockRouter, 'dataset-123', 'doc-456') + detailFn() + + expect(mockPush).toHaveBeenCalledWith('/datasets/dataset-123/documents/doc-456') + }) + + test('toDocumentSettings creates correct navigation function', () => { + const settingsFn = datasetNavigation.toDocumentSettings(mockRouter, 'dataset-123', 'doc-456') + settingsFn() + + expect(mockPush).toHaveBeenCalledWith('/datasets/dataset-123/documents/doc-456/settings') + }) + }) + + describe('Real-world Integration Scenarios', () => { + test('complete user workflow: list -> detail -> back', () => { + // User starts on page 3 with search + Object.defineProperty(window, 'location', { + value: { search: '?page=3&keyword=API&limit=25' }, + writable: true, + }) + + // Create back navigation function (as would be done in detail component) + const backToDocuments = datasetNavigation.backToDocuments(mockRouter, 'main-dataset') + + // User clicks back + backToDocuments() + + // Should return to exact same list state + expect(mockPush).toHaveBeenCalledWith('/datasets/main-dataset/documents?page=3&keyword=API&limit=25') + }) + + test('user applies filters then views document', () => { + // Complex filter state + Object.defineProperty(window, 'location', { + value: { search: '?page=1&limit=50&status=active&type=pdf&sort=created_at&order=desc' }, + writable: true, + }) + + const backFn = createBackNavigation(mockRouter, '/datasets/filtered-set/documents') + backFn() + + expect(mockPush).toHaveBeenCalledWith('/datasets/filtered-set/documents?page=1&limit=50&status=active&type=pdf&sort=created_at&order=desc') + }) + }) +}) diff --git a/web/app/components/datasets/documents/detail/index.tsx b/web/app/components/datasets/documents/detail/index.tsx index 79d12e47e3..aa3ba6a27b 100644 --- a/web/app/components/datasets/documents/detail/index.tsx +++ b/web/app/components/datasets/documents/detail/index.tsx @@ -139,7 +139,12 @@ const DocumentDetail: FC = ({ datasetId, documentId }) => { }) const backToPrev = () => { - router.push(`/datasets/${datasetId}/documents`) + // Preserve pagination and filter states when navigating back + const searchParams = new URLSearchParams(window.location.search) + const queryString = searchParams.toString() + const separator = queryString ? '?' : '' + const backPath = `/datasets/${datasetId}/documents${separator}${queryString}` + router.push(backPath) } const isDetailLoading = !documentDetail && !error diff --git a/web/app/components/datasets/documents/list.tsx b/web/app/components/datasets/documents/list.tsx index abfa578138..83effd446c 100644 --- a/web/app/components/datasets/documents/list.tsx +++ b/web/app/components/datasets/documents/list.tsx @@ -18,7 +18,6 @@ import { import { useContext } from 'use-context-selector' import { useRouter } from 'next/navigation' import { useTranslation } from 'react-i18next' -import dayjs from 'dayjs' import { Globe01 } from '../../base/icons/src/vender/line/mapsAndTravel' import ChunkingModeLabel from '../common/chunking-mode-label' import FileTypeIcon from '../../base/file-uploader/file-type-icon' @@ -99,7 +98,6 @@ export const StatusItem: FC<{ const { mutateAsync: enableDocument } = useDocumentEnable() const { mutateAsync: disableDocument } = useDocumentDisable() const { mutateAsync: deleteDocument } = useDocumentDelete() - const downloadDocument = useDocumentDownload() const onOperate = async (operationName: OperationName) => { let opApi = deleteDocument @@ -313,9 +311,9 @@ export const OperationAction: FC<{ downloadDocument.mutateAsync({ datasetId, documentId: detail.id, - }).then((response) => { - if (response.download_url) - window.location.href = response.download_url + }).then((response) => { + if (response.download_url) + window.location.href = response.download_url }).catch((error) => { console.error(error) notify({ type: 'error', message: t('common.actionMsg.downloadFailed') }) @@ -478,7 +476,8 @@ const DocumentList: FC = ({ const isGeneralMode = chunkingMode !== ChunkingMode.parentChild const isQAMode = chunkingMode === ChunkingMode.qa const [localDocs, setLocalDocs] = useState(documents) - const [enableSort, setEnableSort] = useState(true) + const [sortField, setSortField] = useState<'name' | 'word_count' | 'hit_count' | 'created_at' | null>('created_at') + const [sortOrder, setSortOrder] = useState<'asc' | 'desc'>('desc') const { isShowEditModal, showEditModal, @@ -493,18 +492,74 @@ const DocumentList: FC = ({ }) useEffect(() => { - setLocalDocs(documents) - }, [documents]) - - const onClickSort = () => { - setEnableSort(!enableSort) - if (enableSort) { - const sortedDocs = [...localDocs].sort((a, b) => dayjs(a.created_at).isBefore(dayjs(b.created_at)) ? -1 : 1) - setLocalDocs(sortedDocs) - } - else { + if (!sortField) { setLocalDocs(documents) + return } + + const sortedDocs = [...documents].sort((a, b) => { + let aValue: any + let bValue: any + + switch (sortField) { + case 'name': + aValue = a.name?.toLowerCase() || '' + bValue = b.name?.toLowerCase() || '' + break + case 'word_count': + aValue = a.word_count || 0 + bValue = b.word_count || 0 + break + case 'hit_count': + aValue = a.hit_count || 0 + bValue = b.hit_count || 0 + break + case 'created_at': + aValue = a.created_at + bValue = b.created_at + break + default: + return 0 + } + + if (sortField === 'name') { + const result = aValue.localeCompare(bValue) + return sortOrder === 'asc' ? result : -result + } + else { + const result = aValue - bValue + return sortOrder === 'asc' ? result : -result + } + }) + + setLocalDocs(sortedDocs) + }, [documents, sortField, sortOrder]) + + const handleSort = (field: 'name' | 'word_count' | 'hit_count' | 'created_at') => { + if (sortField === field) { + setSortOrder(sortOrder === 'asc' ? 'desc' : 'asc') + } + else { + setSortField(field) + setSortOrder('desc') + } + } + + const renderSortHeader = (field: 'name' | 'word_count' | 'hit_count' | 'created_at', label: string) => { + const isActive = sortField === field + const isDesc = isActive && sortOrder === 'desc' + + return ( +
    handleSort(field)}> + {label} + +
    + ) } const [currDocument, setCurrDocument] = useState(null) @@ -586,18 +641,17 @@ const DocumentList: FC = ({
    - - + + diff --git a/web/models/common.ts b/web/models/common.ts index 867f4cf8fe..92aa263717 100644 --- a/web/models/common.ts +++ b/web/models/common.ts @@ -5,6 +5,18 @@ export type CommonResponse = { result: 'success' | 'fail' } +export type FileDownloadResponse = { + id: string + name: string + size: number + extension: string + url: string + download_url: string + mime_type: string + created_by: string + created_at: number +} + export type OauthResponse = { redirect_url: string } diff --git a/web/service/knowledge/use-document.ts b/web/service/knowledge/use-document.ts index e7f11f600d..3d6e322552 100644 --- a/web/service/knowledge/use-document.ts +++ b/web/service/knowledge/use-document.ts @@ -8,7 +8,7 @@ import type { MetadataType, SortType } from '../datasets' import { pauseDocIndexing, resumeDocIndexing } from '../datasets' import type { DocumentDetailResponse, DocumentListResponse, UpdateDocumentBatchParams } from '@/models/datasets' import { DocumentActionType } from '@/models/datasets' -import type { CommonResponse } from '@/models/common' +import type { CommonResponse, FileDownloadResponse } from '@/models/common' // Download document with authentication (sends Authorization header) import Toast from '@/app/components/base/toast' @@ -102,7 +102,7 @@ export const useDocumentDownload = () => { return useMutation({ mutationFn: async ({ datasetId, documentId }: { datasetId: string; documentId: string }) => { // The get helper automatically adds the Authorization header from localStorage - return get(`/datasets/${datasetId}/documents/${documentId}/upload-file`) + return get(`/datasets/${datasetId}/documents/${documentId}/upload-file`) }, onError: (error: any) => { // Show a toast notification if download fails diff --git a/web/utils/navigation.ts b/web/utils/navigation.ts new file mode 100644 index 0000000000..fec2291f3f --- /dev/null +++ b/web/utils/navigation.ts @@ -0,0 +1,189 @@ +/** + * Navigation Utilities + * + * Provides helper functions for consistent navigation behavior throughout the application, + * specifically for preserving query parameters when navigating between related pages. + */ + +/** + * Creates a navigation path that preserves current URL query parameters + * + * @param basePath - The base path to navigate to (e.g., '/datasets/123/documents') + * @param preserveParams - Whether to preserve current query parameters (default: true) + * @returns The complete navigation path with preserved query parameters + * + * @example + * // Current URL: /datasets/123/documents/456?page=3&limit=10&keyword=test + * const backPath = createNavigationPath('/datasets/123/documents') + * // Returns: '/datasets/123/documents?page=3&limit=10&keyword=test' + * + * @example + * // Navigate without preserving params + * const cleanPath = createNavigationPath('/datasets/123/documents', false) + * // Returns: '/datasets/123/documents' + */ +export function createNavigationPath(basePath: string, preserveParams: boolean = true): string { + if (!preserveParams) + return basePath + + try { + const searchParams = new URLSearchParams(window.location.search) + const queryString = searchParams.toString() + const separator = queryString ? '?' : '' + return `${basePath}${separator}${queryString}` + } + catch (error) { + // Fallback to base path if there's any error accessing location + console.warn('Failed to preserve query parameters:', error) + return basePath + } +} + +/** + * Creates a back navigation function that preserves query parameters + * + * @param router - Next.js router instance + * @param basePath - The base path to navigate back to + * @param preserveParams - Whether to preserve current query parameters (default: true) + * @returns A function that navigates back with preserved parameters + * + * @example + * const router = useRouter() + * const backToPrev = createBackNavigation(router, `/datasets/${datasetId}/documents`) + * + * // Later, when user clicks back: + * backToPrev() + */ +export function createBackNavigation( + router: { push: (path: string) => void }, + basePath: string, + preserveParams: boolean = true, +): () => void { + return () => { + const navigationPath = createNavigationPath(basePath, preserveParams) + router.push(navigationPath) + } +} + +/** + * Extracts specific query parameters from current URL + * + * @param paramNames - Array of parameter names to extract + * @returns Object with extracted parameters + * + * @example + * // Current URL: /page?page=3&limit=10&keyword=test&other=value + * const params = extractQueryParams(['page', 'limit', 'keyword']) + * // Returns: { page: '3', limit: '10', keyword: 'test' } + */ +export function extractQueryParams(paramNames: string[]): Record { + try { + const searchParams = new URLSearchParams(window.location.search) + const extracted: Record = {} + + paramNames.forEach((name) => { + const value = searchParams.get(name) + if (value !== null) + extracted[name] = value + }) + + return extracted + } + catch (error) { + console.warn('Failed to extract query parameters:', error) + return {} + } +} + +/** + * Creates a navigation path with specific query parameters + * + * @param basePath - The base path + * @param params - Object of query parameters to include + * @returns Navigation path with specified parameters + * + * @example + * const path = createNavigationPathWithParams('/datasets/123/documents', { + * page: '1', + * limit: '25', + * keyword: 'search term' + * }) + * // Returns: '/datasets/123/documents?page=1&limit=25&keyword=search+term' + */ +export function createNavigationPathWithParams( + basePath: string, + params: Record, +): string { + try { + const searchParams = new URLSearchParams() + + Object.entries(params).forEach(([key, value]) => { + if (value !== undefined && value !== null && value !== '') + searchParams.set(key, String(value)) + }) + + const queryString = searchParams.toString() + const separator = queryString ? '?' : '' + return `${basePath}${separator}${queryString}` + } + catch (error) { + console.warn('Failed to create navigation path with params:', error) + return basePath + } +} + +/** + * Merges current query parameters with new ones + * + * @param newParams - New parameters to add or override + * @param preserveExisting - Whether to preserve existing parameters (default: true) + * @returns URLSearchParams object with merged parameters + * + * @example + * // Current URL: /page?page=3&limit=10 + * const merged = mergeQueryParams({ keyword: 'test', page: '1' }) + * // Results in: page=1&limit=10&keyword=test (page overridden, limit preserved, keyword added) + */ +export function mergeQueryParams( + newParams: Record, + preserveExisting: boolean = true, +): URLSearchParams { + const searchParams = preserveExisting + ? new URLSearchParams(window.location.search) + : new URLSearchParams() + + Object.entries(newParams).forEach(([key, value]) => { + if (value === null || value === undefined) + searchParams.delete(key) + else if (value !== '') + searchParams.set(key, String(value)) + }) + + return searchParams +} + +/** + * Navigation utilities for common dataset/document patterns + */ +export const datasetNavigation = { + /** + * Creates navigation back to dataset documents list with preserved state + */ + backToDocuments: (router: { push: (path: string) => void }, datasetId: string) => { + return createBackNavigation(router, `/datasets/${datasetId}/documents`) + }, + + /** + * Creates navigation to document detail + */ + toDocumentDetail: (router: { push: (path: string) => void }, datasetId: string, documentId: string) => { + return () => router.push(`/datasets/${datasetId}/documents/${documentId}`) + }, + + /** + * Creates navigation to document settings + */ + toDocumentSettings: (router: { push: (path: string) => void }, datasetId: string, documentId: string) => { + return () => router.push(`/datasets/${datasetId}/documents/${documentId}/settings`) + }, +} From 7fe23a0ca65562b2b067296c712daa84087f9660 Mon Sep 17 00:00:00 2001 From: Will Date: Tue, 5 Aug 2025 10:20:28 +0800 Subject: [PATCH 135/415] remove useless Tool class attributes (#23389) --- api/core/tools/__base/tool.py | 3 --- api/core/tools/builtin_tool/tool.py | 2 -- api/core/tools/custom_tool/tool.py | 3 --- api/core/tools/mcp_tool/tool.py | 9 +-------- api/core/tools/plugin_tool/tool.py | 7 +------ api/core/tools/utils/dataset_retriever_tool.py | 2 -- api/core/tools/workflow_as_tool/tool.py | 9 --------- 7 files changed, 2 insertions(+), 33 deletions(-) diff --git a/api/core/tools/__base/tool.py b/api/core/tools/__base/tool.py index 35e16b5c8f..d6961cdaa4 100644 --- a/api/core/tools/__base/tool.py +++ b/api/core/tools/__base/tool.py @@ -20,9 +20,6 @@ class Tool(ABC): The base class of a tool """ - entity: ToolEntity - runtime: ToolRuntime - def __init__(self, entity: ToolEntity, runtime: ToolRuntime) -> None: self.entity = entity self.runtime = runtime diff --git a/api/core/tools/builtin_tool/tool.py b/api/core/tools/builtin_tool/tool.py index 724a2291c6..84efefba07 100644 --- a/api/core/tools/builtin_tool/tool.py +++ b/api/core/tools/builtin_tool/tool.py @@ -20,8 +20,6 @@ class BuiltinTool(Tool): :param meta: the meta data of a tool call processing """ - provider: str - def __init__(self, provider: str, **kwargs): super().__init__(**kwargs) self.provider = provider diff --git a/api/core/tools/custom_tool/tool.py b/api/core/tools/custom_tool/tool.py index 10653b9948..333ef2834c 100644 --- a/api/core/tools/custom_tool/tool.py +++ b/api/core/tools/custom_tool/tool.py @@ -21,9 +21,6 @@ API_TOOL_DEFAULT_TIMEOUT = ( class ApiTool(Tool): - api_bundle: ApiToolBundle - provider_id: str - """ Api tool """ diff --git a/api/core/tools/mcp_tool/tool.py b/api/core/tools/mcp_tool/tool.py index d1bacbc735..8ebbb6b0fe 100644 --- a/api/core/tools/mcp_tool/tool.py +++ b/api/core/tools/mcp_tool/tool.py @@ -8,23 +8,16 @@ from core.mcp.mcp_client import MCPClient from core.mcp.types import ImageContent, TextContent from core.tools.__base.tool import Tool from core.tools.__base.tool_runtime import ToolRuntime -from core.tools.entities.tool_entities import ToolEntity, ToolInvokeMessage, ToolParameter, ToolProviderType +from core.tools.entities.tool_entities import ToolEntity, ToolInvokeMessage, ToolProviderType class MCPTool(Tool): - tenant_id: str - icon: str - runtime_parameters: Optional[list[ToolParameter]] - server_url: str - provider_id: str - def __init__( self, entity: ToolEntity, runtime: ToolRuntime, tenant_id: str, icon: str, server_url: str, provider_id: str ) -> None: super().__init__(entity, runtime) self.tenant_id = tenant_id self.icon = icon - self.runtime_parameters = None self.server_url = server_url self.provider_id = provider_id diff --git a/api/core/tools/plugin_tool/tool.py b/api/core/tools/plugin_tool/tool.py index aef2677c36..db38c10e81 100644 --- a/api/core/tools/plugin_tool/tool.py +++ b/api/core/tools/plugin_tool/tool.py @@ -9,11 +9,6 @@ from core.tools.entities.tool_entities import ToolEntity, ToolInvokeMessage, Too class PluginTool(Tool): - tenant_id: str - icon: str - plugin_unique_identifier: str - runtime_parameters: Optional[list[ToolParameter]] - def __init__( self, entity: ToolEntity, runtime: ToolRuntime, tenant_id: str, icon: str, plugin_unique_identifier: str ) -> None: @@ -21,7 +16,7 @@ class PluginTool(Tool): self.tenant_id = tenant_id self.icon = icon self.plugin_unique_identifier = plugin_unique_identifier - self.runtime_parameters = None + self.runtime_parameters: Optional[list[ToolParameter]] = None def tool_provider_type(self) -> ToolProviderType: return ToolProviderType.PLUGIN diff --git a/api/core/tools/utils/dataset_retriever_tool.py b/api/core/tools/utils/dataset_retriever_tool.py index ec0575f6c3..d58807e29f 100644 --- a/api/core/tools/utils/dataset_retriever_tool.py +++ b/api/core/tools/utils/dataset_retriever_tool.py @@ -20,8 +20,6 @@ from core.tools.utils.dataset_retriever.dataset_retriever_base_tool import Datas class DatasetRetrieverTool(Tool): - retrieval_tool: DatasetRetrieverBaseTool - def __init__(self, entity: ToolEntity, runtime: ToolRuntime, retrieval_tool: DatasetRetrieverBaseTool) -> None: super().__init__(entity, runtime) self.retrieval_tool = retrieval_tool diff --git a/api/core/tools/workflow_as_tool/tool.py b/api/core/tools/workflow_as_tool/tool.py index db6b84082f..6824e5e0e8 100644 --- a/api/core/tools/workflow_as_tool/tool.py +++ b/api/core/tools/workflow_as_tool/tool.py @@ -25,15 +25,6 @@ logger = logging.getLogger(__name__) class WorkflowTool(Tool): - workflow_app_id: str - version: str - workflow_entities: dict[str, Any] - workflow_call_depth: int - thread_pool_id: Optional[str] = None - workflow_as_tool_id: str - - label: str - """ Workflow tool. """ From ab78e1208996c523676062ac0870a2a1d3e0f150 Mon Sep 17 00:00:00 2001 From: znn Date: Tue, 5 Aug 2025 08:06:48 +0530 Subject: [PATCH 136/415] enhancing logging (#23332) Co-authored-by: autofix-ci[bot] <114827586+autofix-ci[bot]@users.noreply.github.com> --- api/core/plugin/impl/base.py | 1 + api/core/plugin/impl/exc.py | 4 +++- 2 files changed, 4 insertions(+), 1 deletion(-) diff --git a/api/core/plugin/impl/base.py b/api/core/plugin/impl/base.py index 7375726fa9..6f32498b42 100644 --- a/api/core/plugin/impl/base.py +++ b/api/core/plugin/impl/base.py @@ -208,6 +208,7 @@ class BasePluginClient: except Exception: raise PluginDaemonInnerError(code=rep.code, message=rep.message) + logger.error("Error in stream reponse for plugin %s", rep.__dict__) self._handle_plugin_daemon_error(error.error_type, error.message) raise ValueError(f"plugin daemon: {rep.message}, code: {rep.code}") if rep.data is None: diff --git a/api/core/plugin/impl/exc.py b/api/core/plugin/impl/exc.py index 8b660c807d..8ecc2e2147 100644 --- a/api/core/plugin/impl/exc.py +++ b/api/core/plugin/impl/exc.py @@ -2,6 +2,8 @@ from collections.abc import Mapping from pydantic import TypeAdapter +from extensions.ext_logging import get_request_id + class PluginDaemonError(Exception): """Base class for all plugin daemon errors.""" @@ -11,7 +13,7 @@ class PluginDaemonError(Exception): def __str__(self) -> str: # returns the class name and description - return f"{self.__class__.__name__}: {self.description}" + return f"req_id: {get_request_id()} {self.__class__.__name__}: {self.description}" class PluginDaemonInternalError(PluginDaemonError): From 0cee57accad6e30728afee96bb6bd22f9d6a82b1 Mon Sep 17 00:00:00 2001 From: crazywoola <100913391+crazywoola@users.noreply.github.com> Date: Mon, 4 Aug 2025 19:43:51 -0700 Subject: [PATCH 137/415] chore: add Template (#23395) --- .github/ISSUE_TEMPLATE/chore.yaml | 27 +++++++++++++++++++++++++++ 1 file changed, 27 insertions(+) create mode 100644 .github/ISSUE_TEMPLATE/chore.yaml diff --git a/.github/ISSUE_TEMPLATE/chore.yaml b/.github/ISSUE_TEMPLATE/chore.yaml new file mode 100644 index 0000000000..953399bf77 --- /dev/null +++ b/.github/ISSUE_TEMPLATE/chore.yaml @@ -0,0 +1,27 @@ +name: "✨ Enhancement / Refactor" +description: Suggest an enhancement or request a code refactor. +title: "[Enhancement/Refactor] " +labels: + - refactor +body: + - type: textarea + id: description + attributes: + label: Description + placeholder: "Describe the enhancement or refactor you are proposing." + validations: + required: true + - type: textarea + id: motivation + attributes: + label: Motivation + placeholder: "Why is this enhancement or refactor needed?" + validations: + required: false + - type: textarea + id: additional-context + attributes: + label: Additional Context + placeholder: "Add any other context or screenshots about the request here." + validations: + required: false From b946378b385fb8d33911d726d1fdfdf5877cce93 Mon Sep 17 00:00:00 2001 From: Wu Tianwei <30284043+WTW0313@users.noreply.github.com> Date: Tue, 5 Aug 2025 11:01:31 +0800 Subject: [PATCH 138/415] fix: installed apps preview language error (#23397) --- .../components/base/chat/chat-with-history/hooks.tsx | 7 +++++-- .../components/base/chat/embedded-chatbot/hooks.tsx | 6 +++--- web/app/components/share/text-generation/index.tsx | 2 +- web/i18n-config/i18next-config.ts | 10 +++++----- 4 files changed, 14 insertions(+), 11 deletions(-) diff --git a/web/app/components/base/chat/chat-with-history/hooks.tsx b/web/app/components/base/chat/chat-with-history/hooks.tsx index e88d28879b..0f437c82b7 100644 --- a/web/app/components/base/chat/chat-with-history/hooks.tsx +++ b/web/app/components/base/chat/chat-with-history/hooks.tsx @@ -115,8 +115,11 @@ export const useChatWithHistory = (installedAppInfo?: InstalledApp) => { }, []) useEffect(() => { - if (appData?.site.default_language) - changeLanguage(appData.site.default_language) + const setLocaleFromProps = async () => { + if (appData?.site.default_language) + await changeLanguage(appData.site.default_language) + } + setLocaleFromProps() }, [appData]) const [sidebarCollapseState, setSidebarCollapseState] = useState(false) diff --git a/web/app/components/base/chat/embedded-chatbot/hooks.tsx b/web/app/components/base/chat/embedded-chatbot/hooks.tsx index 4e86ad50e4..d7983dc599 100644 --- a/web/app/components/base/chat/embedded-chatbot/hooks.tsx +++ b/web/app/components/base/chat/embedded-chatbot/hooks.tsx @@ -101,15 +101,15 @@ export const useEmbeddedChatbot = () => { if (localeParam) { // If locale parameter exists in URL, use it instead of default - changeLanguage(localeParam) + await changeLanguage(localeParam) } else if (localeFromSysVar) { // If locale is set as a system variable, use that - changeLanguage(localeFromSysVar) + await changeLanguage(localeFromSysVar) } else if (appInfo?.site.default_language) { // Otherwise use the default from app config - changeLanguage(appInfo.site.default_language) + await changeLanguage(appInfo.site.default_language) } } diff --git a/web/app/components/share/text-generation/index.tsx b/web/app/components/share/text-generation/index.tsx index 1f0367dc66..4a6d1c9965 100644 --- a/web/app/components/share/text-generation/index.tsx +++ b/web/app/components/share/text-generation/index.tsx @@ -371,7 +371,7 @@ const TextGeneration: FC = ({ setAppId(appId) setSiteInfo(siteInfo as SiteInfo) setCustomConfig(custom_config) - changeLanguage(siteInfo.default_language) + await changeLanguage(siteInfo.default_language) const { user_input_form, more_like_this, file_upload, text_to_speech }: any = appParams setVisionConfig({ diff --git a/web/i18n-config/i18next-config.ts b/web/i18n-config/i18next-config.ts index b26d0afdbe..19ac59ebb4 100644 --- a/web/i18n-config/i18next-config.ts +++ b/web/i18n-config/i18next-config.ts @@ -87,11 +87,11 @@ if (!i18n.isInitialized) { } export const changeLanguage = async (lng?: string) => { - const resolvedLng = lng ?? 'en-US' - const resource = await loadLangResources(resolvedLng) - if (!i18n.hasResourceBundle(resolvedLng, 'translation')) - i18n.addResourceBundle(resolvedLng, 'translation', resource, true, true) - await i18n.changeLanguage(resolvedLng) + if (!lng) return + const resource = await loadLangResources(lng) + if (!i18n.hasResourceBundle(lng, 'translation')) + i18n.addResourceBundle(lng, 'translation', resource, true, true) + await i18n.changeLanguage(lng) } export default i18n From 75f722a959f2ddfc97a37047e89a580e049dd557 Mon Sep 17 00:00:00 2001 From: Matri Qi Date: Tue, 5 Aug 2025 11:12:30 +0800 Subject: [PATCH 139/415] lint: fix issue of no-unused-vars (#23375) --- web/app/(commonLayout)/datasets/create/page.tsx | 4 +--- .../app/annotation/header-opts/index.tsx | 3 ++- .../components/workflow/nodes/answer/utils.ts | 2 +- .../components/workflow/nodes/assigner/utils.ts | 2 +- web/app/components/workflow/nodes/end/utils.ts | 2 +- .../components/condition-list/condition-item.tsx | 2 +- web/app/components/workflow/nodes/llm/panel.tsx | 2 +- web/app/components/workflow/nodes/llm/utils.ts | 4 ++-- .../components/workflow/nodes/loop/use-config.ts | 7 +------ web/app/components/workflow/nodes/start/utils.ts | 2 +- .../workflow/nodes/template-transform/utils.ts | 2 +- web/app/components/workflow/nodes/tool/utils.ts | 2 +- .../store/workflow/debug/inspect-vars-slice.ts | 16 ++++++++-------- web/eslint.config.mjs | 1 + 14 files changed, 23 insertions(+), 28 deletions(-) diff --git a/web/app/(commonLayout)/datasets/create/page.tsx b/web/app/(commonLayout)/datasets/create/page.tsx index 663a830665..50fd1f5a19 100644 --- a/web/app/(commonLayout)/datasets/create/page.tsx +++ b/web/app/(commonLayout)/datasets/create/page.tsx @@ -1,9 +1,7 @@ import React from 'react' import DatasetUpdateForm from '@/app/components/datasets/create' -type Props = {} - -const DatasetCreation = async (props: Props) => { +const DatasetCreation = async () => { return ( ) diff --git a/web/app/components/app/annotation/header-opts/index.tsx b/web/app/components/app/annotation/header-opts/index.tsx index 463ae58ac2..7347caa2f9 100644 --- a/web/app/components/app/annotation/header-opts/index.tsx +++ b/web/app/components/app/annotation/header-opts/index.tsx @@ -88,7 +88,8 @@ const HeaderOptions: FC = ({ await clearAllAnnotations(appId) onAdded() } - catch (_) { + catch (e) { + console.error(`failed to clear all annotations, ${e}`) } finally { setShowClearConfirm(false) diff --git a/web/app/components/workflow/nodes/answer/utils.ts b/web/app/components/workflow/nodes/answer/utils.ts index 8c3424815c..a77faa5c79 100644 --- a/web/app/components/workflow/nodes/answer/utils.ts +++ b/web/app/components/workflow/nodes/answer/utils.ts @@ -1,5 +1,5 @@ import type { AnswerNodeType } from './types' -export const checkNodeValid = (payload: AnswerNodeType) => { +export const checkNodeValid = (_payload: AnswerNodeType) => { return true } diff --git a/web/app/components/workflow/nodes/assigner/utils.ts b/web/app/components/workflow/nodes/assigner/utils.ts index c9fe123779..d5177787bf 100644 --- a/web/app/components/workflow/nodes/assigner/utils.ts +++ b/web/app/components/workflow/nodes/assigner/utils.ts @@ -1,7 +1,7 @@ import type { AssignerNodeType } from './types' import { AssignerNodeInputType, WriteMode } from './types' -export const checkNodeValid = (payload: AssignerNodeType) => { +export const checkNodeValid = (_payload: AssignerNodeType) => { return true } diff --git a/web/app/components/workflow/nodes/end/utils.ts b/web/app/components/workflow/nodes/end/utils.ts index f214d30c52..6ca87aa235 100644 --- a/web/app/components/workflow/nodes/end/utils.ts +++ b/web/app/components/workflow/nodes/end/utils.ts @@ -1,5 +1,5 @@ import type { EndNodeType } from './types' -export const checkNodeValid = (payload: EndNodeType) => { +export const checkNodeValid = (_payload: EndNodeType) => { return true } diff --git a/web/app/components/workflow/nodes/if-else/components/condition-list/condition-item.tsx b/web/app/components/workflow/nodes/if-else/components/condition-list/condition-item.tsx index eabc10b168..d20bd31125 100644 --- a/web/app/components/workflow/nodes/if-else/components/condition-list/condition-item.tsx +++ b/web/app/components/workflow/nodes/if-else/components/condition-list/condition-item.tsx @@ -202,7 +202,7 @@ const ConditionItem = ({ onRemoveCondition?.(caseId, condition.id) }, [caseId, condition, conditionId, isSubVariableKey, onRemoveCondition, onRemoveSubVariableCondition]) - const handleVarChange = useCallback((valueSelector: ValueSelector, varItem: Var) => { + const handleVarChange = useCallback((valueSelector: ValueSelector, _varItem: Var) => { const resolvedVarType = getVarType({ valueSelector, availableNodes, diff --git a/web/app/components/workflow/nodes/llm/panel.tsx b/web/app/components/workflow/nodes/llm/panel.tsx index b475429265..1206e58734 100644 --- a/web/app/components/workflow/nodes/llm/panel.tsx +++ b/web/app/components/workflow/nodes/llm/panel.tsx @@ -82,7 +82,7 @@ const Panel: FC> = ({ Toast.notify({ type: 'warning', message: `${t('common.modelProvider.parametersInvalidRemoved')}: ${keys.map(k => `${k} (${removedDetails[k]})`).join(', ')}` }) handleCompletionParamsChange(filtered) } - catch (e) { + catch { Toast.notify({ type: 'error', message: t('common.error') }) handleCompletionParamsChange({}) } diff --git a/web/app/components/workflow/nodes/llm/utils.ts b/web/app/components/workflow/nodes/llm/utils.ts index b29646de66..fd943d1fa3 100644 --- a/web/app/components/workflow/nodes/llm/utils.ts +++ b/web/app/components/workflow/nodes/llm/utils.ts @@ -5,7 +5,7 @@ import { Validator } from 'jsonschema' import produce from 'immer' import { z } from 'zod' -export const checkNodeValid = (payload: LLMNodeType) => { +export const checkNodeValid = (_payload: LLMNodeType) => { return true } @@ -280,7 +280,7 @@ const validator = new Validator() export const validateSchemaAgainstDraft7 = (schemaToValidate: any) => { const schema = produce(schemaToValidate, (draft: any) => { - // Make sure the schema has the $schema property for draft-07 + // Make sure the schema has the $schema property for draft-07 if (!draft.$schema) draft.$schema = 'http://json-schema.org/draft-07/schema#' }) diff --git a/web/app/components/workflow/nodes/loop/use-config.ts b/web/app/components/workflow/nodes/loop/use-config.ts index 965fe2b395..4c6e07c9c0 100644 --- a/web/app/components/workflow/nodes/loop/use-config.ts +++ b/web/app/components/workflow/nodes/loop/use-config.ts @@ -6,7 +6,6 @@ import produce from 'immer' import { v4 as uuid4 } from 'uuid' import { useIsChatMode, - useIsNodeInLoop, useNodesReadOnly, useWorkflow, } from '../../hooks' @@ -20,10 +19,8 @@ import type { HandleAddCondition, HandleAddSubVariableCondition, HandleRemoveCon import useIsVarFileAttribute from './use-is-var-file-attribute' import { useStore } from '@/app/components/workflow/store' -const DELIMITER = '@@@@@' const useConfig = (id: string, payload: LoopNodeType) => { const { nodesReadOnly: readOnly } = useNodesReadOnly() - const { isNodeInLoop } = useIsNodeInLoop(id) const isChatMode = useIsChatMode() const conversationVariables = useStore(s => s.conversationVariables) @@ -39,10 +36,8 @@ const useConfig = (id: string, payload: LoopNodeType) => { }, []) // output - const { getLoopNodeChildren, getBeforeNodesInSameBranch } = useWorkflow() - const beforeNodes = getBeforeNodesInSameBranch(id) + const { getLoopNodeChildren } = useWorkflow() const loopChildrenNodes = [{ id, data: payload } as any, ...getLoopNodeChildren(id)] - const canChooseVarNodes = [...beforeNodes, ...loopChildrenNodes] const childrenNodeVars = toNodeOutputVars(loopChildrenNodes, isChatMode, undefined, [], conversationVariables) const { diff --git a/web/app/components/workflow/nodes/start/utils.ts b/web/app/components/workflow/nodes/start/utils.ts index 037b52a163..16743d6178 100644 --- a/web/app/components/workflow/nodes/start/utils.ts +++ b/web/app/components/workflow/nodes/start/utils.ts @@ -1,5 +1,5 @@ import type { StartNodeType } from './types' -export const checkNodeValid = (payload: StartNodeType) => { +export const checkNodeValid = (_payload: StartNodeType) => { return true } diff --git a/web/app/components/workflow/nodes/template-transform/utils.ts b/web/app/components/workflow/nodes/template-transform/utils.ts index 0ca4849a0e..874ee2c1a1 100644 --- a/web/app/components/workflow/nodes/template-transform/utils.ts +++ b/web/app/components/workflow/nodes/template-transform/utils.ts @@ -1,5 +1,5 @@ import type { TemplateTransformNodeType } from './types' -export const checkNodeValid = (payload: TemplateTransformNodeType) => { +export const checkNodeValid = (_payload: TemplateTransformNodeType) => { return true } diff --git a/web/app/components/workflow/nodes/tool/utils.ts b/web/app/components/workflow/nodes/tool/utils.ts index 5ef2c537e5..c55d2f3807 100644 --- a/web/app/components/workflow/nodes/tool/utils.ts +++ b/web/app/components/workflow/nodes/tool/utils.ts @@ -1,5 +1,5 @@ import type { ToolNodeType } from './types' -export const checkNodeValid = (payload: ToolNodeType) => { +export const checkNodeValid = (_payload: ToolNodeType) => { return true } diff --git a/web/app/components/workflow/store/workflow/debug/inspect-vars-slice.ts b/web/app/components/workflow/store/workflow/debug/inspect-vars-slice.ts index 51f66bee13..291ed86ba3 100644 --- a/web/app/components/workflow/store/workflow/debug/inspect-vars-slice.ts +++ b/web/app/components/workflow/store/workflow/debug/inspect-vars-slice.ts @@ -23,7 +23,7 @@ type InspectVarsActions = { export type InspectVarsSliceShape = InspectVarsState & InspectVarsActions -export const createInspectVarsSlice: StateCreator = (set, get) => { +export const createInspectVarsSlice: StateCreator = (set) => { return ({ currentFocusNodeId: null, nodesWithInspectVars: [], @@ -75,11 +75,11 @@ export const createInspectVarsSlice: StateCreator = (set, if (!targetNode) return const targetVar = targetNode.vars.find(varItem => varItem.id === varId) - if(!targetVar) + if (!targetVar) return targetVar.value = value targetVar.edited = true - }, + }, ) return { nodesWithInspectVars: nodes, @@ -93,11 +93,11 @@ export const createInspectVarsSlice: StateCreator = (set, if (!targetNode) return const targetVar = targetNode.vars.find(varItem => varItem.id === varId) - if(!targetVar) + if (!targetVar) return targetVar.value = value targetVar.edited = false - }, + }, ) return { nodesWithInspectVars: nodes, @@ -111,11 +111,11 @@ export const createInspectVarsSlice: StateCreator = (set, if (!targetNode) return const targetVar = targetNode.vars.find(varItem => varItem.id === varId) - if(!targetVar) + if (!targetVar) return targetVar.name = selector[1] targetVar.selector = selector - }, + }, ) return { nodesWithInspectVars: nodes, @@ -131,7 +131,7 @@ export const createInspectVarsSlice: StateCreator = (set, const needChangeVarIndex = targetNode.vars.findIndex(varItem => varItem.id === varId) if (needChangeVarIndex !== -1) targetNode.vars.splice(needChangeVarIndex, 1) - }, + }, ) return { nodesWithInspectVars: nodes, diff --git a/web/eslint.config.mjs b/web/eslint.config.mjs index 8f1598e871..dda2beff02 100644 --- a/web/eslint.config.mjs +++ b/web/eslint.config.mjs @@ -82,6 +82,7 @@ export default combine( '**/.next/', '**/public/*', '**/*.json', + '**/*.js', ], }, { From 3b5130b03d2a9e98f1cdbe0404f2bef1d5b7c6cc Mon Sep 17 00:00:00 2001 From: heyszt <270985384@qq.com> Date: Tue, 5 Aug 2025 11:14:51 +0800 Subject: [PATCH 140/415] add otel instrument for redis and http request (#23371) Co-authored-by: autofix-ci[bot] <114827586+autofix-ci[bot]@users.noreply.github.com> --- api/extensions/ext_otel.py | 4 ++++ api/pyproject.toml | 2 ++ api/uv.lock | 36 +++++++++++++++++++++++++++++++++++- 3 files changed, 41 insertions(+), 1 deletion(-) diff --git a/api/extensions/ext_otel.py b/api/extensions/ext_otel.py index b027a165f9..a8f025a750 100644 --- a/api/extensions/ext_otel.py +++ b/api/extensions/ext_otel.py @@ -136,6 +136,8 @@ def init_app(app: DifyApp): from opentelemetry.exporter.otlp.proto.http.trace_exporter import OTLPSpanExporter as HTTPSpanExporter from opentelemetry.instrumentation.celery import CeleryInstrumentor from opentelemetry.instrumentation.flask import FlaskInstrumentor + from opentelemetry.instrumentation.redis import RedisInstrumentor + from opentelemetry.instrumentation.requests import RequestsInstrumentor from opentelemetry.instrumentation.sqlalchemy import SQLAlchemyInstrumentor from opentelemetry.metrics import get_meter, get_meter_provider, set_meter_provider from opentelemetry.propagate import set_global_textmap @@ -234,6 +236,8 @@ def init_app(app: DifyApp): CeleryInstrumentor(tracer_provider=get_tracer_provider(), meter_provider=get_meter_provider()).instrument() instrument_exception_logging() init_sqlalchemy_instrumentor(app) + RedisInstrumentor().instrument() + RequestsInstrumentor().instrument() atexit.register(shutdown_tracer) diff --git a/api/pyproject.toml b/api/pyproject.toml index d8f663ef8d..9d979eca1c 100644 --- a/api/pyproject.toml +++ b/api/pyproject.toml @@ -49,6 +49,8 @@ dependencies = [ "opentelemetry-instrumentation==0.48b0", "opentelemetry-instrumentation-celery==0.48b0", "opentelemetry-instrumentation-flask==0.48b0", + "opentelemetry-instrumentation-redis==0.48b0", + "opentelemetry-instrumentation-requests==0.48b0", "opentelemetry-instrumentation-sqlalchemy==0.48b0", "opentelemetry-propagator-b3==1.27.0", # opentelemetry-proto1.28.0 depends on protobuf (>=5.0,<6.0), diff --git a/api/uv.lock b/api/uv.lock index 4dced728ac..b00e7564f0 100644 --- a/api/uv.lock +++ b/api/uv.lock @@ -1,5 +1,5 @@ version = 1 -revision = 2 +revision = 3 requires-python = ">=3.11, <3.13" resolution-markers = [ "python_full_version >= '3.12.4' and platform_python_implementation != 'PyPy' and sys_platform == 'linux'", @@ -1265,6 +1265,8 @@ dependencies = [ { name = "opentelemetry-instrumentation" }, { name = "opentelemetry-instrumentation-celery" }, { name = "opentelemetry-instrumentation-flask" }, + { name = "opentelemetry-instrumentation-redis" }, + { name = "opentelemetry-instrumentation-requests" }, { name = "opentelemetry-instrumentation-sqlalchemy" }, { name = "opentelemetry-propagator-b3" }, { name = "opentelemetry-proto" }, @@ -1448,6 +1450,8 @@ requires-dist = [ { name = "opentelemetry-instrumentation", specifier = "==0.48b0" }, { name = "opentelemetry-instrumentation-celery", specifier = "==0.48b0" }, { name = "opentelemetry-instrumentation-flask", specifier = "==0.48b0" }, + { name = "opentelemetry-instrumentation-redis", specifier = "==0.48b0" }, + { name = "opentelemetry-instrumentation-requests", specifier = "==0.48b0" }, { name = "opentelemetry-instrumentation-sqlalchemy", specifier = "==0.48b0" }, { name = "opentelemetry-propagator-b3", specifier = "==1.27.0" }, { name = "opentelemetry-proto", specifier = "==1.27.0" }, @@ -3670,6 +3674,36 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/78/3d/fcde4f8f0bf9fa1ee73a12304fa538076fb83fe0a2ae966ab0f0b7da5109/opentelemetry_instrumentation_flask-0.48b0-py3-none-any.whl", hash = "sha256:26b045420b9d76e85493b1c23fcf27517972423480dc6cf78fd6924248ba5808", size = 14588, upload-time = "2024-08-28T21:26:58.504Z" }, ] +[[package]] +name = "opentelemetry-instrumentation-redis" +version = "0.48b0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "opentelemetry-api" }, + { name = "opentelemetry-instrumentation" }, + { name = "opentelemetry-semantic-conventions" }, + { name = "wrapt" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/70/be/92e98e4c7f275be3d373899a41b0a7d4df64266657d985dbbdb9a54de0d5/opentelemetry_instrumentation_redis-0.48b0.tar.gz", hash = "sha256:61e33e984b4120e1b980d9fba6e9f7ca0c8d972f9970654d8f6e9f27fa115a8c", size = 10511, upload-time = "2024-08-28T21:28:15.061Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/94/40/892f30d400091106309cc047fd3f6d76a828fedd984a953fd5386b78a2fb/opentelemetry_instrumentation_redis-0.48b0-py3-none-any.whl", hash = "sha256:48c7f2e25cbb30bde749dc0d8b9c74c404c851f554af832956b9630b27f5bcb7", size = 11610, upload-time = "2024-08-28T21:27:18.759Z" }, +] + +[[package]] +name = "opentelemetry-instrumentation-requests" +version = "0.48b0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "opentelemetry-api" }, + { name = "opentelemetry-instrumentation" }, + { name = "opentelemetry-semantic-conventions" }, + { name = "opentelemetry-util-http" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/52/ac/5eb78efde21ff21d0ad5dc8c6cc6a0f8ae482ce8a46293c2f45a628b6166/opentelemetry_instrumentation_requests-0.48b0.tar.gz", hash = "sha256:67ab9bd877a0352ee0db4616c8b4ae59736ddd700c598ed907482d44f4c9a2b3", size = 14120, upload-time = "2024-08-28T21:28:16.933Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/43/df/0df9226d1b14f29d23c07e6194b9fd5ad50e7d987b7fd13df7dcf718aeb1/opentelemetry_instrumentation_requests-0.48b0-py3-none-any.whl", hash = "sha256:d4f01852121d0bd4c22f14f429654a735611d4f7bf3cf93f244bdf1489b2233d", size = 12366, upload-time = "2024-08-28T21:27:20.771Z" }, +] + [[package]] name = "opentelemetry-instrumentation-sqlalchemy" version = "0.48b0" From 607dfc8be70d4b47b075c693266b8ae343990431 Mon Sep 17 00:00:00 2001 From: lyzno1 <92089059+lyzno1@users.noreply.github.com> Date: Tue, 5 Aug 2025 13:15:26 +0800 Subject: [PATCH 141/415] fix: remove redundant useEffect from TagSelector component (#23406) --- web/app/components/base/tag-management/selector.tsx | 7 +------ 1 file changed, 1 insertion(+), 6 deletions(-) diff --git a/web/app/components/base/tag-management/selector.tsx b/web/app/components/base/tag-management/selector.tsx index cd03eb84bc..026543cfa7 100644 --- a/web/app/components/base/tag-management/selector.tsx +++ b/web/app/components/base/tag-management/selector.tsx @@ -1,5 +1,5 @@ import type { FC } from 'react' -import { useEffect, useMemo, useState } from 'react' +import { useMemo, useState } from 'react' import { useContext } from 'use-context-selector' import { useTranslation } from 'react-i18next' import { useUnmount } from 'ahooks' @@ -231,11 +231,6 @@ const TagSelector: FC = ({ } } - useEffect(() => { - if (tagList.length === 0) - getTagList() - }, [type]) - const triggerContent = useMemo(() => { if (selectedTags?.length) return selectedTags.filter(selectedTag => tagList.find(tag => tag.id === selectedTag.id)).map(tag => tag.name).join(', ') From d080bea20bd8cfb89184f6f138c5e1bfaa4adfd6 Mon Sep 17 00:00:00 2001 From: lyzno1 <92089059+lyzno1@users.noreply.github.com> Date: Tue, 5 Aug 2025 15:06:40 +0800 Subject: [PATCH 142/415] fix: resolve sidebar animation issues and improve app detail page UX (#23407) --- .../[datasetId]/layout-main.tsx | 73 ++++++++++++------- .../components/app-sidebar/dataset-info.tsx | 18 +++-- 2 files changed, 55 insertions(+), 36 deletions(-) diff --git a/web/app/(commonLayout)/datasets/(datasetDetailLayout)/[datasetId]/layout-main.tsx b/web/app/(commonLayout)/datasets/(datasetDetailLayout)/[datasetId]/layout-main.tsx index 426778c835..d70179266a 100644 --- a/web/app/(commonLayout)/datasets/(datasetDetailLayout)/[datasetId]/layout-main.tsx +++ b/web/app/(commonLayout)/datasets/(datasetDetailLayout)/[datasetId]/layout-main.tsx @@ -56,33 +56,50 @@ const ExtraInfo = ({ isMobile, relatedApps, expand }: IExtraInfoProps) => { }, [isMobile, setShowTips]) return
    - {hasRelatedApps && ( - <> - {!isMobile && ( - - } - > -
    - {relatedAppsTotal || '--'} {t('common.datasetMenus.relatedApp')} - -
    -
    - )} + {/* Related apps for desktop */} +
    + + } + > +
    + {relatedAppsTotal || '--'} {t('common.datasetMenus.relatedApp')} + +
    +
    +
    - {isMobile &&
    - {relatedAppsTotal || '--'} - -
    } - - )} - {!hasRelatedApps && !expand && ( + {/* Related apps for mobile */} +
    +
    + {relatedAppsTotal || '--'} + +
    +
    + + {/* No related apps tooltip */} +
    {
    } > -
    +
    {t('common.datasetMenus.noRelatedApp')}
    - )} +
    } diff --git a/web/app/components/app-sidebar/dataset-info.tsx b/web/app/components/app-sidebar/dataset-info.tsx index 73740133ce..eee7cb3a2e 100644 --- a/web/app/components/app-sidebar/dataset-info.tsx +++ b/web/app/components/app-sidebar/dataset-info.tsx @@ -29,15 +29,17 @@ const DatasetInfo: FC = ({
    - {expand && ( -
    -
    - {name} -
    -
    {isExternal ? t('dataset.externalTag') : t('dataset.localDocs')}
    -
    {description}
    +
    +
    + {name}
    - )} +
    {isExternal ? t('dataset.externalTag') : t('dataset.localDocs')}
    +
    {description}
    +
    {extraInfo}
    ) From 4934dbd0e62526288bc7059c7227c94b4534cad7 Mon Sep 17 00:00:00 2001 From: Minamiyama Date: Tue, 5 Aug 2025 15:08:23 +0800 Subject: [PATCH 143/415] feat(workflow): add relations panel to visualize dependencies (#21998) Co-authored-by: crazywoola <427733928@qq.com> Co-authored-by: crazywoola <100913391+crazywoola@users.noreply.github.com> --- web/app/components/workflow/custom-edge.tsx | 3 +- .../workflow/hooks/use-nodes-interactions.ts | 134 +++++++++++++++++- .../workflow/hooks/use-shortcuts.ts | 33 +++++ .../_base/components/workflow-panel/tab.tsx | 1 + .../components/workflow/nodes/_base/node.tsx | 1 + web/app/components/workflow/types.ts | 4 +- web/i18n/en-US/workflow.ts | 9 ++ web/i18n/ja-JP/workflow.ts | 9 ++ web/i18n/zh-Hans/workflow.ts | 9 ++ web/i18n/zh-Hant/workflow.ts | 13 +- 10 files changed, 211 insertions(+), 5 deletions(-) diff --git a/web/app/components/workflow/custom-edge.tsx b/web/app/components/workflow/custom-edge.tsx index 4467b0adb5..4874fc700b 100644 --- a/web/app/components/workflow/custom-edge.tsx +++ b/web/app/components/workflow/custom-edge.tsx @@ -134,7 +134,8 @@ const CustomEdge = ({ style={{ stroke, strokeWidth: 2, - opacity: data._waitingRun ? 0.7 : 1, + opacity: data._dimmed ? 0.3 : (data._waitingRun ? 0.7 : 1), + strokeDasharray: data._isTemp ? '8 8' : undefined, }} /> diff --git a/web/app/components/workflow/hooks/use-nodes-interactions.ts b/web/app/components/workflow/hooks/use-nodes-interactions.ts index b598951adb..fdfb25b04d 100644 --- a/web/app/components/workflow/hooks/use-nodes-interactions.ts +++ b/web/app/components/workflow/hooks/use-nodes-interactions.ts @@ -1,5 +1,5 @@ import type { MouseEvent } from 'react' -import { useCallback, useRef } from 'react' +import { useCallback, useRef, useState } from 'react' import { useTranslation } from 'react-i18next' import produce from 'immer' import type { @@ -61,6 +61,7 @@ import { } from './use-workflow' import { WorkflowHistoryEvent, useWorkflowHistory } from './use-workflow-history' import useInspectVarsCrud from './use-inspect-vars-crud' +import { getNodeUsedVars } from '../nodes/_base/components/variable/utils' export const useNodesInteractions = () => { const { t } = useTranslation() @@ -1530,6 +1531,135 @@ export const useNodesInteractions = () => { setNodes(nodes) }, [redo, store, workflowHistoryStore, getNodesReadOnly, getWorkflowReadOnly]) + const [isDimming, setIsDimming] = useState(false) + /** Add opacity-30 to all nodes except the nodeId */ + const dimOtherNodes = useCallback(() => { + if (isDimming) + return + const { getNodes, setNodes, edges, setEdges } = store.getState() + const nodes = getNodes() + + const selectedNode = nodes.find(n => n.data.selected) + if (!selectedNode) + return + + setIsDimming(true) + + // const workflowNodes = useStore(s => s.getNodes()) + const workflowNodes = nodes + + const usedVars = getNodeUsedVars(selectedNode) + const dependencyNodes: Node[] = [] + usedVars.forEach((valueSelector) => { + const node = workflowNodes.find(node => node.id === valueSelector?.[0]) + if (node) { + if (!dependencyNodes.includes(node)) + dependencyNodes.push(node) + } + }) + + const outgoers = getOutgoers(selectedNode as Node, nodes as Node[], edges) + for (let currIdx = 0; currIdx < outgoers.length; currIdx++) { + const node = outgoers[currIdx] + const outgoersForNode = getOutgoers(node, nodes as Node[], edges) + outgoersForNode.forEach((item) => { + const existed = outgoers.some(v => v.id === item.id) + if (!existed) + outgoers.push(item) + }) + } + + const dependentNodes: Node[] = [] + outgoers.forEach((node) => { + const usedVars = getNodeUsedVars(node) + const used = usedVars.some(v => v?.[0] === selectedNode.id) + if (used) { + const existed = dependentNodes.some(v => v.id === node.id) + if (!existed) + dependentNodes.push(node) + } + }) + + const dimNodes = [...dependencyNodes, ...dependentNodes, selectedNode] + + const newNodes = produce(nodes, (draft) => { + draft.forEach((n) => { + const dimNode = dimNodes.find(v => v.id === n.id) + if (!dimNode) + n.data._dimmed = true + }) + }) + + setNodes(newNodes) + + const tempEdges: Edge[] = [] + + dependencyNodes.forEach((n) => { + tempEdges.push({ + id: `tmp_${n.id}-source-${selectedNode.id}-target`, + type: CUSTOM_EDGE, + source: n.id, + sourceHandle: 'source_tmp', + target: selectedNode.id, + targetHandle: 'target_tmp', + animated: true, + data: { + sourceType: n.data.type, + targetType: selectedNode.data.type, + _isTemp: true, + _connectedNodeIsHovering: true, + }, + }) + }) + dependentNodes.forEach((n) => { + tempEdges.push({ + id: `tmp_${selectedNode.id}-source-${n.id}-target`, + type: CUSTOM_EDGE, + source: selectedNode.id, + sourceHandle: 'source_tmp', + target: n.id, + targetHandle: 'target_tmp', + animated: true, + data: { + sourceType: selectedNode.data.type, + targetType: n.data.type, + _isTemp: true, + _connectedNodeIsHovering: true, + }, + }) + }) + + const newEdges = produce(edges, (draft) => { + draft.forEach((e) => { + e.data._dimmed = true + }) + draft.push(...tempEdges) + }) + setEdges(newEdges) + }, [isDimming, store]) + + /** Restore all nodes to full opacity */ + const undimAllNodes = useCallback(() => { + const { getNodes, setNodes, edges, setEdges } = store.getState() + const nodes = getNodes() + setIsDimming(false) + + const newNodes = produce(nodes, (draft) => { + draft.forEach((n) => { + n.data._dimmed = false + }) + }) + + setNodes(newNodes) + + const newEdges = produce(edges.filter(e => !e.data._isTemp), (draft) => { + draft.forEach((e) => { + e.data._dimmed = false + }) + }) + setEdges(newEdges) + }, [store]) + return { handleNodeDragStart, handleNodeDrag, @@ -1554,5 +1684,7 @@ export const useNodesInteractions = () => { handleNodeDisconnect, handleHistoryBack, handleHistoryForward, + dimOtherNodes, + undimAllNodes, } } diff --git a/web/app/components/workflow/hooks/use-shortcuts.ts b/web/app/components/workflow/hooks/use-shortcuts.ts index 118ec94058..def4eef9ce 100644 --- a/web/app/components/workflow/hooks/use-shortcuts.ts +++ b/web/app/components/workflow/hooks/use-shortcuts.ts @@ -25,6 +25,8 @@ export const useShortcuts = (): void => { handleNodesDelete, handleHistoryBack, handleHistoryForward, + dimOtherNodes, + undimAllNodes, } = useNodesInteractions() const { handleStartWorkflowRun } = useWorkflowStartRun() const { shortcutsEnabled: workflowHistoryShortcutsEnabled } = useWorkflowHistoryStore() @@ -211,4 +213,35 @@ export const useShortcuts = (): void => { exactMatch: true, useCapture: true, }) + + // Shift ↓ + useKeyPress( + 'shift', + (e) => { + console.log('Shift down', e) + if (shouldHandleShortcut(e)) + dimOtherNodes() + }, + { + exactMatch: true, + useCapture: true, + events: ['keydown'], + }, + ) + + // Shift ↑ + useKeyPress( + (e) => { + return e.key === 'Shift' + }, + (e) => { + if (shouldHandleShortcut(e)) + undimAllNodes() + }, + { + exactMatch: true, + useCapture: true, + events: ['keyup'], + }, + ) } diff --git a/web/app/components/workflow/nodes/_base/components/workflow-panel/tab.tsx b/web/app/components/workflow/nodes/_base/components/workflow-panel/tab.tsx index 09d7ed266d..08bbdf4068 100644 --- a/web/app/components/workflow/nodes/_base/components/workflow-panel/tab.tsx +++ b/web/app/components/workflow/nodes/_base/components/workflow-panel/tab.tsx @@ -7,6 +7,7 @@ import { useTranslation } from 'react-i18next' export enum TabType { settings = 'settings', lastRun = 'lastRun', + relations = 'relations', } type Props = { diff --git a/web/app/components/workflow/nodes/_base/node.tsx b/web/app/components/workflow/nodes/_base/node.tsx index 68f2e3d572..c2600fd035 100644 --- a/web/app/components/workflow/nodes/_base/node.tsx +++ b/web/app/components/workflow/nodes/_base/node.tsx @@ -143,6 +143,7 @@ const BaseNode: FC = ({ showSelectedBorder ? 'border-components-option-card-option-selected-border' : 'border-transparent', !showSelectedBorder && data._inParallelHovering && 'border-workflow-block-border-highlight', data._waitingRun && 'opacity-70', + data._dimmed && 'opacity-30', )} ref={nodeRef} style={{ diff --git a/web/app/components/workflow/types.ts b/web/app/components/workflow/types.ts index 5840a04f26..61ebdb64a2 100644 --- a/web/app/components/workflow/types.ts +++ b/web/app/components/workflow/types.ts @@ -94,6 +94,7 @@ export type CommonNodeType = { retry_config?: WorkflowRetryConfig default_value?: DefaultValueForm[] credential_id?: string + _dimmed?: boolean } & T & Partial> export type CommonEdgeType = { @@ -109,7 +110,8 @@ export type CommonEdgeType = { isInLoop?: boolean loop_id?: string sourceType: BlockEnum - targetType: BlockEnum + targetType: BlockEnum, + _isTemp?: boolean, } export type Node = ReactFlowNode> diff --git a/web/i18n/en-US/workflow.ts b/web/i18n/en-US/workflow.ts index 9a8492f50e..10b74dadb3 100644 --- a/web/i18n/en-US/workflow.ts +++ b/web/i18n/en-US/workflow.ts @@ -943,6 +943,7 @@ const translation = { debug: { settingsTab: 'Settings', lastRunTab: 'Last Run', + relationsTab: 'Relations', noData: { description: 'The results of the last run will be displayed here', runThisNode: 'Run this node', @@ -968,6 +969,14 @@ const translation = { chatNode: 'Conversation', systemNode: 'System', }, + relations: { + dependencies: 'Dependencies', + dependents: 'Dependents', + dependenciesDescription: 'Nodes that this node relies on', + dependentsDescription: 'Nodes that rely on this node', + noDependencies: 'No dependencies', + noDependents: 'No dependents', + }, }, } diff --git a/web/i18n/ja-JP/workflow.ts b/web/i18n/ja-JP/workflow.ts index 59791c5c7e..a987efdfb1 100644 --- a/web/i18n/ja-JP/workflow.ts +++ b/web/i18n/ja-JP/workflow.ts @@ -968,6 +968,15 @@ const translation = { }, settingsTab: '設定', lastRunTab: '最後の実行', + relationsTab: '関係', + relations: { + dependencies: '依存元', + dependents: '依存先', + dependenciesDescription: 'このノードが依存している他のノード', + dependentsDescription: 'このノードに依存している他のノード', + noDependencies: '依存元なし', + noDependents: '依存先なし', + }, }, } diff --git a/web/i18n/zh-Hans/workflow.ts b/web/i18n/zh-Hans/workflow.ts index 25f05ce8ba..dbc37a7b38 100644 --- a/web/i18n/zh-Hans/workflow.ts +++ b/web/i18n/zh-Hans/workflow.ts @@ -943,6 +943,7 @@ const translation = { debug: { settingsTab: '设置', lastRunTab: '上次运行', + relationsTab: '关系', noData: { description: '上次运行的结果将显示在这里', runThisNode: '运行此节点', @@ -968,6 +969,14 @@ const translation = { chatNode: '会话变量', systemNode: '系统变量', }, + relations: { + dependencies: '依赖', + dependents: '被依赖', + dependenciesDescription: '本节点依赖的其他节点', + dependentsDescription: '依赖于本节点的其他节点', + noDependencies: '无依赖', + noDependents: '无被依赖', + }, }, } diff --git a/web/i18n/zh-Hant/workflow.ts b/web/i18n/zh-Hant/workflow.ts index 6a5e990909..ce10ad387f 100644 --- a/web/i18n/zh-Hant/workflow.ts +++ b/web/i18n/zh-Hant/workflow.ts @@ -941,6 +941,9 @@ const translation = { copyId: '複製ID', }, debug: { + settingsTab: '設定', + lastRunTab: '最後一次運行', + relationsTab: '關係', noData: { runThisNode: '運行此節點', description: '上次運行的結果將顯示在這裡', @@ -966,8 +969,14 @@ const translation = { emptyTip: '在畫布上逐步執行節點或逐步運行節點後,您可以在變數檢視中查看節點變數的當前值。', resetConversationVar: '將對話變數重置為默認值', }, - settingsTab: '設定', - lastRunTab: '最後一次運行', + relations: { + dependencies: '依賴', + dependents: '被依賴', + dependenciesDescription: '此節點所依賴的其他節點', + dependentsDescription: '依賴此節點的其他節點', + noDependencies: '無依賴', + noDependents: '無被依賴', + }, }, } From 904af200235e2fc1f50266799a5471566385aeea Mon Sep 17 00:00:00 2001 From: Xiyuan Chen <52963600+GareArc@users.noreply.github.com> Date: Tue, 5 Aug 2025 16:07:48 +0800 Subject: [PATCH 144/415] Feat/webapp opt (#23283) --- .../console/explore/installed_app.py | 37 ++++++++++++++----- api/services/enterprise/enterprise_service.py | 10 +++++ 2 files changed, 37 insertions(+), 10 deletions(-) diff --git a/api/controllers/console/explore/installed_app.py b/api/controllers/console/explore/installed_app.py index 6d9f794307..ad62bd6e08 100644 --- a/api/controllers/console/explore/installed_app.py +++ b/api/controllers/console/explore/installed_app.py @@ -58,21 +58,38 @@ class InstalledAppsListApi(Resource): # filter out apps that user doesn't have access to if FeatureService.get_system_features().webapp_auth.enabled: user_id = current_user.id - res = [] app_ids = [installed_app["app"].id for installed_app in installed_app_list] webapp_settings = EnterpriseService.WebAppAuth.batch_get_app_access_mode_by_id(app_ids) + + # Pre-filter out apps without setting or with sso_verified + filtered_installed_apps = [] + app_id_to_app_code = {} + for installed_app in installed_app_list: - webapp_setting = webapp_settings.get(installed_app["app"].id) - if not webapp_setting: + app_id = installed_app["app"].id + webapp_setting = webapp_settings.get(app_id) + if not webapp_setting or webapp_setting.access_mode == "sso_verified": continue - if webapp_setting.access_mode == "sso_verified": - continue - app_code = AppService.get_app_code_by_id(str(installed_app["app"].id)) - if EnterpriseService.WebAppAuth.is_user_allowed_to_access_webapp( - user_id=user_id, - app_code=app_code, - ): + app_code = AppService.get_app_code_by_id(str(app_id)) + app_id_to_app_code[app_id] = app_code + filtered_installed_apps.append(installed_app) + + app_codes = list(app_id_to_app_code.values()) + + # Batch permission check + permissions = EnterpriseService.WebAppAuth.batch_is_user_allowed_to_access_webapps( + user_id=user_id, + app_codes=app_codes, + ) + + # Keep only allowed apps + res = [] + for installed_app in filtered_installed_apps: + app_id = installed_app["app"].id + app_code = app_id_to_app_code[app_id] + if permissions.get(app_code): res.append(installed_app) + installed_app_list = res logger.debug("installed_app_list: %s, user_id: %s", installed_app_list, user_id) diff --git a/api/services/enterprise/enterprise_service.py b/api/services/enterprise/enterprise_service.py index 54d45f45ea..f8612456d6 100644 --- a/api/services/enterprise/enterprise_service.py +++ b/api/services/enterprise/enterprise_service.py @@ -52,6 +52,16 @@ class EnterpriseService: return data.get("result", False) + @classmethod + def batch_is_user_allowed_to_access_webapps(cls, user_id: str, app_codes: list[str]): + if not app_codes: + return {} + body = {"userId": user_id, "appCodes": app_codes} + data = EnterpriseRequest.send_request("POST", "/webapp/permission/batch", json=body) + if not data: + raise ValueError("No data found.") + return data.get("permissions", {}) + @classmethod def get_app_access_mode_by_id(cls, app_id: str) -> WebAppSettings: if not app_id: From 52050d3dff1edb5dbc3729f58fd79f9b69bdaa57 Mon Sep 17 00:00:00 2001 From: Xiyuan Chen <52963600+GareArc@users.noreply.github.com> Date: Tue, 5 Aug 2025 16:42:07 +0800 Subject: [PATCH 145/415] feat(workflow): add support for release/e-* tag in build-push workflow (#23418) --- .github/workflows/build-push.yml | 1 + 1 file changed, 1 insertion(+) diff --git a/.github/workflows/build-push.yml b/.github/workflows/build-push.yml index b933560a5e..17af047267 100644 --- a/.github/workflows/build-push.yml +++ b/.github/workflows/build-push.yml @@ -7,6 +7,7 @@ on: - "deploy/dev" - "deploy/enterprise" - "build/**" + - "release/e-*" tags: - "*" From 5eb061466f385eb5fc9a686a7ebfc1a59ebf0a33 Mon Sep 17 00:00:00 2001 From: crazywoola <100913391+crazywoola@users.noreply.github.com> Date: Tue, 5 Aug 2025 04:35:30 -0700 Subject: [PATCH 146/415] chore: update tmpl (#23438) --- .github/ISSUE_TEMPLATE/chore.yaml | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/.github/ISSUE_TEMPLATE/chore.yaml b/.github/ISSUE_TEMPLATE/chore.yaml index 953399bf77..43449ef942 100644 --- a/.github/ISSUE_TEMPLATE/chore.yaml +++ b/.github/ISSUE_TEMPLATE/chore.yaml @@ -1,6 +1,6 @@ -name: "✨ Enhancement / Refactor" -description: Suggest an enhancement or request a code refactor. -title: "[Enhancement/Refactor] " +name: "✨ Refactor" +description: Refactor existing code for improved readability and maintainability. +title: "[Chore/Refactor] " labels: - refactor body: @@ -8,14 +8,14 @@ body: id: description attributes: label: Description - placeholder: "Describe the enhancement or refactor you are proposing." + placeholder: "Describe the refactor you are proposing." validations: required: true - type: textarea id: motivation attributes: label: Motivation - placeholder: "Why is this enhancement or refactor needed?" + placeholder: "Explain why this refactor is necessary." validations: required: false - type: textarea From 2cd3fe0dce6fdde6a3f61e596573669aee4bfcf7 Mon Sep 17 00:00:00 2001 From: lyzno1 <92089059+lyzno1@users.noreply.github.com> Date: Tue, 5 Aug 2025 19:36:07 +0800 Subject: [PATCH 147/415] fix: Multiple UI component improvements and code quality enhancements (#23446) --- .../svg-attribute-error-reproduction.spec.tsx | 156 ++++++++++++++++++ .../overview/tracing/config-button.tsx | 27 +-- .../[appId]/overview/tracing/panel.tsx | 126 +++++++------- web/app/components/apps/footer.tsx | 6 +- web/app/components/base/icons/utils.ts | 17 ++ .../components/base/tag-management/filter.tsx | 5 +- 6 files changed, 249 insertions(+), 88 deletions(-) create mode 100644 web/app/(commonLayout)/app/(appDetailLayout)/[appId]/overview/tracing/__tests__/svg-attribute-error-reproduction.spec.tsx diff --git a/web/app/(commonLayout)/app/(appDetailLayout)/[appId]/overview/tracing/__tests__/svg-attribute-error-reproduction.spec.tsx b/web/app/(commonLayout)/app/(appDetailLayout)/[appId]/overview/tracing/__tests__/svg-attribute-error-reproduction.spec.tsx new file mode 100644 index 0000000000..a3281be8eb --- /dev/null +++ b/web/app/(commonLayout)/app/(appDetailLayout)/[appId]/overview/tracing/__tests__/svg-attribute-error-reproduction.spec.tsx @@ -0,0 +1,156 @@ +import React from 'react' +import { render } from '@testing-library/react' +import '@testing-library/jest-dom' +import { OpikIconBig } from '@/app/components/base/icons/src/public/tracing' + +// Mock dependencies to isolate the SVG rendering issue +jest.mock('react-i18next', () => ({ + useTranslation: () => ({ + t: (key: string) => key, + }), +})) + +describe('SVG Attribute Error Reproduction', () => { + // Capture console errors + const originalError = console.error + let errorMessages: string[] = [] + + beforeEach(() => { + errorMessages = [] + console.error = jest.fn((message) => { + errorMessages.push(message) + originalError(message) + }) + }) + + afterEach(() => { + console.error = originalError + }) + + it('should reproduce inkscape attribute errors when rendering OpikIconBig', () => { + console.log('\n=== TESTING OpikIconBig SVG ATTRIBUTE ERRORS ===') + + // Test multiple renders to check for inconsistency + for (let i = 0; i < 5; i++) { + console.log(`\nRender attempt ${i + 1}:`) + + const { unmount } = render() + + // Check for specific inkscape attribute errors + const inkscapeErrors = errorMessages.filter(msg => + typeof msg === 'string' && msg.includes('inkscape'), + ) + + if (inkscapeErrors.length > 0) { + console.log(`Found ${inkscapeErrors.length} inkscape errors:`) + inkscapeErrors.forEach((error, index) => { + console.log(` ${index + 1}. ${error.substring(0, 100)}...`) + }) + } + else { + console.log('No inkscape errors found in this render') + } + + unmount() + + // Clear errors for next iteration + errorMessages = [] + } + }) + + it('should analyze the SVG structure causing the errors', () => { + console.log('\n=== ANALYZING SVG STRUCTURE ===') + + // Import the JSON data directly + const iconData = require('@/app/components/base/icons/src/public/tracing/OpikIconBig.json') + + console.log('Icon structure analysis:') + console.log('- Root element:', iconData.icon.name) + console.log('- Children count:', iconData.icon.children?.length || 0) + + // Find problematic elements + const findProblematicElements = (node: any, path = '') => { + const problematicElements: any[] = [] + + if (node.name && (node.name.includes(':') || node.name.startsWith('sodipodi'))) { + problematicElements.push({ + path, + name: node.name, + attributes: Object.keys(node.attributes || {}), + }) + } + + // Check attributes for inkscape/sodipodi properties + if (node.attributes) { + const problematicAttrs = Object.keys(node.attributes).filter(attr => + attr.startsWith('inkscape:') || attr.startsWith('sodipodi:'), + ) + + if (problematicAttrs.length > 0) { + problematicElements.push({ + path, + name: node.name, + problematicAttributes: problematicAttrs, + }) + } + } + + if (node.children) { + node.children.forEach((child: any, index: number) => { + problematicElements.push( + ...findProblematicElements(child, `${path}/${node.name}[${index}]`), + ) + }) + } + + return problematicElements + } + + const problematicElements = findProblematicElements(iconData.icon, 'root') + + console.log(`\n🚨 Found ${problematicElements.length} problematic elements:`) + problematicElements.forEach((element, index) => { + console.log(`\n${index + 1}. Element: ${element.name}`) + console.log(` Path: ${element.path}`) + if (element.problematicAttributes) + console.log(` Problematic attributes: ${element.problematicAttributes.join(', ')}`) + }) + }) + + it('should test the normalizeAttrs function behavior', () => { + console.log('\n=== TESTING normalizeAttrs FUNCTION ===') + + const { normalizeAttrs } = require('@/app/components/base/icons/utils') + + const testAttributes = { + 'inkscape:showpageshadow': '2', + 'inkscape:pageopacity': '0.0', + 'inkscape:pagecheckerboard': '0', + 'inkscape:deskcolor': '#d1d1d1', + 'sodipodi:docname': 'opik-icon-big.svg', + 'xmlns:inkscape': 'https://www.inkscape.org/namespaces/inkscape', + 'xmlns:sodipodi': 'https://sodipodi.sourceforge.net/DTD/sodipodi-0.dtd', + 'xmlns:svg': 'https://www.w3.org/2000/svg', + 'data-name': 'Layer 1', + 'normal-attr': 'value', + 'class': 'test-class', + } + + console.log('Input attributes:', Object.keys(testAttributes)) + + const normalized = normalizeAttrs(testAttributes) + + console.log('Normalized attributes:', Object.keys(normalized)) + console.log('Normalized values:', normalized) + + // Check if problematic attributes are still present + const problematicKeys = Object.keys(normalized).filter(key => + key.toLowerCase().includes('inkscape') || key.toLowerCase().includes('sodipodi'), + ) + + if (problematicKeys.length > 0) + console.log(`🚨 PROBLEM: Still found problematic attributes: ${problematicKeys.join(', ')}`) + else + console.log('✅ No problematic attributes found after normalization') + }) +}) diff --git a/web/app/(commonLayout)/app/(appDetailLayout)/[appId]/overview/tracing/config-button.tsx b/web/app/(commonLayout)/app/(appDetailLayout)/[appId]/overview/tracing/config-button.tsx index 3d05575127..1ab40e31bf 100644 --- a/web/app/(commonLayout)/app/(appDetailLayout)/[appId]/overview/tracing/config-button.tsx +++ b/web/app/(commonLayout)/app/(appDetailLayout)/[appId]/overview/tracing/config-button.tsx @@ -1,12 +1,9 @@ 'use client' import type { FC } from 'react' -import React, { useCallback, useEffect, useRef, useState } from 'react' -import { - RiEqualizer2Line, -} from '@remixicon/react' +import React, { useCallback, useRef, useState } from 'react' + import type { PopupProps } from './config-popup' import ConfigPopup from './config-popup' -import cn from '@/utils/classnames' import { PortalToFollowElem, PortalToFollowElemContent, @@ -17,13 +14,13 @@ type Props = { readOnly: boolean className?: string hasConfigured: boolean - controlShowPopup?: number + children?: React.ReactNode } & PopupProps const ConfigBtn: FC = ({ className, hasConfigured, - controlShowPopup, + children, ...popupProps }) => { const [open, doSetOpen] = useState(false) @@ -37,13 +34,6 @@ const ConfigBtn: FC = ({ setOpen(!openRef.current) }, [setOpen]) - useEffect(() => { - if (controlShowPopup) - // setOpen(!openRef.current) - setOpen(true) - // eslint-disable-next-line react-hooks/exhaustive-deps - }, [controlShowPopup]) - if (popupProps.readOnly && !hasConfigured) return null @@ -52,14 +42,11 @@ const ConfigBtn: FC = ({ open={open} onOpenChange={setOpen} placement='bottom-end' - offset={{ - mainAxis: 12, - crossAxis: hasConfigured ? 8 : 49, - }} + offset={12} > -
    - +
    + {children}
    diff --git a/web/app/(commonLayout)/app/(appDetailLayout)/[appId]/overview/tracing/panel.tsx b/web/app/(commonLayout)/app/(appDetailLayout)/[appId]/overview/tracing/panel.tsx index d082523222..7564a0f3c8 100644 --- a/web/app/(commonLayout)/app/(appDetailLayout)/[appId]/overview/tracing/panel.tsx +++ b/web/app/(commonLayout)/app/(appDetailLayout)/[appId]/overview/tracing/panel.tsx @@ -1,8 +1,9 @@ 'use client' import type { FC } from 'react' -import React, { useCallback, useEffect, useState } from 'react' +import React, { useEffect, useState } from 'react' import { RiArrowDownDoubleLine, + RiEqualizer2Line, } from '@remixicon/react' import { useTranslation } from 'react-i18next' import { usePathname } from 'next/navigation' @@ -180,10 +181,6 @@ const Panel: FC = () => { })() }, []) - const [controlShowPopup, setControlShowPopup] = useState(0) - const showPopup = useCallback(() => { - setControlShowPopup(Date.now()) - }, [setControlShowPopup]) if (!isLoaded) { return (
    @@ -196,46 +193,66 @@ const Panel: FC = () => { return (
    -
    - {!inUseTracingProvider && ( - <> + {!inUseTracingProvider && ( + +
    {t(`${I18N_PREFIX}.title`)}
    -
    e.stopPropagation()}> - +
    +
    - - )} - {hasConfiguredTracing && ( - <> +
    + + )} + {hasConfiguredTracing && ( + +
    @@ -243,33 +260,14 @@ const Panel: FC = () => {
    {InUseProviderIcon && } - -
    e.stopPropagation()}> - +
    +
    - - )} -
    -
    + +
    +
    + )} +
    ) } export default React.memo(Panel) diff --git a/web/app/components/apps/footer.tsx b/web/app/components/apps/footer.tsx index 18b7779651..c5efb2b8b4 100644 --- a/web/app/components/apps/footer.tsx +++ b/web/app/components/apps/footer.tsx @@ -36,13 +36,13 @@ const Footer = () => { return null return ( -
    +

    {t('app.join')}

    {t('app.communityIntro')}

    diff --git a/web/app/components/base/icons/utils.ts b/web/app/components/base/icons/utils.ts index 90d075f01c..632e362075 100644 --- a/web/app/components/base/icons/utils.ts +++ b/web/app/components/base/icons/utils.ts @@ -14,9 +14,26 @@ export type Attrs = { export function normalizeAttrs(attrs: Attrs = {}): Attrs { return Object.keys(attrs).reduce((acc: Attrs, key) => { + // Filter out editor metadata attributes before processing + if (key.startsWith('inkscape:') + || key.startsWith('sodipodi:') + || key.startsWith('xmlns:inkscape') + || key.startsWith('xmlns:sodipodi') + || key.startsWith('xmlns:svg') + || key === 'data-name') + return acc + const val = attrs[key] key = key.replace(/([-]\w)/g, (g: string) => g[1].toUpperCase()) key = key.replace(/([:]\w)/g, (g: string) => g[1].toUpperCase()) + + // Additional filter after camelCase conversion + if (key === 'xmlnsInkscape' + || key === 'xmlnsSodipodi' + || key === 'xmlnsSvg' + || key === 'dataName') + return acc + switch (key) { case 'class': acc.className = val diff --git a/web/app/components/base/tag-management/filter.tsx b/web/app/components/base/tag-management/filter.tsx index ecc159b2fc..4cf01fdc26 100644 --- a/web/app/components/base/tag-management/filter.tsx +++ b/web/app/components/base/tag-management/filter.tsx @@ -139,7 +139,10 @@ const TagFilter: FC = ({
    -
    setShowTagManagementModal(true)}> +
    { + setShowTagManagementModal(true) + setOpen(false) + }}>
    {t('common.tag.manageTags')} From 84543a591a98eb42db4956f0c68e5a6efcbe5d7f Mon Sep 17 00:00:00 2001 From: lyzno1 <92089059+lyzno1@users.noreply.github.com> Date: Tue, 5 Aug 2025 19:36:25 +0800 Subject: [PATCH 148/415] i18n/sync (#23429) --- web/i18n/de-DE/workflow.ts | 9 +++++++++ web/i18n/es-ES/workflow.ts | 9 +++++++++ web/i18n/fa-IR/workflow.ts | 12 +++++++++--- web/i18n/fr-FR/workflow.ts | 12 +++++++++--- web/i18n/hi-IN/workflow.ts | 12 +++++++++--- web/i18n/it-IT/workflow.ts | 12 +++++++++--- web/i18n/ko-KR/workflow.ts | 9 +++++++++ web/i18n/pl-PL/workflow.ts | 12 +++++++++--- web/i18n/pt-BR/workflow.ts | 12 +++++++++--- web/i18n/ro-RO/workflow.ts | 12 +++++++++--- web/i18n/ru-RU/workflow.ts | 12 +++++++++--- web/i18n/sl-SI/workflow.ts | 9 +++++++++ web/i18n/th-TH/workflow.ts | 12 +++++++++--- web/i18n/tr-TR/workflow.ts | 12 +++++++++--- web/i18n/uk-UA/workflow.ts | 12 +++++++++--- web/i18n/vi-VN/workflow.ts | 12 +++++++++--- 16 files changed, 144 insertions(+), 36 deletions(-) diff --git a/web/i18n/de-DE/workflow.ts b/web/i18n/de-DE/workflow.ts index 1bd965c731..79e54b96c0 100644 --- a/web/i18n/de-DE/workflow.ts +++ b/web/i18n/de-DE/workflow.ts @@ -968,6 +968,15 @@ const translation = { }, settingsTab: 'Einstellungen', lastRunTab: 'Letzte Ausführung', + relations: { + dependents: 'Angehörige', + dependenciesDescription: 'Knoten, auf die sich dieser Knoten stützt', + dependencies: 'Abhängigkeiten', + noDependencies: 'Keine Abhängigkeiten', + dependentsDescription: 'Knoten, die auf diesem Knoten basieren', + noDependents: 'Keine Angehörigen', + }, + relationsTab: 'Beziehungen', }, } diff --git a/web/i18n/es-ES/workflow.ts b/web/i18n/es-ES/workflow.ts index d4958c3c0d..5801647611 100644 --- a/web/i18n/es-ES/workflow.ts +++ b/web/i18n/es-ES/workflow.ts @@ -968,6 +968,15 @@ const translation = { }, lastRunTab: 'Última ejecución', settingsTab: 'Ajustes', + relations: { + dependents: 'Dependientes', + dependenciesDescription: 'Nodos en los que se basa este nodo', + dependentsDescription: 'Nodos que dependen de este nodo', + noDependencies: 'Sin dependencias', + noDependents: 'Sin dependientes', + dependencies: 'Dependencias', + }, + relationsTab: 'Relaciones', }, } diff --git a/web/i18n/fa-IR/workflow.ts b/web/i18n/fa-IR/workflow.ts index b3de497c2f..55086a1a3b 100644 --- a/web/i18n/fa-IR/workflow.ts +++ b/web/i18n/fa-IR/workflow.ts @@ -104,9 +104,7 @@ const translation = { noHistory: 'بدون تاریخچه', loadMore: 'بارگذاری گردش کار بیشتر', exportPNG: 'صادرات به فرمت PNG', - noExist: 'هیچگونه متغیری وجود ندارد', exitVersions: 'نسخه‌های خروجی', - referenceVar: 'متغیر مرجع', exportSVG: 'صادرات به فرمت SVG', exportJPEG: 'صادرات به فرمت JPEG', exportImage: 'تصویر را صادر کنید', @@ -608,7 +606,6 @@ const translation = { }, select: 'انتخاب', addSubVariable: 'متغیر فرعی', - condition: 'شرط', }, variableAssigner: { title: 'تخصیص متغیرها', @@ -971,6 +968,15 @@ const translation = { }, settingsTab: 'تنظیمات', lastRunTab: 'آخرین اجرا', + relations: { + dependents: 'وابسته', + dependencies: 'وابسته', + noDependents: 'بدون وابستگان', + noDependencies: 'بدون وابستگی', + dependenciesDescription: 'گره هایی که این گره به آنها متکی است', + dependentsDescription: 'گره هایی که به این گره متکی هستند', + }, + relationsTab: 'روابط', }, } diff --git a/web/i18n/fr-FR/workflow.ts b/web/i18n/fr-FR/workflow.ts index adc3eb125c..20d5d23cee 100644 --- a/web/i18n/fr-FR/workflow.ts +++ b/web/i18n/fr-FR/workflow.ts @@ -107,9 +107,7 @@ const translation = { exitVersions: 'Versions de sortie', exportSVG: 'Exporter en SVG', publishUpdate: 'Publier une mise à jour', - noExist: 'Aucune variable de ce type', versionHistory: 'Historique des versions', - referenceVar: 'Variable de référence', exportImage: 'Exporter l\'image', exportJPEG: 'Exporter en JPEG', needEndNode: 'Le nœud de fin doit être ajouté', @@ -608,7 +606,6 @@ const translation = { }, select: 'Choisir', addSubVariable: 'Sous-variable', - condition: 'Condition', }, variableAssigner: { title: 'Attribuer des variables', @@ -971,6 +968,15 @@ const translation = { }, settingsTab: 'Paramètres', lastRunTab: 'Dernière Exécution', + relations: { + dependencies: 'Dépendances', + dependentsDescription: 'Nœuds qui s’appuient sur ce nœud', + noDependents: 'Pas de personnes à charge', + dependents: 'Dépendants', + noDependencies: 'Aucune dépendance', + dependenciesDescription: 'Nœuds sur lesquels repose ce nœud', + }, + relationsTab: 'Relations', }, } diff --git a/web/i18n/hi-IN/workflow.ts b/web/i18n/hi-IN/workflow.ts index 923abfaeb5..74c5e04097 100644 --- a/web/i18n/hi-IN/workflow.ts +++ b/web/i18n/hi-IN/workflow.ts @@ -109,8 +109,6 @@ const translation = { exitVersions: 'निकलने के संस्करण', exportPNG: 'PNG के रूप में निर्यात करें', exportJPEG: 'JPEG के रूप में निर्यात करें', - referenceVar: 'संदर्भ चर', - noExist: 'कोई ऐसा चर नहीं है', exportImage: 'छवि निर्यात करें', publishUpdate: 'अपडेट प्रकाशित करें', exportSVG: 'SVG के रूप में निर्यात करें', @@ -623,7 +621,6 @@ const translation = { }, select: 'चुनना', addSubVariable: 'उप चर', - condition: 'स्थिति', }, variableAssigner: { title: 'वेरिएबल्स असाइन करें', @@ -991,6 +988,15 @@ const translation = { }, settingsTab: 'सेटिंग्स', lastRunTab: 'अंतिम रन', + relations: { + dependents: 'निष्पाभ लोग', + dependentsDescription: 'इस नोड पर निर्भर नोड्स', + dependencies: 'निर्भरता', + noDependents: 'कोई आश्रित नहीं', + dependenciesDescription: 'यह नोड जिस नोड पर निर्भर करता है', + noDependencies: 'कोई निर्भरताएँ नहीं', + }, + relationsTab: 'रिश्ते', }, } diff --git a/web/i18n/it-IT/workflow.ts b/web/i18n/it-IT/workflow.ts index 49b3a11e38..d006eba2be 100644 --- a/web/i18n/it-IT/workflow.ts +++ b/web/i18n/it-IT/workflow.ts @@ -110,11 +110,9 @@ const translation = { publishUpdate: 'Pubblica aggiornamento', versionHistory: 'Cronologia delle versioni', exitVersions: 'Uscita Versioni', - referenceVar: 'Variabile di riferimento', exportSVG: 'Esporta come SVG', exportImage: 'Esporta immagine', exportJPEG: 'Esporta come JPEG', - noExist: 'Nessuna variabile del genere', exportPNG: 'Esporta come PNG', needEndNode: 'Deve essere aggiunto il nodo finale', addBlock: 'Aggiungi nodo', @@ -627,7 +625,6 @@ const translation = { }, addSubVariable: 'Variabile secondaria', select: 'Selezionare', - condition: 'Condizione', }, variableAssigner: { title: 'Assegna variabili', @@ -997,6 +994,15 @@ const translation = { }, settingsTab: 'Impostazioni', lastRunTab: 'Ultima corsa', + relations: { + dependents: 'Dipendenti', + noDependencies: 'Nessuna dipendenza', + dependencies: 'Dipendenze', + noDependents: 'Nessuna persona a carico', + dependentsDescription: 'Nodi che si basano su questo nodo', + dependenciesDescription: 'Nodi su cui si basa questo nodo', + }, + relationsTab: 'Relazioni', }, } diff --git a/web/i18n/ko-KR/workflow.ts b/web/i18n/ko-KR/workflow.ts index 9b1ec69603..bc3ed580b6 100644 --- a/web/i18n/ko-KR/workflow.ts +++ b/web/i18n/ko-KR/workflow.ts @@ -1019,6 +1019,15 @@ const translation = { }, settingsTab: '설정', lastRunTab: '마지막 실행', + relations: { + dependencies: '종속성', + dependentsDescription: '이 노드에 의존하는 노드', + noDependents: '부양가족 없음', + noDependencies: '종속성 없음', + dependents: '부양 가족', + dependenciesDescription: '이 노드가 의존하는 노드', + }, + relationsTab: '관계', }, } diff --git a/web/i18n/pl-PL/workflow.ts b/web/i18n/pl-PL/workflow.ts index 8c17ba0ff6..29ab3ff182 100644 --- a/web/i18n/pl-PL/workflow.ts +++ b/web/i18n/pl-PL/workflow.ts @@ -108,10 +108,8 @@ const translation = { versionHistory: 'Historia wersji', exportSVG: 'Eksportuj jako SVG', exportJPEG: 'Eksportuj jako JPEG', - noExist: 'Nie ma takiej zmiennej', exportPNG: 'Eksportuj jako PNG', publishUpdate: 'Opublikuj aktualizację', - referenceVar: 'Zmienna odniesienia', addBlock: 'Dodaj węzeł', needEndNode: 'Należy dodać węzeł końcowy', needAnswerNode: 'Węzeł odpowiedzi musi zostać dodany', @@ -608,7 +606,6 @@ const translation = { }, addSubVariable: 'Zmienna podrzędna', select: 'Wybrać', - condition: 'Stan', }, variableAssigner: { title: 'Przypisz zmienne', @@ -971,6 +968,15 @@ const translation = { }, settingsTab: 'Ustawienia', lastRunTab: 'Ostatnie uruchomienie', + relations: { + dependencies: 'Zależności', + dependenciesDescription: 'Węzły, na których opiera się ten węzeł', + noDependents: 'Brak osób na utrzymaniu', + dependents: 'Zależności', + dependentsDescription: 'Węzły, które opierają się na tym węźle', + noDependencies: 'Brak zależności', + }, + relationsTab: 'Stosunków', }, } diff --git a/web/i18n/pt-BR/workflow.ts b/web/i18n/pt-BR/workflow.ts index 4d933994db..8c6c3df90a 100644 --- a/web/i18n/pt-BR/workflow.ts +++ b/web/i18n/pt-BR/workflow.ts @@ -107,8 +107,6 @@ const translation = { publishUpdate: 'Publicar Atualização', versionHistory: 'Histórico de Versão', exportImage: 'Exportar Imagem', - referenceVar: 'Variável de Referência', - noExist: 'Nenhuma variável desse tipo', exitVersions: 'Versões de Sair', exportSVG: 'Exportar como SVG', exportJPEG: 'Exportar como JPEG', @@ -608,7 +606,6 @@ const translation = { }, addSubVariable: 'Subvariável', select: 'Selecionar', - condition: 'Condição', }, variableAssigner: { title: 'Atribuir variáveis', @@ -971,6 +968,15 @@ const translation = { }, settingsTab: 'Configurações', lastRunTab: 'Última execução', + relations: { + noDependents: 'Sem dependentes', + dependenciesDescription: 'Nós dos quais esse nó depende', + dependents: 'Dependentes', + dependencies: 'Dependências', + dependentsDescription: 'Nós que dependem desse nó', + noDependencies: 'Sem dependências', + }, + relationsTab: 'Relações', }, } diff --git a/web/i18n/ro-RO/workflow.ts b/web/i18n/ro-RO/workflow.ts index b4eb41d041..a994edd78f 100644 --- a/web/i18n/ro-RO/workflow.ts +++ b/web/i18n/ro-RO/workflow.ts @@ -106,11 +106,9 @@ const translation = { exportImage: 'Exportă imaginea', exportSVG: 'Exportă ca SVG', exportPNG: 'Exportă ca PNG', - noExist: 'Nu există o astfel de variabilă', exitVersions: 'Ieșire Versiuni', versionHistory: 'Istoricul versiunilor', publishUpdate: 'Publicați actualizarea', - referenceVar: 'Variabilă de referință', exportJPEG: 'Exportă ca JPEG', addBlock: 'Adaugă nod', needAnswerNode: 'Nodul de răspuns trebuie adăugat', @@ -608,7 +606,6 @@ const translation = { }, select: 'Alege', addSubVariable: 'Subvariabilă', - condition: 'Condiție', }, variableAssigner: { title: 'Atribuie variabile', @@ -971,6 +968,15 @@ const translation = { }, settingsTab: 'Setări', lastRunTab: 'Ultima execuție', + relations: { + dependencies: 'Dependenţele', + noDependencies: 'Fără dependențe', + dependents: 'Dependenţe', + noDependents: 'Fără persoane aflate în întreținere', + dependentsDescription: 'Noduri care se bazează pe acest nod', + dependenciesDescription: 'Noduri pe care se bazează acest nod', + }, + relationsTab: 'Relații', }, } diff --git a/web/i18n/ru-RU/workflow.ts b/web/i18n/ru-RU/workflow.ts index 87982d1331..81534334b4 100644 --- a/web/i18n/ru-RU/workflow.ts +++ b/web/i18n/ru-RU/workflow.ts @@ -103,12 +103,10 @@ const translation = { addFailureBranch: 'Добавить ветвь Fail', noHistory: 'Без истории', loadMore: 'Загрузите больше рабочих процессов', - noExist: 'Такой переменной не существует', versionHistory: 'История версий', exportPNG: 'Экспортировать как PNG', exportImage: 'Экспортировать изображение', exportJPEG: 'Экспортировать как JPEG', - referenceVar: 'Ссылочная переменная', exitVersions: 'Выходные версии', exportSVG: 'Экспортировать как SVG', publishUpdate: 'Опубликовать обновление', @@ -608,7 +606,6 @@ const translation = { }, select: 'Выбирать', addSubVariable: 'Подпеременная', - condition: 'Условие', }, variableAssigner: { title: 'Назначить переменные', @@ -971,6 +968,15 @@ const translation = { }, lastRunTab: 'Последний запуск', settingsTab: 'Настройки', + relations: { + dependencies: 'Зависимости', + dependents: 'Иждивенцев', + noDependencies: 'Нет зависимостей', + dependentsDescription: 'Узлы, которые полагаются на этот узел', + noDependents: 'Отсутствие иждивенцев', + dependenciesDescription: 'Узлы, на которые опирается этот узел', + }, + relationsTab: 'Отношения', }, } diff --git a/web/i18n/sl-SI/workflow.ts b/web/i18n/sl-SI/workflow.ts index f267fb0d50..da8d19aa06 100644 --- a/web/i18n/sl-SI/workflow.ts +++ b/web/i18n/sl-SI/workflow.ts @@ -968,6 +968,15 @@ const translation = { }, settingsTab: 'Nastavitve', lastRunTab: 'Zadnji zagon', + relations: { + dependencies: 'Odvisnosti', + dependents: 'Odvisnim', + noDependents: 'Brez vzdrževanih oseb', + dependentsDescription: 'Vozlišča, ki se zanašajo na to vozlišče', + dependenciesDescription: 'Vozlišča, na katera se zanaša to vozlišče', + noDependencies: 'Brez odvisnosti', + }, + relationsTab: 'Odnose', }, } diff --git a/web/i18n/th-TH/workflow.ts b/web/i18n/th-TH/workflow.ts index a9a1ca7923..78a49716b0 100644 --- a/web/i18n/th-TH/workflow.ts +++ b/web/i18n/th-TH/workflow.ts @@ -105,9 +105,7 @@ const translation = { noHistory: 'ไม่มีประวัติ', versionHistory: 'ประวัติรุ่น', exportPNG: 'ส่งออกเป็น PNG', - noExist: 'ไม่มีตัวแปรดังกล่าว', exportJPEG: 'ส่งออกเป็น JPEG', - referenceVar: 'ตัวแปรอ้างอิง', publishUpdate: 'เผยแพร่การอัปเดต', exitVersions: 'ออกเวอร์ชัน', exportImage: 'ส่งออกภาพ', @@ -608,7 +606,6 @@ const translation = { selectVariable: 'เลือกตัวแปร...', addSubVariable: 'ตัวแปรย่อย', select: 'เลือก', - condition: 'เงื่อนไข', }, variableAssigner: { title: 'กําหนดตัวแปร', @@ -971,6 +968,15 @@ const translation = { }, settingsTab: 'การตั้งค่า', lastRunTab: 'รอบสุดท้าย', + relations: { + dependents: 'ผู้อยู่ในอุปการะ', + dependencies: 'อ้าง อิง', + dependenciesDescription: 'โหนดที่โหนดนี้อาศัย', + noDependencies: 'ไม่มีการพึ่งพา', + noDependents: 'ไม่มีผู้อยู่ในอุปการะ', + dependentsDescription: 'โหนดที่อาศัยโหนดนี้', + }, + relationsTab: 'สัมพันธ์', }, } diff --git a/web/i18n/tr-TR/workflow.ts b/web/i18n/tr-TR/workflow.ts index 499ba86807..00310524de 100644 --- a/web/i18n/tr-TR/workflow.ts +++ b/web/i18n/tr-TR/workflow.ts @@ -109,9 +109,7 @@ const translation = { exitVersions: 'Çıkış Sürümleri', versionHistory: 'Sürüm Geçmişi', exportJPEG: 'JPEG olarak dışa aktar', - noExist: 'Böyle bir değişken yok', exportSVG: 'SVG olarak dışa aktar', - referenceVar: 'Referans Değişken', addBlock: 'Düğüm Ekle', needAnswerNode: 'Cevap düğümü eklenmelidir.', needEndNode: 'Son düğüm eklenmelidir', @@ -609,7 +607,6 @@ const translation = { }, addSubVariable: 'Alt Değişken', select: 'Seçmek', - condition: 'Koşul', }, variableAssigner: { title: 'Değişken ata', @@ -972,6 +969,15 @@ const translation = { }, lastRunTab: 'Son Koşu', settingsTab: 'Ayarlar', + relations: { + noDependents: 'Bakmakla yükümlü olunan kişi yok', + dependentsDescription: 'Bu düğüme dayanan düğümler', + dependenciesDescription: 'Bu düğümün dayandığı düğümler', + dependencies: 'Bağımlılık', + dependents: 'Bağımlı', + noDependencies: 'Bağımlılık yok', + }, + relationsTab: 'Ilişkiler', }, } diff --git a/web/i18n/uk-UA/workflow.ts b/web/i18n/uk-UA/workflow.ts index dea3704d85..fd6e71adf4 100644 --- a/web/i18n/uk-UA/workflow.ts +++ b/web/i18n/uk-UA/workflow.ts @@ -103,9 +103,7 @@ const translation = { addFailureBranch: 'Додано гілку помилки', noHistory: 'Без історії', loadMore: 'Завантажте більше робочих процесів', - referenceVar: 'Посилальна змінна', exportPNG: 'Експортувати як PNG', - noExist: 'Такої змінної не існує', exitVersions: 'Вихідні версії', versionHistory: 'Історія версій', publishUpdate: 'Опублікувати оновлення', @@ -608,7 +606,6 @@ const translation = { }, select: 'Виберіть', addSubVariable: 'Підзмінна', - condition: 'Умова', }, variableAssigner: { title: 'Присвоєння змінних', @@ -971,6 +968,15 @@ const translation = { }, lastRunTab: 'Останній запуск', settingsTab: 'Налаштування', + relations: { + noDependents: 'Без утриманців', + dependents: 'Утриманців', + dependencies: 'Залежностей', + noDependencies: 'Відсутність залежностей', + dependenciesDescription: 'Вузли, на які спирається цей вузол', + dependentsDescription: 'Вузли, які спираються на цей вузол', + }, + relationsTab: 'Відносин', }, } diff --git a/web/i18n/vi-VN/workflow.ts b/web/i18n/vi-VN/workflow.ts index 257fd1ed67..6898ec9a88 100644 --- a/web/i18n/vi-VN/workflow.ts +++ b/web/i18n/vi-VN/workflow.ts @@ -109,9 +109,7 @@ const translation = { exitVersions: 'Phiên bản thoát', exportImage: 'Xuất hình ảnh', exportPNG: 'Xuất dưới dạng PNG', - noExist: 'Không có biến như vậy', exportJPEG: 'Xuất dưới dạng JPEG', - referenceVar: 'Biến tham chiếu', needAnswerNode: 'Nút Trả lời phải được thêm vào', addBlock: 'Thêm Node', needEndNode: 'Nút Kết thúc phải được thêm vào', @@ -608,7 +606,6 @@ const translation = { }, addSubVariable: 'Biến phụ', select: 'Lựa', - condition: 'Điều kiện', }, variableAssigner: { title: 'Gán biến', @@ -971,6 +968,15 @@ const translation = { }, settingsTab: 'Cài đặt', lastRunTab: 'Chạy Lần Cuối', + relations: { + noDependencies: 'Không phụ thuộc', + dependenciesDescription: 'Các nút mà nút này dựa vào', + dependents: 'Người phụ thuộc', + dependencies: 'Phụ thuộc', + noDependents: 'Không có người phụ thuộc', + dependentsDescription: 'Các nút dựa vào nút này', + }, + relationsTab: 'Mối quan hệ', }, } From 40a11b69422a1c50bb42509be664b1addc930a09 Mon Sep 17 00:00:00 2001 From: rouxiaomin <1530140574@qq.com> Date: Tue, 5 Aug 2025 19:57:43 +0800 Subject: [PATCH 149/415] =?UTF-8?q?feat(api):Enhance=20the=20scope=20of=20?= =?UTF-8?q?expired=20data=20cleanup=20table=20in=20the=20Dify=E2=80=A6=20(?= =?UTF-8?q?#23414)?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- api/models/workflow.py | 13 ++ .../clear_free_plan_tenant_expired_logs.py | 136 +++++++++++++- ...est_clear_free_plan_tenant_expired_logs.py | 168 ++++++++++++++++++ 3 files changed, 316 insertions(+), 1 deletion(-) create mode 100644 api/tests/unit_tests/services/test_clear_free_plan_tenant_expired_logs.py diff --git a/api/models/workflow.py b/api/models/workflow.py index 9cf6a00456..453a650f84 100644 --- a/api/models/workflow.py +++ b/api/models/workflow.py @@ -864,6 +864,19 @@ class WorkflowAppLog(Base): created_by_role = CreatorUserRole(self.created_by_role) return db.session.get(EndUser, self.created_by) if created_by_role == CreatorUserRole.END_USER else None + def to_dict(self): + return { + "id": self.id, + "tenant_id": self.tenant_id, + "app_id": self.app_id, + "workflow_id": self.workflow_id, + "workflow_run_id": self.workflow_run_id, + "created_from": self.created_from, + "created_by_role": self.created_by_role, + "created_by": self.created_by, + "created_at": self.created_at, + } + class ConversationVariable(Base): __tablename__ = "workflow_conversation_variables" diff --git a/api/services/clear_free_plan_tenant_expired_logs.py b/api/services/clear_free_plan_tenant_expired_logs.py index d057a14afb..b28afcaa41 100644 --- a/api/services/clear_free_plan_tenant_expired_logs.py +++ b/api/services/clear_free_plan_tenant_expired_logs.py @@ -13,7 +13,19 @@ from core.model_runtime.utils.encoders import jsonable_encoder from extensions.ext_database import db from extensions.ext_storage import storage from models.account import Tenant -from models.model import App, Conversation, Message +from models.model import ( + App, + AppAnnotationHitHistory, + Conversation, + Message, + MessageAgentThought, + MessageAnnotation, + MessageChain, + MessageFeedback, + MessageFile, +) +from models.web import SavedMessage +from models.workflow import WorkflowAppLog from repositories.factory import DifyAPIRepositoryFactory from services.billing_service import BillingService @@ -21,6 +33,85 @@ logger = logging.getLogger(__name__) class ClearFreePlanTenantExpiredLogs: + @classmethod + def _clear_message_related_tables(cls, session: Session, tenant_id: str, batch_message_ids: list[str]) -> None: + """ + Clean up message-related tables to avoid data redundancy. + This method cleans up tables that have foreign key relationships with Message. + + Args: + session: Database session, the same with the one in process_tenant method + tenant_id: Tenant ID for logging purposes + batch_message_ids: List of message IDs to clean up + """ + if not batch_message_ids: + return + + # Clean up each related table + related_tables = [ + (MessageFeedback, "message_feedbacks"), + (MessageFile, "message_files"), + (MessageAnnotation, "message_annotations"), + (MessageChain, "message_chains"), + (MessageAgentThought, "message_agent_thoughts"), + (AppAnnotationHitHistory, "app_annotation_hit_histories"), + (SavedMessage, "saved_messages"), + ] + + for model, table_name in related_tables: + # Query records related to expired messages + records = ( + session.query(model) + .filter( + model.message_id.in_(batch_message_ids), # type: ignore + ) + .all() + ) + + if len(records) == 0: + continue + + # Save records before deletion + record_ids = [record.id for record in records] + try: + record_data = [] + for record in records: + try: + if hasattr(record, "to_dict"): + record_data.append(record.to_dict()) + else: + # if record doesn't have to_dict method, we need to transform it to dict manually + record_dict = {} + for column in record.__table__.columns: + record_dict[column.name] = getattr(record, column.name) + record_data.append(record_dict) + except Exception: + logger.exception("Failed to transform %s record: %s", table_name, record.id) + continue + + if record_data: + storage.save( + f"free_plan_tenant_expired_logs/" + f"{tenant_id}/{table_name}/{datetime.datetime.now().strftime('%Y-%m-%d')}" + f"-{time.time()}.json", + json.dumps( + jsonable_encoder(record_data), + ).encode("utf-8"), + ) + except Exception: + logger.exception("Failed to save %s records", table_name) + + session.query(model).filter( + model.id.in_(record_ids), # type: ignore + ).delete(synchronize_session=False) + + click.echo( + click.style( + f"[{datetime.datetime.now()}] Processed {len(record_ids)} " + f"{table_name} records for tenant {tenant_id}" + ) + ) + @classmethod def process_tenant(cls, flask_app: Flask, tenant_id: str, days: int, batch: int): with flask_app.app_context(): @@ -58,6 +149,7 @@ class ClearFreePlanTenantExpiredLogs: Message.id.in_(message_ids), ).delete(synchronize_session=False) + cls._clear_message_related_tables(session, tenant_id, message_ids) session.commit() click.echo( @@ -199,6 +291,48 @@ class ClearFreePlanTenantExpiredLogs: if len(workflow_runs) < batch: break + while True: + with Session(db.engine).no_autoflush as session: + workflow_app_logs = ( + session.query(WorkflowAppLog) + .filter( + WorkflowAppLog.tenant_id == tenant_id, + WorkflowAppLog.created_at < datetime.datetime.now() - datetime.timedelta(days=days), + ) + .limit(batch) + .all() + ) + + if len(workflow_app_logs) == 0: + break + + # save workflow app logs + storage.save( + f"free_plan_tenant_expired_logs/" + f"{tenant_id}/workflow_app_logs/{datetime.datetime.now().strftime('%Y-%m-%d')}" + f"-{time.time()}.json", + json.dumps( + jsonable_encoder( + [workflow_app_log.to_dict() for workflow_app_log in workflow_app_logs], + ), + ).encode("utf-8"), + ) + + workflow_app_log_ids = [workflow_app_log.id for workflow_app_log in workflow_app_logs] + + # delete workflow app logs + session.query(WorkflowAppLog).filter( + WorkflowAppLog.id.in_(workflow_app_log_ids), + ).delete(synchronize_session=False) + session.commit() + + click.echo( + click.style( + f"[{datetime.datetime.now()}] Processed {len(workflow_app_log_ids)}" + f" workflow app logs for tenant {tenant_id}" + ) + ) + @classmethod def process(cls, days: int, batch: int, tenant_ids: list[str]): """ diff --git a/api/tests/unit_tests/services/test_clear_free_plan_tenant_expired_logs.py b/api/tests/unit_tests/services/test_clear_free_plan_tenant_expired_logs.py new file mode 100644 index 0000000000..dd2bc21814 --- /dev/null +++ b/api/tests/unit_tests/services/test_clear_free_plan_tenant_expired_logs.py @@ -0,0 +1,168 @@ +import datetime +from unittest.mock import Mock, patch + +import pytest +from sqlalchemy.orm import Session + +from services.clear_free_plan_tenant_expired_logs import ClearFreePlanTenantExpiredLogs + + +class TestClearFreePlanTenantExpiredLogs: + """Unit tests for ClearFreePlanTenantExpiredLogs._clear_message_related_tables method.""" + + @pytest.fixture + def mock_session(self): + """Create a mock database session.""" + session = Mock(spec=Session) + session.query.return_value.filter.return_value.all.return_value = [] + session.query.return_value.filter.return_value.delete.return_value = 0 + return session + + @pytest.fixture + def mock_storage(self): + """Create a mock storage object.""" + storage = Mock() + storage.save.return_value = None + return storage + + @pytest.fixture + def sample_message_ids(self): + """Sample message IDs for testing.""" + return ["msg-1", "msg-2", "msg-3"] + + @pytest.fixture + def sample_records(self): + """Sample records for testing.""" + records = [] + for i in range(3): + record = Mock() + record.id = f"record-{i}" + record.to_dict.return_value = { + "id": f"record-{i}", + "message_id": f"msg-{i}", + "created_at": datetime.datetime.now().isoformat(), + } + records.append(record) + return records + + def test_clear_message_related_tables_empty_message_ids(self, mock_session): + """Test that method returns early when message_ids is empty.""" + with patch("services.clear_free_plan_tenant_expired_logs.storage") as mock_storage: + ClearFreePlanTenantExpiredLogs._clear_message_related_tables(mock_session, "tenant-123", []) + + # Should not call any database operations + mock_session.query.assert_not_called() + mock_storage.save.assert_not_called() + + def test_clear_message_related_tables_no_records_found(self, mock_session, sample_message_ids): + """Test when no related records are found.""" + with patch("services.clear_free_plan_tenant_expired_logs.storage") as mock_storage: + mock_session.query.return_value.filter.return_value.all.return_value = [] + + ClearFreePlanTenantExpiredLogs._clear_message_related_tables(mock_session, "tenant-123", sample_message_ids) + + # Should call query for each related table but find no records + assert mock_session.query.call_count > 0 + mock_storage.save.assert_not_called() + + def test_clear_message_related_tables_with_records_and_to_dict( + self, mock_session, sample_message_ids, sample_records + ): + """Test when records are found and have to_dict method.""" + with patch("services.clear_free_plan_tenant_expired_logs.storage") as mock_storage: + mock_session.query.return_value.filter.return_value.all.return_value = sample_records + + ClearFreePlanTenantExpiredLogs._clear_message_related_tables(mock_session, "tenant-123", sample_message_ids) + + # Should call to_dict on each record (called once per table, so 7 times total) + for record in sample_records: + assert record.to_dict.call_count == 7 + + # Should save backup data + assert mock_storage.save.call_count > 0 + + def test_clear_message_related_tables_with_records_no_to_dict(self, mock_session, sample_message_ids): + """Test when records are found but don't have to_dict method.""" + with patch("services.clear_free_plan_tenant_expired_logs.storage") as mock_storage: + # Create records without to_dict method + records = [] + for i in range(2): + record = Mock() + mock_table = Mock() + mock_id_column = Mock() + mock_id_column.name = "id" + mock_message_id_column = Mock() + mock_message_id_column.name = "message_id" + mock_table.columns = [mock_id_column, mock_message_id_column] + record.__table__ = mock_table + record.id = f"record-{i}" + record.message_id = f"msg-{i}" + del record.to_dict + records.append(record) + + # Mock records for first table only, empty for others + mock_session.query.return_value.filter.return_value.all.side_effect = [ + records, + [], + [], + [], + [], + [], + [], + ] + + ClearFreePlanTenantExpiredLogs._clear_message_related_tables(mock_session, "tenant-123", sample_message_ids) + + # Should save backup data even without to_dict + assert mock_storage.save.call_count > 0 + + def test_clear_message_related_tables_storage_error_continues( + self, mock_session, sample_message_ids, sample_records + ): + """Test that method continues even when storage.save fails.""" + with patch("services.clear_free_plan_tenant_expired_logs.storage") as mock_storage: + mock_storage.save.side_effect = Exception("Storage error") + + mock_session.query.return_value.filter.return_value.all.return_value = sample_records + + # Should not raise exception + ClearFreePlanTenantExpiredLogs._clear_message_related_tables(mock_session, "tenant-123", sample_message_ids) + + # Should still delete records even if backup fails + assert mock_session.query.return_value.filter.return_value.delete.called + + def test_clear_message_related_tables_serialization_error_continues(self, mock_session, sample_message_ids): + """Test that method continues even when record serialization fails.""" + with patch("services.clear_free_plan_tenant_expired_logs.storage") as mock_storage: + record = Mock() + record.id = "record-1" + record.to_dict.side_effect = Exception("Serialization error") + + mock_session.query.return_value.filter.return_value.all.return_value = [record] + + # Should not raise exception + ClearFreePlanTenantExpiredLogs._clear_message_related_tables(mock_session, "tenant-123", sample_message_ids) + + # Should still delete records even if serialization fails + assert mock_session.query.return_value.filter.return_value.delete.called + + def test_clear_message_related_tables_deletion_called(self, mock_session, sample_message_ids, sample_records): + """Test that deletion is called for found records.""" + with patch("services.clear_free_plan_tenant_expired_logs.storage") as mock_storage: + mock_session.query.return_value.filter.return_value.all.return_value = sample_records + + ClearFreePlanTenantExpiredLogs._clear_message_related_tables(mock_session, "tenant-123", sample_message_ids) + + # Should call delete for each table that has records + assert mock_session.query.return_value.filter.return_value.delete.called + + def test_clear_message_related_tables_logging_output( + self, mock_session, sample_message_ids, sample_records, capsys + ): + """Test that logging output is generated.""" + with patch("services.clear_free_plan_tenant_expired_logs.storage") as mock_storage: + mock_session.query.return_value.filter.return_value.all.return_value = sample_records + + ClearFreePlanTenantExpiredLogs._clear_message_related_tables(mock_session, "tenant-123", sample_message_ids) + + pass From fc5ed9f316610fb826a072ecf2c53bccf19adbcb Mon Sep 17 00:00:00 2001 From: NeatGuyCoding <15627489+NeatGuyCoding@users.noreply.github.com> Date: Tue, 5 Aug 2025 20:15:39 +0800 Subject: [PATCH 150/415] Feat add testcontainers test for account service (#23380) --- .../services/__init__.py | 0 .../services/test_account_service.py | 3340 +++++++++++++++++ 2 files changed, 3340 insertions(+) create mode 100644 api/tests/test_containers_integration_tests/services/__init__.py create mode 100644 api/tests/test_containers_integration_tests/services/test_account_service.py diff --git a/api/tests/test_containers_integration_tests/services/__init__.py b/api/tests/test_containers_integration_tests/services/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/api/tests/test_containers_integration_tests/services/test_account_service.py b/api/tests/test_containers_integration_tests/services/test_account_service.py new file mode 100644 index 0000000000..3d7be0df7d --- /dev/null +++ b/api/tests/test_containers_integration_tests/services/test_account_service.py @@ -0,0 +1,3340 @@ +import json +from hashlib import sha256 +from unittest.mock import patch + +import pytest +from faker import Faker +from werkzeug.exceptions import Unauthorized + +from configs import dify_config +from controllers.console.error import AccountNotFound, NotAllowedCreateWorkspace +from models.account import AccountStatus, TenantAccountJoin +from services.account_service import AccountService, RegisterService, TenantService, TokenPair +from services.errors.account import ( + AccountAlreadyInTenantError, + AccountLoginError, + AccountNotFoundError, + AccountPasswordError, + AccountRegisterError, + CurrentPasswordIncorrectError, +) +from services.errors.workspace import WorkSpaceNotAllowedCreateError, WorkspacesLimitExceededError + + +class TestAccountService: + """Integration tests for AccountService using testcontainers.""" + + @pytest.fixture + def mock_external_service_dependencies(self): + """Mock setup for external service dependencies.""" + with ( + patch("services.account_service.FeatureService") as mock_feature_service, + patch("services.account_service.BillingService") as mock_billing_service, + patch("services.account_service.PassportService") as mock_passport_service, + ): + # Setup default mock returns + mock_feature_service.get_system_features.return_value.is_allow_register = True + mock_feature_service.get_system_features.return_value.is_allow_create_workspace = True + mock_feature_service.get_system_features.return_value.license.workspaces.is_available.return_value = True + mock_billing_service.is_email_in_freeze.return_value = False + mock_passport_service.return_value.issue.return_value = "mock_jwt_token" + + yield { + "feature_service": mock_feature_service, + "billing_service": mock_billing_service, + "passport_service": mock_passport_service, + } + + def test_create_account_and_login(self, db_session_with_containers, mock_external_service_dependencies): + """ + Test account creation and login with correct password. + """ + fake = Faker() + email = fake.email() + name = fake.name() + password = fake.password(length=12) + # Setup mocks + mock_external_service_dependencies["feature_service"].get_system_features.return_value.is_allow_register = True + mock_external_service_dependencies["billing_service"].is_email_in_freeze.return_value = False + + account = AccountService.create_account( + email=email, + name=name, + interface_language="en-US", + password=password, + ) + assert account.email == email + assert account.status == AccountStatus.ACTIVE.value + + # Login with correct password + logged_in = AccountService.authenticate(email, password) + assert logged_in.id == account.id + + def test_create_account_without_password(self, db_session_with_containers, mock_external_service_dependencies): + """ + Test account creation without password (for OAuth users). + """ + fake = Faker() + email = fake.email() + name = fake.name() + # Setup mocks + mock_external_service_dependencies["feature_service"].get_system_features.return_value.is_allow_register = True + mock_external_service_dependencies["billing_service"].is_email_in_freeze.return_value = False + + account = AccountService.create_account( + email=email, + name=name, + interface_language="en-US", + password=None, + ) + assert account.email == email + assert account.password is None + assert account.password_salt is None + + def test_create_account_registration_disabled(self, db_session_with_containers, mock_external_service_dependencies): + """ + Test account creation when registration is disabled. + """ + fake = Faker() + email = fake.email() + name = fake.name() + # Setup mocks to disable registration + mock_external_service_dependencies["feature_service"].get_system_features.return_value.is_allow_register = False + + with pytest.raises(AccountNotFound): # AccountNotFound exception + AccountService.create_account( + email=email, + name=name, + interface_language="en-US", + password=fake.password(length=12), + ) + + def test_create_account_email_in_freeze(self, db_session_with_containers, mock_external_service_dependencies): + """ + Test account creation when email is in freeze period. + """ + fake = Faker() + email = fake.email() + name = fake.name() + password = fake.password(length=12) + # Setup mocks + mock_external_service_dependencies["feature_service"].get_system_features.return_value.is_allow_register = True + mock_external_service_dependencies["billing_service"].is_email_in_freeze.return_value = True + dify_config.BILLING_ENABLED = True + + with pytest.raises(AccountRegisterError): + AccountService.create_account( + email=email, + name=name, + interface_language="en-US", + password=password, + ) + + dify_config.BILLING_ENABLED = False # Reset config for other tests + + def test_authenticate_account_not_found(self, db_session_with_containers, mock_external_service_dependencies): + """ + Test authentication with non-existent account. + """ + fake = Faker() + email = fake.email() + password = fake.password(length=12) + with pytest.raises(AccountNotFoundError): + AccountService.authenticate(email, password) + + def test_authenticate_banned_account(self, db_session_with_containers, mock_external_service_dependencies): + """ + Test authentication with banned account. + """ + fake = Faker() + email = fake.email() + name = fake.name() + password = fake.password(length=12) + # Setup mocks + mock_external_service_dependencies["feature_service"].get_system_features.return_value.is_allow_register = True + mock_external_service_dependencies["billing_service"].is_email_in_freeze.return_value = False + + # Create account first + account = AccountService.create_account( + email=email, + name=name, + interface_language="en-US", + password=password, + ) + + # Ban the account + account.status = AccountStatus.BANNED.value + from extensions.ext_database import db + + db.session.commit() + + with pytest.raises(AccountLoginError): + AccountService.authenticate(email, password) + + def test_authenticate_wrong_password(self, db_session_with_containers, mock_external_service_dependencies): + """ + Test authentication with wrong password. + """ + fake = Faker() + email = fake.email() + name = fake.name() + correct_password = fake.password(length=12) + wrong_password = fake.password(length=12) + # Setup mocks + mock_external_service_dependencies["feature_service"].get_system_features.return_value.is_allow_register = True + mock_external_service_dependencies["billing_service"].is_email_in_freeze.return_value = False + + # Create account first + AccountService.create_account( + email=email, + name=name, + interface_language="en-US", + password=correct_password, + ) + + with pytest.raises(AccountPasswordError): + AccountService.authenticate(email, wrong_password) + + def test_authenticate_with_invite_token(self, db_session_with_containers, mock_external_service_dependencies): + """ + Test authentication with invite token to set password for account without password. + """ + fake = Faker() + email = fake.email() + name = fake.name() + new_password = fake.password(length=12) + # Setup mocks + mock_external_service_dependencies["feature_service"].get_system_features.return_value.is_allow_register = True + mock_external_service_dependencies["billing_service"].is_email_in_freeze.return_value = False + + # Create account without password + account = AccountService.create_account( + email=email, + name=name, + interface_language="en-US", + password=None, + ) + + # Authenticate with invite token to set password + authenticated_account = AccountService.authenticate( + email, + new_password, + invite_token="valid_invite_token", + ) + + assert authenticated_account.id == account.id + assert authenticated_account.password is not None + assert authenticated_account.password_salt is not None + + def test_authenticate_pending_account_activation( + self, db_session_with_containers, mock_external_service_dependencies + ): + """ + Test authentication activates pending account. + """ + fake = Faker() + email = fake.email() + name = fake.name() + password = fake.password(length=12) + # Setup mocks + mock_external_service_dependencies["feature_service"].get_system_features.return_value.is_allow_register = True + mock_external_service_dependencies["billing_service"].is_email_in_freeze.return_value = False + + # Create account with pending status + account = AccountService.create_account( + email=email, + name=name, + interface_language="en-US", + password=password, + ) + account.status = AccountStatus.PENDING.value + from extensions.ext_database import db + + db.session.commit() + + # Authenticate should activate the account + authenticated_account = AccountService.authenticate(email, password) + assert authenticated_account.status == AccountStatus.ACTIVE.value + assert authenticated_account.initialized_at is not None + + def test_update_account_password_success(self, db_session_with_containers, mock_external_service_dependencies): + """ + Test successful password update. + """ + fake = Faker() + email = fake.email() + name = fake.name() + old_password = fake.password(length=12) + new_password = fake.password(length=12) + # Setup mocks + mock_external_service_dependencies["feature_service"].get_system_features.return_value.is_allow_register = True + mock_external_service_dependencies["billing_service"].is_email_in_freeze.return_value = False + + # Create account + account = AccountService.create_account( + email=email, + name=name, + interface_language="en-US", + password=old_password, + ) + + # Update password + updated_account = AccountService.update_account_password(account, old_password, new_password) + + # Verify new password works + authenticated_account = AccountService.authenticate(email, new_password) + assert authenticated_account.id == account.id + + def test_update_account_password_wrong_current_password( + self, db_session_with_containers, mock_external_service_dependencies + ): + """ + Test password update with wrong current password. + """ + fake = Faker() + email = fake.email() + name = fake.name() + old_password = fake.password(length=12) + wrong_password = fake.password(length=12) + new_password = fake.password(length=12) + # Setup mocks + mock_external_service_dependencies["feature_service"].get_system_features.return_value.is_allow_register = True + mock_external_service_dependencies["billing_service"].is_email_in_freeze.return_value = False + + # Create account + account = AccountService.create_account( + email=email, + name=name, + interface_language="en-US", + password=old_password, + ) + + with pytest.raises(CurrentPasswordIncorrectError): + AccountService.update_account_password(account, wrong_password, new_password) + + def test_update_account_password_invalid_new_password( + self, db_session_with_containers, mock_external_service_dependencies + ): + """ + Test password update with invalid new password format. + """ + fake = Faker() + email = fake.email() + name = fake.name() + old_password = fake.password(length=12) + # Setup mocks + mock_external_service_dependencies["feature_service"].get_system_features.return_value.is_allow_register = True + mock_external_service_dependencies["billing_service"].is_email_in_freeze.return_value = False + + # Create account + account = AccountService.create_account( + email=email, + name=name, + interface_language="en-US", + password=old_password, + ) + + # Test with too short password (assuming minimum length validation) + with pytest.raises(ValueError): # Password validation error + AccountService.update_account_password(account, old_password, "123") + + def test_create_account_and_tenant(self, db_session_with_containers, mock_external_service_dependencies): + """ + Test account creation with automatic tenant creation. + """ + fake = Faker() + email = fake.email() + name = fake.name() + password = fake.password(length=12) + # Setup mocks + mock_external_service_dependencies["feature_service"].get_system_features.return_value.is_allow_register = True + mock_external_service_dependencies[ + "feature_service" + ].get_system_features.return_value.is_allow_create_workspace = True + mock_external_service_dependencies[ + "feature_service" + ].get_system_features.return_value.license.workspaces.is_available.return_value = True + mock_external_service_dependencies["billing_service"].is_email_in_freeze.return_value = False + account = AccountService.create_account_and_tenant( + email=email, + name=name, + interface_language="en-US", + password=password, + ) + + assert account.email == email + + # Verify tenant was created and linked + from extensions.ext_database import db + + tenant_join = db.session.query(TenantAccountJoin).filter_by(account_id=account.id).first() + assert tenant_join is not None + assert tenant_join.role == "owner" + + def test_create_account_and_tenant_workspace_creation_disabled( + self, db_session_with_containers, mock_external_service_dependencies + ): + """ + Test account creation when workspace creation is disabled. + """ + fake = Faker() + email = fake.email() + name = fake.name() + password = fake.password(length=12) + # Setup mocks + mock_external_service_dependencies["feature_service"].get_system_features.return_value.is_allow_register = True + mock_external_service_dependencies[ + "feature_service" + ].get_system_features.return_value.is_allow_create_workspace = False + mock_external_service_dependencies["billing_service"].is_email_in_freeze.return_value = False + + with pytest.raises(WorkSpaceNotAllowedCreateError): + AccountService.create_account_and_tenant( + email=email, + name=name, + interface_language="en-US", + password=password, + ) + + def test_create_account_and_tenant_workspace_limit_exceeded( + self, db_session_with_containers, mock_external_service_dependencies + ): + """ + Test account creation when workspace limit is exceeded. + """ + fake = Faker() + email = fake.email() + name = fake.name() + password = fake.password(length=12) + # Setup mocks + mock_external_service_dependencies["feature_service"].get_system_features.return_value.is_allow_register = True + mock_external_service_dependencies[ + "feature_service" + ].get_system_features.return_value.is_allow_create_workspace = True + mock_external_service_dependencies[ + "feature_service" + ].get_system_features.return_value.license.workspaces.is_available.return_value = False + mock_external_service_dependencies["billing_service"].is_email_in_freeze.return_value = False + + with pytest.raises(WorkspacesLimitExceededError): + AccountService.create_account_and_tenant( + email=email, + name=name, + interface_language="en-US", + password=password, + ) + + def test_link_account_integrate_new_provider(self, db_session_with_containers, mock_external_service_dependencies): + """ + Test linking account with new OAuth provider. + """ + fake = Faker() + email = fake.email() + name = fake.name() + # Setup mocks + mock_external_service_dependencies["feature_service"].get_system_features.return_value.is_allow_register = True + mock_external_service_dependencies["billing_service"].is_email_in_freeze.return_value = False + + # Create account + account = AccountService.create_account( + email=email, + name=name, + interface_language="en-US", + password=None, + ) + + # Link with new provider + AccountService.link_account_integrate("new-google", "google_open_id_123", account) + + # Verify integration was created + from extensions.ext_database import db + from models.account import AccountIntegrate + + integration = db.session.query(AccountIntegrate).filter_by(account_id=account.id, provider="new-google").first() + assert integration is not None + assert integration.open_id == "google_open_id_123" + + def test_link_account_integrate_existing_provider( + self, db_session_with_containers, mock_external_service_dependencies + ): + """ + Test linking account with existing provider (should update). + """ + fake = Faker() + email = fake.email() + name = fake.name() + # Setup mocks + mock_external_service_dependencies["feature_service"].get_system_features.return_value.is_allow_register = True + mock_external_service_dependencies["billing_service"].is_email_in_freeze.return_value = False + + # Create account + account = AccountService.create_account( + email=email, + name=name, + interface_language="en-US", + password=None, + ) + + # Link with provider first time + AccountService.link_account_integrate("exists-google", "google_open_id_123", account) + + # Link with same provider but different open_id (should update) + AccountService.link_account_integrate("exists-google", "google_open_id_456", account) + + # Verify integration was updated + from extensions.ext_database import db + from models.account import AccountIntegrate + + integration = ( + db.session.query(AccountIntegrate).filter_by(account_id=account.id, provider="exists-google").first() + ) + assert integration.open_id == "google_open_id_456" + + def test_close_account(self, db_session_with_containers, mock_external_service_dependencies): + """ + Test closing an account. + """ + fake = Faker() + email = fake.email() + name = fake.name() + password = fake.password(length=12) + # Setup mocks + mock_external_service_dependencies["feature_service"].get_system_features.return_value.is_allow_register = True + mock_external_service_dependencies["billing_service"].is_email_in_freeze.return_value = False + + # Create account + account = AccountService.create_account( + email=email, + name=name, + interface_language="en-US", + password=password, + ) + + # Close account + AccountService.close_account(account) + + # Verify account status changed + from extensions.ext_database import db + + db.session.refresh(account) + assert account.status == AccountStatus.CLOSED.value + + def test_update_account_fields(self, db_session_with_containers, mock_external_service_dependencies): + """ + Test updating account fields. + """ + fake = Faker() + email = fake.email() + name = fake.name() + password = fake.password(length=12) + updated_name = fake.name() + # Setup mocks + mock_external_service_dependencies["feature_service"].get_system_features.return_value.is_allow_register = True + mock_external_service_dependencies["billing_service"].is_email_in_freeze.return_value = False + + # Create account + account = AccountService.create_account( + email=email, + name=name, + interface_language="en-US", + password=password, + ) + + # Update account fields + updated_account = AccountService.update_account(account, name=updated_name, interface_theme="dark") + + assert updated_account.name == updated_name + assert updated_account.interface_theme == "dark" + + def test_update_account_invalid_field(self, db_session_with_containers, mock_external_service_dependencies): + """ + Test updating account with invalid field. + """ + fake = Faker() + email = fake.email() + name = fake.name() + password = fake.password(length=12) + # Setup mocks + mock_external_service_dependencies["feature_service"].get_system_features.return_value.is_allow_register = True + mock_external_service_dependencies["billing_service"].is_email_in_freeze.return_value = False + + # Create account + account = AccountService.create_account( + email=email, + name=name, + interface_language="en-US", + password=password, + ) + + with pytest.raises(AttributeError): + AccountService.update_account(account, invalid_field="value") + + def test_update_login_info(self, db_session_with_containers, mock_external_service_dependencies): + """ + Test updating login information. + """ + fake = Faker() + email = fake.email() + name = fake.name() + password = fake.password(length=12) + ip_address = fake.ipv4() + # Setup mocks + mock_external_service_dependencies["feature_service"].get_system_features.return_value.is_allow_register = True + mock_external_service_dependencies["billing_service"].is_email_in_freeze.return_value = False + + # Create account + account = AccountService.create_account( + email=email, + name=name, + interface_language="en-US", + password=password, + ) + + # Update login info + AccountService.update_login_info(account, ip_address=ip_address) + + # Verify login info was updated + from extensions.ext_database import db + + db.session.refresh(account) + assert account.last_login_ip == ip_address + assert account.last_login_at is not None + + def test_login_success(self, db_session_with_containers, mock_external_service_dependencies): + """ + Test successful login with token generation. + """ + fake = Faker() + email = fake.email() + name = fake.name() + password = fake.password(length=12) + ip_address = fake.ipv4() + # Setup mocks + mock_external_service_dependencies["feature_service"].get_system_features.return_value.is_allow_register = True + mock_external_service_dependencies["billing_service"].is_email_in_freeze.return_value = False + mock_external_service_dependencies["passport_service"].return_value.issue.return_value = "mock_access_token" + + # Create account + account = AccountService.create_account( + email=email, + name=name, + interface_language="en-US", + password=password, + ) + + # Login + token_pair = AccountService.login(account, ip_address=ip_address) + + assert isinstance(token_pair, TokenPair) + assert token_pair.access_token == "mock_access_token" + assert token_pair.refresh_token is not None + + # Verify passport service was called with correct parameters + mock_passport = mock_external_service_dependencies["passport_service"].return_value + mock_passport.issue.assert_called_once() + call_args = mock_passport.issue.call_args[0][0] + assert call_args["user_id"] == account.id + assert call_args["iss"] is not None + assert call_args["sub"] == "Console API Passport" + + def test_login_pending_account_activation(self, db_session_with_containers, mock_external_service_dependencies): + """ + Test login activates pending account. + """ + fake = Faker() + email = fake.email() + name = fake.name() + password = fake.password(length=12) + # Setup mocks + mock_external_service_dependencies["feature_service"].get_system_features.return_value.is_allow_register = True + mock_external_service_dependencies["billing_service"].is_email_in_freeze.return_value = False + mock_external_service_dependencies["passport_service"].return_value.issue.return_value = "mock_access_token" + + # Create account with pending status + account = AccountService.create_account( + email=email, + name=name, + interface_language="en-US", + password=password, + ) + account.status = AccountStatus.PENDING.value + from extensions.ext_database import db + + db.session.commit() + + # Login should activate the account + token_pair = AccountService.login(account) + + db.session.refresh(account) + assert account.status == AccountStatus.ACTIVE.value + + def test_logout(self, db_session_with_containers, mock_external_service_dependencies): + """ + Test logout functionality. + """ + fake = Faker() + email = fake.email() + name = fake.name() + password = fake.password(length=12) + # Setup mocks + mock_external_service_dependencies["feature_service"].get_system_features.return_value.is_allow_register = True + mock_external_service_dependencies["billing_service"].is_email_in_freeze.return_value = False + mock_external_service_dependencies["passport_service"].return_value.issue.return_value = "mock_access_token" + + # Create account + account = AccountService.create_account( + email=email, + name=name, + interface_language="en-US", + password=password, + ) + + # Login first to get refresh token + token_pair = AccountService.login(account) + + # Logout + AccountService.logout(account=account) + + # Verify refresh token was deleted from Redis + from extensions.ext_redis import redis_client + + refresh_token_key = f"account_refresh_token:{account.id}" + assert redis_client.get(refresh_token_key) is None + + def test_refresh_token_success(self, db_session_with_containers, mock_external_service_dependencies): + """ + Test successful token refresh. + """ + fake = Faker() + email = fake.email() + name = fake.name() + password = fake.password(length=12) + tenant_name = fake.company() + # Setup mocks + mock_external_service_dependencies["feature_service"].get_system_features.return_value.is_allow_register = True + mock_external_service_dependencies["billing_service"].is_email_in_freeze.return_value = False + mock_external_service_dependencies["passport_service"].return_value.issue.return_value = "new_mock_access_token" + + # Create account + account = AccountService.create_account( + email=email, + name=name, + interface_language="en-US", + password=password, + ) + # Create associated Tenant + TenantService.create_owner_tenant_if_not_exist(account=account, name=tenant_name, is_setup=True) + + # Login to get initial tokens + initial_token_pair = AccountService.login(account) + + # Refresh token + new_token_pair = AccountService.refresh_token(initial_token_pair.refresh_token) + + assert isinstance(new_token_pair, TokenPair) + assert new_token_pair.access_token == "new_mock_access_token" + assert new_token_pair.refresh_token != initial_token_pair.refresh_token + + def test_refresh_token_invalid_token(self, db_session_with_containers, mock_external_service_dependencies): + """ + Test refresh token with invalid token. + """ + fake = Faker() + invalid_token = fake.uuid4() + with pytest.raises(ValueError, match="Invalid refresh token"): + AccountService.refresh_token(invalid_token) + + def test_refresh_token_invalid_account(self, db_session_with_containers, mock_external_service_dependencies): + """ + Test refresh token with valid token but invalid account. + """ + fake = Faker() + email = fake.email() + name = fake.name() + password = fake.password(length=12) + # Setup mocks + mock_external_service_dependencies["feature_service"].get_system_features.return_value.is_allow_register = True + mock_external_service_dependencies["billing_service"].is_email_in_freeze.return_value = False + mock_external_service_dependencies["passport_service"].return_value.issue.return_value = "mock_access_token" + + # Create account + account = AccountService.create_account( + email=email, + name=name, + interface_language="en-US", + password=password, + ) + + # Login to get tokens + token_pair = AccountService.login(account) + + # Delete account + from extensions.ext_database import db + + db.session.delete(account) + db.session.commit() + + # Try to refresh token with deleted account + with pytest.raises(ValueError, match="Invalid account"): + AccountService.refresh_token(token_pair.refresh_token) + + def test_load_user_success(self, db_session_with_containers, mock_external_service_dependencies): + """ + Test loading user by ID successfully. + """ + fake = Faker() + email = fake.email() + name = fake.name() + password = fake.password(length=12) + tenant_name = fake.company() + # Setup mocks + mock_external_service_dependencies["feature_service"].get_system_features.return_value.is_allow_register = True + mock_external_service_dependencies["billing_service"].is_email_in_freeze.return_value = False + + # Create account + account = AccountService.create_account( + email=email, + name=name, + interface_language="en-US", + password=password, + ) + # Create associated Tenant + TenantService.create_owner_tenant_if_not_exist(account=account, name=tenant_name, is_setup=True) + + # Load user + loaded_user = AccountService.load_user(account.id) + + assert loaded_user is not None + assert loaded_user.id == account.id + assert loaded_user.email == account.email + + def test_load_user_not_found(self, db_session_with_containers, mock_external_service_dependencies): + """ + Test loading non-existent user. + """ + fake = Faker() + non_existent_user_id = fake.uuid4() + loaded_user = AccountService.load_user(non_existent_user_id) + assert loaded_user is None + + def test_load_user_banned_account(self, db_session_with_containers, mock_external_service_dependencies): + """ + Test loading banned user raises Unauthorized. + """ + fake = Faker() + email = fake.email() + name = fake.name() + password = fake.password(length=12) + # Setup mocks + mock_external_service_dependencies["feature_service"].get_system_features.return_value.is_allow_register = True + mock_external_service_dependencies["billing_service"].is_email_in_freeze.return_value = False + + # Create account + account = AccountService.create_account( + email=email, + name=name, + interface_language="en-US", + password=password, + ) + + # Ban the account + account.status = AccountStatus.BANNED.value + from extensions.ext_database import db + + db.session.commit() + + with pytest.raises(Unauthorized): # Unauthorized exception + AccountService.load_user(account.id) + + def test_get_account_jwt_token(self, db_session_with_containers, mock_external_service_dependencies): + """ + Test JWT token generation for account. + """ + fake = Faker() + email = fake.email() + name = fake.name() + password = fake.password(length=12) + # Setup mocks + mock_external_service_dependencies["feature_service"].get_system_features.return_value.is_allow_register = True + mock_external_service_dependencies["billing_service"].is_email_in_freeze.return_value = False + mock_external_service_dependencies["passport_service"].return_value.issue.return_value = "mock_jwt_token" + + # Create account + account = AccountService.create_account( + email=email, + name=name, + interface_language="en-US", + password=password, + ) + + # Generate JWT token + token = AccountService.get_account_jwt_token(account) + + assert token == "mock_jwt_token" + + # Verify passport service was called with correct parameters + mock_passport = mock_external_service_dependencies["passport_service"].return_value + mock_passport.issue.assert_called_once() + call_args = mock_passport.issue.call_args[0][0] + assert call_args["user_id"] == account.id + assert call_args["iss"] is not None + assert call_args["sub"] == "Console API Passport" + + def test_load_logged_in_account(self, db_session_with_containers, mock_external_service_dependencies): + """ + Test loading logged in account by ID. + """ + fake = Faker() + email = fake.email() + name = fake.name() + password = fake.password(length=12) + tenant_name = fake.company() + # Setup mocks + mock_external_service_dependencies["feature_service"].get_system_features.return_value.is_allow_register = True + mock_external_service_dependencies["billing_service"].is_email_in_freeze.return_value = False + + # Create account + account = AccountService.create_account( + email=email, + name=name, + interface_language="en-US", + password=password, + ) + # Create associated Tenant + TenantService.create_owner_tenant_if_not_exist(account=account, name=tenant_name, is_setup=True) + + # Load logged in account + loaded_account = AccountService.load_logged_in_account(account_id=account.id) + + assert loaded_account is not None + assert loaded_account.id == account.id + + def test_get_user_through_email_success(self, db_session_with_containers, mock_external_service_dependencies): + """ + Test getting user through email successfully. + """ + fake = Faker() + email = fake.email() + name = fake.name() + password = fake.password(length=12) + # Setup mocks + mock_external_service_dependencies["feature_service"].get_system_features.return_value.is_allow_register = True + mock_external_service_dependencies["billing_service"].is_email_in_freeze.return_value = False + + # Create account + account = AccountService.create_account( + email=email, + name=name, + interface_language="en-US", + password=password, + ) + + # Get user through email + found_user = AccountService.get_user_through_email(email) + + assert found_user is not None + assert found_user.id == account.id + + def test_get_user_through_email_not_found(self, db_session_with_containers, mock_external_service_dependencies): + """ + Test getting user through non-existent email. + """ + fake = Faker() + non_existent_email = fake.email() + found_user = AccountService.get_user_through_email(non_existent_email) + assert found_user is None + + def test_get_user_through_email_banned_account( + self, db_session_with_containers, mock_external_service_dependencies + ): + """ + Test getting banned user through email raises Unauthorized. + """ + fake = Faker() + email = fake.email() + name = fake.name() + password = fake.password(length=12) + # Setup mocks + mock_external_service_dependencies["feature_service"].get_system_features.return_value.is_allow_register = True + mock_external_service_dependencies["billing_service"].is_email_in_freeze.return_value = False + + # Create account + account = AccountService.create_account( + email=email, + name=name, + interface_language="en-US", + password=password, + ) + + # Ban the account + account.status = AccountStatus.BANNED.value + from extensions.ext_database import db + + db.session.commit() + + with pytest.raises(Unauthorized): # Unauthorized exception + AccountService.get_user_through_email(email) + + def test_get_user_through_email_in_freeze(self, db_session_with_containers, mock_external_service_dependencies): + """ + Test getting user through email that is in freeze period. + """ + fake = Faker() + email_in_freeze = fake.email() + # Setup mocks + dify_config.BILLING_ENABLED = True + mock_external_service_dependencies["billing_service"].is_email_in_freeze.return_value = True + + with pytest.raises(AccountRegisterError): + AccountService.get_user_through_email(email_in_freeze) + + # Reset config + dify_config.BILLING_ENABLED = False + + def test_delete_account(self, db_session_with_containers, mock_external_service_dependencies): + """ + Test account deletion (should add task to queue). + """ + fake = Faker() + email = fake.email() + name = fake.name() + password = fake.password(length=12) + # Setup mocks + mock_external_service_dependencies["feature_service"].get_system_features.return_value.is_allow_register = True + mock_external_service_dependencies["billing_service"].is_email_in_freeze.return_value = False + + # Create account + account = AccountService.create_account( + email=email, + name=name, + interface_language="en-US", + password=password, + ) + + with patch("services.account_service.delete_account_task") as mock_delete_task: + # Delete account + AccountService.delete_account(account) + + # Verify task was added to queue + mock_delete_task.delay.assert_called_once_with(account.id) + + def test_generate_account_deletion_verification_code( + self, db_session_with_containers, mock_external_service_dependencies + ): + """ + Test generating account deletion verification code. + """ + fake = Faker() + email = fake.email() + name = fake.name() + password = fake.password(length=12) + # Setup mocks + mock_external_service_dependencies["feature_service"].get_system_features.return_value.is_allow_register = True + mock_external_service_dependencies["billing_service"].is_email_in_freeze.return_value = False + + # Create account + account = AccountService.create_account( + email=email, + name=name, + interface_language="en-US", + password=password, + ) + + # Generate verification code + token, code = AccountService.generate_account_deletion_verification_code(account) + + assert token is not None + assert code is not None + assert len(code) == 6 + assert code.isdigit() + + def test_verify_account_deletion_code_valid(self, db_session_with_containers, mock_external_service_dependencies): + """ + Test verifying valid account deletion code. + """ + fake = Faker() + email = fake.email() + name = fake.name() + password = fake.password(length=12) + # Setup mocks + mock_external_service_dependencies["feature_service"].get_system_features.return_value.is_allow_register = True + mock_external_service_dependencies["billing_service"].is_email_in_freeze.return_value = False + + # Create account + account = AccountService.create_account( + email=email, + name=name, + interface_language="en-US", + password=password, + ) + + # Generate verification code + token, code = AccountService.generate_account_deletion_verification_code(account) + + # Verify code + is_valid = AccountService.verify_account_deletion_code(token, code) + assert is_valid is True + + def test_verify_account_deletion_code_invalid(self, db_session_with_containers, mock_external_service_dependencies): + """ + Test verifying invalid account deletion code. + """ + fake = Faker() + email = fake.email() + name = fake.name() + password = fake.password(length=12) + wrong_code = fake.numerify(text="######") + # Setup mocks + mock_external_service_dependencies["feature_service"].get_system_features.return_value.is_allow_register = True + mock_external_service_dependencies["billing_service"].is_email_in_freeze.return_value = False + + # Create account + account = AccountService.create_account( + email=email, + name=name, + interface_language="en-US", + password=password, + ) + + # Generate verification code + token, code = AccountService.generate_account_deletion_verification_code(account) + + # Verify with wrong code + is_valid = AccountService.verify_account_deletion_code(token, wrong_code) + assert is_valid is False + + def test_verify_account_deletion_code_invalid_token( + self, db_session_with_containers, mock_external_service_dependencies + ): + """ + Test verifying account deletion code with invalid token. + """ + fake = Faker() + invalid_token = fake.uuid4() + invalid_code = fake.numerify(text="######") + is_valid = AccountService.verify_account_deletion_code(invalid_token, invalid_code) + assert is_valid is False + + +class TestTenantService: + """Integration tests for TenantService using testcontainers.""" + + @pytest.fixture + def mock_external_service_dependencies(self): + """Mock setup for external service dependencies.""" + with ( + patch("services.account_service.FeatureService") as mock_feature_service, + patch("services.account_service.BillingService") as mock_billing_service, + ): + # Setup default mock returns + mock_feature_service.get_system_features.return_value.is_allow_create_workspace = True + mock_feature_service.get_system_features.return_value.license.workspaces.is_available.return_value = True + mock_billing_service.is_email_in_freeze.return_value = False + + yield { + "feature_service": mock_feature_service, + "billing_service": mock_billing_service, + } + + def test_create_tenant_success(self, db_session_with_containers, mock_external_service_dependencies): + """ + Test successful tenant creation with default settings. + """ + fake = Faker() + tenant_name = fake.company() + # Setup mocks + mock_external_service_dependencies[ + "feature_service" + ].get_system_features.return_value.is_allow_create_workspace = True + + # Create tenant + tenant = TenantService.create_tenant(name=tenant_name) + + assert tenant.name == tenant_name + assert tenant.plan == "basic" + assert tenant.status == "normal" + assert tenant.encrypt_public_key is not None + + def test_create_tenant_workspace_creation_disabled( + self, db_session_with_containers, mock_external_service_dependencies + ): + """ + Test tenant creation when workspace creation is disabled. + """ + fake = Faker() + tenant_name = fake.company() + # Setup mocks to disable workspace creation + mock_external_service_dependencies[ + "feature_service" + ].get_system_features.return_value.is_allow_create_workspace = False + + with pytest.raises(NotAllowedCreateWorkspace): # NotAllowedCreateWorkspace exception + TenantService.create_tenant(name=tenant_name) + + def test_create_tenant_with_custom_name(self, db_session_with_containers, mock_external_service_dependencies): + """ + Test tenant creation with custom name and setup flag. + """ + fake = Faker() + custom_tenant_name = fake.company() + # Setup mocks + mock_external_service_dependencies[ + "feature_service" + ].get_system_features.return_value.is_allow_create_workspace = False + + # Create tenant with setup flag (should bypass workspace creation restriction) + tenant = TenantService.create_tenant(name=custom_tenant_name, is_setup=True, is_from_dashboard=True) + + assert tenant.name == custom_tenant_name + assert tenant.plan == "basic" + assert tenant.status == "normal" + assert tenant.encrypt_public_key is not None + + def test_create_tenant_member_success(self, db_session_with_containers, mock_external_service_dependencies): + """ + Test successful tenant member creation. + """ + fake = Faker() + tenant_name = fake.company() + email = fake.email() + name = fake.name() + password = fake.password(length=12) + # Setup mocks + mock_external_service_dependencies[ + "feature_service" + ].get_system_features.return_value.is_allow_create_workspace = True + + # Create tenant and account + tenant = TenantService.create_tenant(name=tenant_name) + account = AccountService.create_account( + email=email, + name=name, + interface_language="en-US", + password=password, + ) + + # Create tenant member + tenant_member = TenantService.create_tenant_member(tenant, account, role="admin") + + assert tenant_member.tenant_id == tenant.id + assert tenant_member.account_id == account.id + assert tenant_member.role == "admin" + + def test_create_tenant_member_duplicate_owner(self, db_session_with_containers, mock_external_service_dependencies): + """ + Test creating duplicate owner for a tenant (should fail). + """ + fake = Faker() + tenant_name = fake.company() + email1 = fake.email() + name1 = fake.name() + password1 = fake.password(length=12) + email2 = fake.email() + name2 = fake.name() + password2 = fake.password(length=12) + # Setup mocks + mock_external_service_dependencies[ + "feature_service" + ].get_system_features.return_value.is_allow_create_workspace = True + + # Create tenant and accounts + tenant = TenantService.create_tenant(name=tenant_name) + account1 = AccountService.create_account( + email=email1, + name=name1, + interface_language="en-US", + password=password1, + ) + account2 = AccountService.create_account( + email=email2, + name=name2, + interface_language="en-US", + password=password2, + ) + + # Create first owner + TenantService.create_tenant_member(tenant, account1, role="owner") + + # Try to create second owner (should fail) + with pytest.raises(Exception, match="Tenant already has an owner"): + TenantService.create_tenant_member(tenant, account2, role="owner") + + def test_create_tenant_member_existing_member(self, db_session_with_containers, mock_external_service_dependencies): + """ + Test updating role for existing tenant member. + """ + fake = Faker() + tenant_name = fake.company() + email = fake.email() + name = fake.name() + password = fake.password(length=12) + # Setup mocks + mock_external_service_dependencies[ + "feature_service" + ].get_system_features.return_value.is_allow_create_workspace = True + + # Create tenant and account + tenant = TenantService.create_tenant(name=tenant_name) + account = AccountService.create_account( + email=email, + name=name, + interface_language="en-US", + password=password, + ) + + # Create member with initial role + tenant_member1 = TenantService.create_tenant_member(tenant, account, role="normal") + assert tenant_member1.role == "normal" + + # Update member role + tenant_member2 = TenantService.create_tenant_member(tenant, account, role="editor") + assert tenant_member2.tenant_id == tenant_member1.tenant_id + assert tenant_member2.account_id == tenant_member1.account_id + assert tenant_member2.role == "editor" + + def test_get_join_tenants_success(self, db_session_with_containers, mock_external_service_dependencies): + """ + Test getting join tenants for an account. + """ + fake = Faker() + email = fake.email() + name = fake.name() + password = fake.password(length=12) + tenant1_name = fake.company() + tenant2_name = fake.company() + # Setup mocks + mock_external_service_dependencies[ + "feature_service" + ].get_system_features.return_value.is_allow_create_workspace = True + + # Create account and tenants + account = AccountService.create_account( + email=email, + name=name, + interface_language="en-US", + password=password, + ) + tenant1 = TenantService.create_tenant(name=tenant1_name) + tenant2 = TenantService.create_tenant(name=tenant2_name) + + # Add account to both tenants + TenantService.create_tenant_member(tenant1, account, role="normal") + TenantService.create_tenant_member(tenant2, account, role="admin") + + # Get join tenants + join_tenants = TenantService.get_join_tenants(account) + + assert len(join_tenants) == 2 + tenant_names = [tenant.name for tenant in join_tenants] + assert tenant1_name in tenant_names + assert tenant2_name in tenant_names + + def test_get_current_tenant_by_account_success( + self, db_session_with_containers, mock_external_service_dependencies + ): + """ + Test getting current tenant by account successfully. + """ + fake = Faker() + email = fake.email() + name = fake.name() + password = fake.password(length=12) + tenant_name = fake.company() + # Setup mocks + mock_external_service_dependencies[ + "feature_service" + ].get_system_features.return_value.is_allow_create_workspace = True + + # Create account and tenant + account = AccountService.create_account( + email=email, + name=name, + interface_language="en-US", + password=password, + ) + tenant = TenantService.create_tenant(name=tenant_name) + + # Add account to tenant and set as current + TenantService.create_tenant_member(tenant, account, role="owner") + account.current_tenant = tenant + from extensions.ext_database import db + + db.session.commit() + + # Get current tenant + current_tenant = TenantService.get_current_tenant_by_account(account) + + assert current_tenant.id == tenant.id + assert current_tenant.name == tenant.name + assert current_tenant.role == "owner" + + def test_get_current_tenant_by_account_not_found( + self, db_session_with_containers, mock_external_service_dependencies + ): + """ + Test getting current tenant when account has no current tenant. + """ + fake = Faker() + email = fake.email() + name = fake.name() + password = fake.password(length=12) + # Setup mocks + mock_external_service_dependencies[ + "feature_service" + ].get_system_features.return_value.is_allow_create_workspace = True + + # Create account without setting current tenant + account = AccountService.create_account( + email=email, + name=name, + interface_language="en-US", + password=password, + ) + + # Try to get current tenant (should fail) + with pytest.raises(AttributeError): + TenantService.get_current_tenant_by_account(account) + + def test_switch_tenant_success(self, db_session_with_containers, mock_external_service_dependencies): + """ + Test successful tenant switching. + """ + fake = Faker() + email = fake.email() + name = fake.name() + password = fake.password(length=12) + tenant1_name = fake.company() + tenant2_name = fake.company() + # Setup mocks + mock_external_service_dependencies[ + "feature_service" + ].get_system_features.return_value.is_allow_create_workspace = True + + # Create account and tenants + account = AccountService.create_account( + email=email, + name=name, + interface_language="en-US", + password=password, + ) + tenant1 = TenantService.create_tenant(name=tenant1_name) + tenant2 = TenantService.create_tenant(name=tenant2_name) + + # Add account to both tenants + TenantService.create_tenant_member(tenant1, account, role="owner") + TenantService.create_tenant_member(tenant2, account, role="admin") + + # Set initial current tenant + account.current_tenant = tenant1 + from extensions.ext_database import db + + db.session.commit() + + # Switch to second tenant + TenantService.switch_tenant(account, tenant2.id) + + # Verify tenant was switched + db.session.refresh(account) + assert account.current_tenant_id == tenant2.id + + def test_switch_tenant_no_tenant_id(self, db_session_with_containers, mock_external_service_dependencies): + """ + Test tenant switching without providing tenant ID. + """ + fake = Faker() + email = fake.email() + name = fake.name() + password = fake.password(length=12) + # Setup mocks + mock_external_service_dependencies[ + "feature_service" + ].get_system_features.return_value.is_allow_create_workspace = True + + # Create account + account = AccountService.create_account( + email=email, + name=name, + interface_language="en-US", + password=password, + ) + + # Try to switch tenant without providing tenant ID + with pytest.raises(ValueError, match="Tenant ID must be provided"): + TenantService.switch_tenant(account, None) + + def test_switch_tenant_account_not_member(self, db_session_with_containers, mock_external_service_dependencies): + """ + Test switching to a tenant where account is not a member. + """ + fake = Faker() + email = fake.email() + name = fake.name() + password = fake.password(length=12) + tenant_name = fake.company() + # Setup mocks + mock_external_service_dependencies[ + "feature_service" + ].get_system_features.return_value.is_allow_create_workspace = True + + # Create account and tenant + account = AccountService.create_account( + email=email, + name=name, + interface_language="en-US", + password=password, + ) + tenant = TenantService.create_tenant(name=tenant_name) + + # Try to switch to tenant where account is not a member + with pytest.raises(Exception, match="Tenant not found or account is not a member of the tenant"): + TenantService.switch_tenant(account, tenant.id) + + def test_has_roles_success(self, db_session_with_containers, mock_external_service_dependencies): + """ + Test checking if tenant has specific roles. + """ + fake = Faker() + tenant_name = fake.company() + owner_email = fake.email() + owner_name = fake.name() + owner_password = fake.password(length=12) + admin_email = fake.email() + admin_name = fake.name() + admin_password = fake.password(length=12) + # Setup mocks + mock_external_service_dependencies[ + "feature_service" + ].get_system_features.return_value.is_allow_create_workspace = True + + # Create tenant and accounts + tenant = TenantService.create_tenant(name=tenant_name) + owner_account = AccountService.create_account( + email=owner_email, + name=owner_name, + interface_language="en-US", + password=owner_password, + ) + admin_account = AccountService.create_account( + email=admin_email, + name=admin_name, + interface_language="en-US", + password=admin_password, + ) + + # Add members with different roles + TenantService.create_tenant_member(tenant, owner_account, role="owner") + TenantService.create_tenant_member(tenant, admin_account, role="admin") + + # Check if tenant has owner role + from models.account import TenantAccountRole + + has_owner = TenantService.has_roles(tenant, [TenantAccountRole.OWNER]) + assert has_owner is True + + # Check if tenant has admin role + has_admin = TenantService.has_roles(tenant, [TenantAccountRole.ADMIN]) + assert has_admin is True + + # Check if tenant has normal role (should be False) + has_normal = TenantService.has_roles(tenant, [TenantAccountRole.NORMAL]) + assert has_normal is False + + def test_has_roles_invalid_role_type(self, db_session_with_containers, mock_external_service_dependencies): + """ + Test checking roles with invalid role type. + """ + fake = Faker() + tenant_name = fake.company() + invalid_role = fake.word() + # Setup mocks + mock_external_service_dependencies[ + "feature_service" + ].get_system_features.return_value.is_allow_create_workspace = True + + # Create tenant + tenant = TenantService.create_tenant(name=tenant_name) + + # Try to check roles with invalid role type + with pytest.raises(ValueError, match="all roles must be TenantAccountRole"): + TenantService.has_roles(tenant, [invalid_role]) + + def test_get_user_role_success(self, db_session_with_containers, mock_external_service_dependencies): + """ + Test getting user role in a tenant. + """ + fake = Faker() + tenant_name = fake.company() + email = fake.email() + name = fake.name() + password = fake.password(length=12) + # Setup mocks + mock_external_service_dependencies[ + "feature_service" + ].get_system_features.return_value.is_allow_create_workspace = True + + # Create tenant and account + tenant = TenantService.create_tenant(name=tenant_name) + account = AccountService.create_account( + email=email, + name=name, + interface_language="en-US", + password=password, + ) + + # Add account to tenant with specific role + TenantService.create_tenant_member(tenant, account, role="editor") + + # Get user role + user_role = TenantService.get_user_role(account, tenant) + + assert user_role == "editor" + + def test_check_member_permission_success(self, db_session_with_containers, mock_external_service_dependencies): + """ + Test checking member permission successfully. + """ + fake = Faker() + tenant_name = fake.company() + owner_email = fake.email() + owner_name = fake.name() + owner_password = fake.password(length=12) + member_email = fake.email() + member_name = fake.name() + member_password = fake.password(length=12) + # Setup mocks + mock_external_service_dependencies[ + "feature_service" + ].get_system_features.return_value.is_allow_create_workspace = True + + # Create tenant and accounts + tenant = TenantService.create_tenant(name=tenant_name) + owner_account = AccountService.create_account( + email=owner_email, + name=owner_name, + interface_language="en-US", + password=owner_password, + ) + member_account = AccountService.create_account( + email=member_email, + name=member_name, + interface_language="en-US", + password=member_password, + ) + + # Add members with different roles + TenantService.create_tenant_member(tenant, owner_account, role="owner") + TenantService.create_tenant_member(tenant, member_account, role="normal") + + # Check owner permission to add member (should succeed) + TenantService.check_member_permission(tenant, owner_account, member_account, "add") + + def test_check_member_permission_invalid_action( + self, db_session_with_containers, mock_external_service_dependencies + ): + """ + Test checking member permission with invalid action. + """ + fake = Faker() + tenant_name = fake.company() + email = fake.email() + name = fake.name() + password = fake.password(length=12) + invalid_action = fake.word() + # Setup mocks + mock_external_service_dependencies[ + "feature_service" + ].get_system_features.return_value.is_allow_create_workspace = True + + # Create tenant and account + tenant = TenantService.create_tenant(name=tenant_name) + account = AccountService.create_account( + email=email, + name=name, + interface_language="en-US", + password=password, + ) + + # Add account to tenant + TenantService.create_tenant_member(tenant, account, role="owner") + + # Try to check permission with invalid action + with pytest.raises(Exception, match="Invalid action"): + TenantService.check_member_permission(tenant, account, None, invalid_action) + + def test_check_member_permission_operate_self(self, db_session_with_containers, mock_external_service_dependencies): + """ + Test checking member permission when trying to operate self. + """ + fake = Faker() + tenant_name = fake.company() + email = fake.email() + name = fake.name() + password = fake.password(length=12) + # Setup mocks + mock_external_service_dependencies[ + "feature_service" + ].get_system_features.return_value.is_allow_create_workspace = True + + # Create tenant and account + tenant = TenantService.create_tenant(name=tenant_name) + account = AccountService.create_account( + email=email, + name=name, + interface_language="en-US", + password=password, + ) + + # Add account to tenant + TenantService.create_tenant_member(tenant, account, role="owner") + + # Try to check permission to operate self + with pytest.raises(Exception, match="Cannot operate self"): + TenantService.check_member_permission(tenant, account, account, "remove") + + def test_remove_member_from_tenant_success(self, db_session_with_containers, mock_external_service_dependencies): + """ + Test successful member removal from tenant. + """ + fake = Faker() + tenant_name = fake.company() + owner_email = fake.email() + owner_name = fake.name() + owner_password = fake.password(length=12) + member_email = fake.email() + member_name = fake.name() + member_password = fake.password(length=12) + # Setup mocks + mock_external_service_dependencies[ + "feature_service" + ].get_system_features.return_value.is_allow_create_workspace = True + + # Create tenant and accounts + tenant = TenantService.create_tenant(name=tenant_name) + owner_account = AccountService.create_account( + email=owner_email, + name=owner_name, + interface_language="en-US", + password=owner_password, + ) + member_account = AccountService.create_account( + email=member_email, + name=member_name, + interface_language="en-US", + password=member_password, + ) + + # Add members with different roles + TenantService.create_tenant_member(tenant, owner_account, role="owner") + TenantService.create_tenant_member(tenant, member_account, role="normal") + + # Remove member + TenantService.remove_member_from_tenant(tenant, member_account, owner_account) + + # Verify member was removed + from extensions.ext_database import db + from models.account import TenantAccountJoin + + member_join = ( + db.session.query(TenantAccountJoin).filter_by(tenant_id=tenant.id, account_id=member_account.id).first() + ) + assert member_join is None + + def test_remove_member_from_tenant_operate_self( + self, db_session_with_containers, mock_external_service_dependencies + ): + """ + Test removing member when trying to operate self. + """ + fake = Faker() + tenant_name = fake.company() + email = fake.email() + name = fake.name() + password = fake.password(length=12) + # Setup mocks + mock_external_service_dependencies[ + "feature_service" + ].get_system_features.return_value.is_allow_create_workspace = True + + # Create tenant and account + tenant = TenantService.create_tenant(name=tenant_name) + account = AccountService.create_account( + email=email, + name=name, + interface_language="en-US", + password=password, + ) + + # Add account to tenant + TenantService.create_tenant_member(tenant, account, role="owner") + + # Try to remove self + with pytest.raises(Exception, match="Cannot operate self"): + TenantService.remove_member_from_tenant(tenant, account, account) + + def test_remove_member_from_tenant_not_member(self, db_session_with_containers, mock_external_service_dependencies): + """ + Test removing member who is not in the tenant. + """ + fake = Faker() + tenant_name = fake.company() + owner_email = fake.email() + owner_name = fake.name() + owner_password = fake.password(length=12) + non_member_email = fake.email() + non_member_name = fake.name() + non_member_password = fake.password(length=12) + # Setup mocks + mock_external_service_dependencies[ + "feature_service" + ].get_system_features.return_value.is_allow_create_workspace = True + + # Create tenant and accounts + tenant = TenantService.create_tenant(name=tenant_name) + owner_account = AccountService.create_account( + email=owner_email, + name=owner_name, + interface_language="en-US", + password=owner_password, + ) + non_member_account = AccountService.create_account( + email=non_member_email, + name=non_member_name, + interface_language="en-US", + password=non_member_password, + ) + + # Add only owner to tenant + TenantService.create_tenant_member(tenant, owner_account, role="owner") + + # Try to remove non-member + with pytest.raises(Exception, match="Member not in tenant"): + TenantService.remove_member_from_tenant(tenant, non_member_account, owner_account) + + def test_update_member_role_success(self, db_session_with_containers, mock_external_service_dependencies): + """ + Test successful member role update. + """ + fake = Faker() + tenant_name = fake.company() + owner_email = fake.email() + owner_name = fake.name() + owner_password = fake.password(length=12) + member_email = fake.email() + member_name = fake.name() + member_password = fake.password(length=12) + # Setup mocks + mock_external_service_dependencies[ + "feature_service" + ].get_system_features.return_value.is_allow_create_workspace = True + + # Create tenant and accounts + tenant = TenantService.create_tenant(name=tenant_name) + owner_account = AccountService.create_account( + email=owner_email, + name=owner_name, + interface_language="en-US", + password=owner_password, + ) + member_account = AccountService.create_account( + email=member_email, + name=member_name, + interface_language="en-US", + password=member_password, + ) + + # Add members with different roles + TenantService.create_tenant_member(tenant, owner_account, role="owner") + TenantService.create_tenant_member(tenant, member_account, role="normal") + + # Update member role + TenantService.update_member_role(tenant, member_account, "admin", owner_account) + + # Verify role was updated + from extensions.ext_database import db + from models.account import TenantAccountJoin + + member_join = ( + db.session.query(TenantAccountJoin).filter_by(tenant_id=tenant.id, account_id=member_account.id).first() + ) + assert member_join.role == "admin" + + def test_update_member_role_to_owner(self, db_session_with_containers, mock_external_service_dependencies): + """ + Test updating member role to owner (should change current owner to admin). + """ + fake = Faker() + tenant_name = fake.company() + owner_email = fake.email() + owner_name = fake.name() + owner_password = fake.password(length=12) + member_email = fake.email() + member_name = fake.name() + member_password = fake.password(length=12) + # Setup mocks + mock_external_service_dependencies[ + "feature_service" + ].get_system_features.return_value.is_allow_create_workspace = True + + # Create tenant and accounts + tenant = TenantService.create_tenant(name=tenant_name) + owner_account = AccountService.create_account( + email=owner_email, + name=owner_name, + interface_language="en-US", + password=owner_password, + ) + member_account = AccountService.create_account( + email=member_email, + name=member_name, + interface_language="en-US", + password=member_password, + ) + + # Add members with different roles + TenantService.create_tenant_member(tenant, owner_account, role="owner") + TenantService.create_tenant_member(tenant, member_account, role="admin") + + # Update member role to owner + TenantService.update_member_role(tenant, member_account, "owner", owner_account) + + # Verify roles were updated correctly + from extensions.ext_database import db + from models.account import TenantAccountJoin + + owner_join = ( + db.session.query(TenantAccountJoin).filter_by(tenant_id=tenant.id, account_id=owner_account.id).first() + ) + member_join = ( + db.session.query(TenantAccountJoin).filter_by(tenant_id=tenant.id, account_id=member_account.id).first() + ) + assert owner_join.role == "admin" + assert member_join.role == "owner" + + def test_update_member_role_already_assigned(self, db_session_with_containers, mock_external_service_dependencies): + """ + Test updating member role to already assigned role. + """ + fake = Faker() + tenant_name = fake.company() + owner_email = fake.email() + owner_name = fake.name() + owner_password = fake.password(length=12) + member_email = fake.email() + member_name = fake.name() + member_password = fake.password(length=12) + # Setup mocks + mock_external_service_dependencies[ + "feature_service" + ].get_system_features.return_value.is_allow_create_workspace = True + + # Create tenant and accounts + tenant = TenantService.create_tenant(name=tenant_name) + owner_account = AccountService.create_account( + email=owner_email, + name=owner_name, + interface_language="en-US", + password=owner_password, + ) + member_account = AccountService.create_account( + email=member_email, + name=member_name, + interface_language="en-US", + password=member_password, + ) + + # Add members with different roles + TenantService.create_tenant_member(tenant, owner_account, role="owner") + TenantService.create_tenant_member(tenant, member_account, role="admin") + + # Try to update member role to already assigned role + with pytest.raises(Exception, match="The provided role is already assigned to the member"): + TenantService.update_member_role(tenant, member_account, "admin", owner_account) + + def test_get_tenant_count_success(self, db_session_with_containers, mock_external_service_dependencies): + """ + Test getting tenant count successfully. + """ + fake = Faker() + tenant1_name = fake.company() + tenant2_name = fake.company() + tenant3_name = fake.company() + # Setup mocks + mock_external_service_dependencies[ + "feature_service" + ].get_system_features.return_value.is_allow_create_workspace = True + + # Create multiple tenants + tenant1 = TenantService.create_tenant(name=tenant1_name) + tenant2 = TenantService.create_tenant(name=tenant2_name) + tenant3 = TenantService.create_tenant(name=tenant3_name) + + # Get tenant count + tenant_count = TenantService.get_tenant_count() + + # Should have at least 3 tenants (may be more from other tests) + assert tenant_count >= 3 + + def test_create_owner_tenant_if_not_exist_new_user( + self, db_session_with_containers, mock_external_service_dependencies + ): + """ + Test creating owner tenant for new user without existing tenants. + """ + fake = Faker() + email = fake.email() + name = fake.name() + password = fake.password(length=12) + workspace_name = fake.company() + # Setup mocks + mock_external_service_dependencies[ + "feature_service" + ].get_system_features.return_value.is_allow_create_workspace = True + mock_external_service_dependencies[ + "feature_service" + ].get_system_features.return_value.license.workspaces.is_available.return_value = True + + # Create account + account = AccountService.create_account( + email=email, + name=name, + interface_language="en-US", + password=password, + ) + + # Create owner tenant + TenantService.create_owner_tenant_if_not_exist(account, name=workspace_name) + + # Verify tenant was created and linked + from extensions.ext_database import db + from models.account import TenantAccountJoin + + tenant_join = db.session.query(TenantAccountJoin).filter_by(account_id=account.id).first() + assert tenant_join is not None + assert tenant_join.role == "owner" + assert account.current_tenant is not None + assert account.current_tenant.name == workspace_name + + def test_create_owner_tenant_if_not_exist_existing_tenant( + self, db_session_with_containers, mock_external_service_dependencies + ): + """ + Test creating owner tenant when user already has a tenant. + """ + fake = Faker() + email = fake.email() + name = fake.name() + password = fake.password(length=12) + existing_tenant_name = fake.company() + new_workspace_name = fake.company() + # Setup mocks + mock_external_service_dependencies[ + "feature_service" + ].get_system_features.return_value.is_allow_create_workspace = True + mock_external_service_dependencies[ + "feature_service" + ].get_system_features.return_value.license.workspaces.is_available.return_value = True + + # Create account and existing tenant + account = AccountService.create_account( + email=email, + name=name, + interface_language="en-US", + password=password, + ) + existing_tenant = TenantService.create_tenant(name=existing_tenant_name) + TenantService.create_tenant_member(existing_tenant, account, role="owner") + account.current_tenant = existing_tenant + from extensions.ext_database import db + + db.session.commit() + + # Try to create owner tenant again (should not create new one) + TenantService.create_owner_tenant_if_not_exist(account, name=new_workspace_name) + + # Verify no new tenant was created + tenant_joins = db.session.query(TenantAccountJoin).filter_by(account_id=account.id).all() + assert len(tenant_joins) == 1 + assert account.current_tenant.id == existing_tenant.id + + def test_create_owner_tenant_if_not_exist_workspace_disabled( + self, db_session_with_containers, mock_external_service_dependencies + ): + """ + Test creating owner tenant when workspace creation is disabled. + """ + fake = Faker() + email = fake.email() + name = fake.name() + password = fake.password(length=12) + workspace_name = fake.company() + # Setup mocks to disable workspace creation + mock_external_service_dependencies[ + "feature_service" + ].get_system_features.return_value.is_allow_create_workspace = False + + # Create account + account = AccountService.create_account( + email=email, + name=name, + interface_language="en-US", + password=password, + ) + + # Try to create owner tenant (should fail) + with pytest.raises(WorkSpaceNotAllowedCreateError): # WorkSpaceNotAllowedCreateError exception + TenantService.create_owner_tenant_if_not_exist(account, name=workspace_name) + + def test_get_tenant_members_success(self, db_session_with_containers, mock_external_service_dependencies): + """ + Test getting tenant members successfully. + """ + fake = Faker() + tenant_name = fake.company() + owner_email = fake.email() + owner_name = fake.name() + owner_password = fake.password(length=12) + admin_email = fake.email() + admin_name = fake.name() + admin_password = fake.password(length=12) + normal_email = fake.email() + normal_name = fake.name() + normal_password = fake.password(length=12) + # Setup mocks + mock_external_service_dependencies[ + "feature_service" + ].get_system_features.return_value.is_allow_create_workspace = True + + # Create tenant and accounts + tenant = TenantService.create_tenant(name=tenant_name) + owner_account = AccountService.create_account( + email=owner_email, + name=owner_name, + interface_language="en-US", + password=owner_password, + ) + admin_account = AccountService.create_account( + email=admin_email, + name=admin_name, + interface_language="en-US", + password=admin_password, + ) + normal_account = AccountService.create_account( + email=normal_email, + name=normal_name, + interface_language="en-US", + password=normal_password, + ) + + # Add members with different roles + TenantService.create_tenant_member(tenant, owner_account, role="owner") + TenantService.create_tenant_member(tenant, admin_account, role="admin") + TenantService.create_tenant_member(tenant, normal_account, role="normal") + + # Get tenant members + members = TenantService.get_tenant_members(tenant) + + assert len(members) == 3 + member_emails = [member.email for member in members] + assert owner_email in member_emails + assert admin_email in member_emails + assert normal_email in member_emails + + # Verify roles are set correctly + for member in members: + if member.email == owner_email: + assert member.role == "owner" + elif member.email == admin_email: + assert member.role == "admin" + elif member.email == normal_email: + assert member.role == "normal" + + def test_get_dataset_operator_members_success(self, db_session_with_containers, mock_external_service_dependencies): + """ + Test getting dataset operator members successfully. + """ + fake = Faker() + tenant_name = fake.company() + owner_email = fake.email() + owner_name = fake.name() + owner_password = fake.password(length=12) + operator_email = fake.email() + operator_name = fake.name() + operator_password = fake.password(length=12) + normal_email = fake.email() + normal_name = fake.name() + normal_password = fake.password(length=12) + # Setup mocks + mock_external_service_dependencies[ + "feature_service" + ].get_system_features.return_value.is_allow_create_workspace = True + + # Create tenant and accounts + tenant = TenantService.create_tenant(name=tenant_name) + owner_account = AccountService.create_account( + email=owner_email, + name=owner_name, + interface_language="en-US", + password=owner_password, + ) + dataset_operator_account = AccountService.create_account( + email=operator_email, + name=operator_name, + interface_language="en-US", + password=operator_password, + ) + normal_account = AccountService.create_account( + email=normal_email, + name=normal_name, + interface_language="en-US", + password=normal_password, + ) + + # Add members with different roles + TenantService.create_tenant_member(tenant, owner_account, role="owner") + TenantService.create_tenant_member(tenant, dataset_operator_account, role="dataset_operator") + TenantService.create_tenant_member(tenant, normal_account, role="normal") + + # Get dataset operator members + dataset_operators = TenantService.get_dataset_operator_members(tenant) + + assert len(dataset_operators) == 1 + assert dataset_operators[0].email == operator_email + assert dataset_operators[0].role == "dataset_operator" + + def test_get_custom_config_success(self, db_session_with_containers, mock_external_service_dependencies): + """ + Test getting custom config successfully. + """ + fake = Faker() + tenant_name = fake.company() + theme = fake.random_element(elements=("dark", "light")) + language = fake.random_element(elements=("zh-CN", "en-US")) + # Setup mocks + mock_external_service_dependencies[ + "feature_service" + ].get_system_features.return_value.is_allow_create_workspace = True + + # Create tenant with custom config + tenant = TenantService.create_tenant(name=tenant_name) + + # Set custom config + custom_config = {"theme": theme, "language": language, "feature_flags": {"beta": True}} + tenant.custom_config_dict = custom_config + from extensions.ext_database import db + + db.session.commit() + + # Get custom config + retrieved_config = TenantService.get_custom_config(tenant.id) + + assert retrieved_config == custom_config + assert retrieved_config["theme"] == theme + assert retrieved_config["language"] == language + assert retrieved_config["feature_flags"]["beta"] is True + + +class TestRegisterService: + """Integration tests for RegisterService using testcontainers.""" + + @pytest.fixture + def mock_external_service_dependencies(self): + """Mock setup for external service dependencies.""" + with ( + patch("services.account_service.FeatureService") as mock_feature_service, + patch("services.account_service.BillingService") as mock_billing_service, + patch("services.account_service.PassportService") as mock_passport_service, + ): + # Setup default mock returns + mock_feature_service.get_system_features.return_value.is_allow_register = True + mock_feature_service.get_system_features.return_value.is_allow_create_workspace = True + mock_feature_service.get_system_features.return_value.license.workspaces.is_available.return_value = True + mock_billing_service.is_email_in_freeze.return_value = False + mock_passport_service.return_value.issue.return_value = "mock_jwt_token" + + yield { + "feature_service": mock_feature_service, + "billing_service": mock_billing_service, + "passport_service": mock_passport_service, + } + + def test_setup_success(self, db_session_with_containers, mock_external_service_dependencies): + """ + Test successful system setup with account creation and tenant setup. + """ + fake = Faker() + admin_email = fake.email() + admin_name = fake.name() + admin_password = fake.password(length=12) + ip_address = fake.ipv4() + # Setup mocks + mock_external_service_dependencies["feature_service"].get_system_features.return_value.is_allow_register = True + mock_external_service_dependencies["billing_service"].is_email_in_freeze.return_value = False + + # Execute setup + RegisterService.setup( + email=admin_email, + name=admin_name, + password=admin_password, + ip_address=ip_address, + ) + + # Verify account was created + from extensions.ext_database import db + from models.account import Account + from models.model import DifySetup + + account = db.session.query(Account).filter_by(email=admin_email).first() + assert account is not None + assert account.name == admin_name + assert account.last_login_ip == ip_address + assert account.initialized_at is not None + assert account.status == "active" + + # Verify DifySetup was created + dify_setup = db.session.query(DifySetup).first() + assert dify_setup is not None + + # Verify tenant was created and linked + from models.account import TenantAccountJoin + + tenant_join = db.session.query(TenantAccountJoin).filter_by(account_id=account.id).first() + assert tenant_join is not None + assert tenant_join.role == "owner" + + def test_setup_failure_rollback(self, db_session_with_containers, mock_external_service_dependencies): + """ + Test setup failure with proper rollback of all created entities. + """ + fake = Faker() + admin_email = fake.email() + admin_name = fake.name() + admin_password = fake.password(length=12) + ip_address = fake.ipv4() + # Setup mocks + mock_external_service_dependencies["feature_service"].get_system_features.return_value.is_allow_register = True + mock_external_service_dependencies["billing_service"].is_email_in_freeze.return_value = False + + # Mock AccountService.create_account to raise exception + with patch("services.account_service.AccountService.create_account") as mock_create_account: + mock_create_account.side_effect = Exception("Database error") + + # Execute setup and verify exception + with pytest.raises(ValueError, match="Setup failed: Database error"): + RegisterService.setup( + email=admin_email, + name=admin_name, + password=admin_password, + ip_address=ip_address, + ) + + # Verify no entities were created (rollback worked) + from extensions.ext_database import db + from models.account import Account, Tenant, TenantAccountJoin + from models.model import DifySetup + + account = db.session.query(Account).filter_by(email=admin_email).first() + tenant_count = db.session.query(Tenant).count() + tenant_join_count = db.session.query(TenantAccountJoin).count() + dify_setup_count = db.session.query(DifySetup).count() + + assert account is None + assert tenant_count == 0 + assert tenant_join_count == 0 + assert dify_setup_count == 0 + + def test_register_success(self, db_session_with_containers, mock_external_service_dependencies): + """ + Test successful account registration with workspace creation. + """ + fake = Faker() + email = fake.email() + name = fake.name() + password = fake.password(length=12) + language = fake.random_element(elements=("en-US", "zh-CN")) + # Setup mocks + mock_external_service_dependencies["feature_service"].get_system_features.return_value.is_allow_register = True + mock_external_service_dependencies[ + "feature_service" + ].get_system_features.return_value.is_allow_create_workspace = True + mock_external_service_dependencies[ + "feature_service" + ].get_system_features.return_value.license.workspaces.is_available.return_value = True + mock_external_service_dependencies["billing_service"].is_email_in_freeze.return_value = False + + # Execute registration + account = RegisterService.register( + email=email, + name=name, + password=password, + language=language, + ) + + # Verify account was created + assert account.email == email + assert account.name == name + assert account.status == "active" + assert account.initialized_at is not None + + # Verify tenant was created and linked + from extensions.ext_database import db + from models.account import TenantAccountJoin + + tenant_join = db.session.query(TenantAccountJoin).filter_by(account_id=account.id).first() + assert tenant_join is not None + assert tenant_join.role == "owner" + assert account.current_tenant is not None + assert account.current_tenant.name == f"{name}'s Workspace" + + def test_register_with_oauth(self, db_session_with_containers, mock_external_service_dependencies): + """ + Test account registration with OAuth integration. + """ + fake = Faker() + email = fake.email() + name = fake.name() + open_id = fake.uuid4() + provider = fake.random_element(elements=("google", "github", "microsoft")) + language = fake.random_element(elements=("en-US", "zh-CN")) + # Setup mocks + mock_external_service_dependencies["feature_service"].get_system_features.return_value.is_allow_register = True + mock_external_service_dependencies[ + "feature_service" + ].get_system_features.return_value.is_allow_create_workspace = True + mock_external_service_dependencies[ + "feature_service" + ].get_system_features.return_value.license.workspaces.is_available.return_value = True + mock_external_service_dependencies["billing_service"].is_email_in_freeze.return_value = False + + # Execute registration with OAuth + account = RegisterService.register( + email=email, + name=name, + password=None, + open_id=open_id, + provider=provider, + language=language, + ) + + # Verify account was created + assert account.email == email + assert account.name == name + assert account.status == "active" + assert account.initialized_at is not None + + # Verify OAuth integration was created + from extensions.ext_database import db + from models.account import AccountIntegrate + + integration = db.session.query(AccountIntegrate).filter_by(account_id=account.id, provider=provider).first() + assert integration is not None + assert integration.open_id == open_id + + def test_register_with_pending_status(self, db_session_with_containers, mock_external_service_dependencies): + """ + Test account registration with pending status. + """ + fake = Faker() + email = fake.email() + name = fake.name() + password = fake.password(length=12) + language = fake.random_element(elements=("en-US", "zh-CN")) + # Setup mocks + mock_external_service_dependencies["feature_service"].get_system_features.return_value.is_allow_register = True + mock_external_service_dependencies[ + "feature_service" + ].get_system_features.return_value.is_allow_create_workspace = True + mock_external_service_dependencies[ + "feature_service" + ].get_system_features.return_value.license.workspaces.is_available.return_value = True + mock_external_service_dependencies["billing_service"].is_email_in_freeze.return_value = False + + # Execute registration with pending status + from models.account import AccountStatus + + account = RegisterService.register( + email=email, + name=name, + password=password, + language=language, + status=AccountStatus.PENDING, + ) + + # Verify account was created with pending status + assert account.email == email + assert account.name == name + assert account.status == "pending" + assert account.initialized_at is not None + + # Verify tenant was created and linked + from extensions.ext_database import db + from models.account import TenantAccountJoin + + tenant_join = db.session.query(TenantAccountJoin).filter_by(account_id=account.id).first() + assert tenant_join is not None + assert tenant_join.role == "owner" + + def test_register_workspace_creation_disabled(self, db_session_with_containers, mock_external_service_dependencies): + """ + Test account registration when workspace creation is disabled. + """ + fake = Faker() + email = fake.email() + name = fake.name() + password = fake.password(length=12) + language = fake.random_element(elements=("en-US", "zh-CN")) + # Setup mocks + mock_external_service_dependencies["feature_service"].get_system_features.return_value.is_allow_register = True + mock_external_service_dependencies[ + "feature_service" + ].get_system_features.return_value.is_allow_create_workspace = False + mock_external_service_dependencies["billing_service"].is_email_in_freeze.return_value = False + + # with pytest.raises(AccountRegisterError, match="Workspace is not allowed to create."): + account = RegisterService.register( + email=email, + name=name, + password=password, + language=language, + ) + + # Verify account was created with no tenant + assert account.email == email + assert account.name == name + assert account.status == "active" + assert account.initialized_at is not None + + # Verify tenant was created and linked + from extensions.ext_database import db + from models.account import TenantAccountJoin + + tenant_join = db.session.query(TenantAccountJoin).filter_by(account_id=account.id).first() + assert tenant_join is None + + def test_register_workspace_limit_exceeded(self, db_session_with_containers, mock_external_service_dependencies): + """ + Test account registration when workspace limit is exceeded. + """ + fake = Faker() + email = fake.email() + name = fake.name() + password = fake.password(length=12) + language = fake.random_element(elements=("en-US", "zh-CN")) + # Setup mocks + mock_external_service_dependencies["feature_service"].get_system_features.return_value.is_allow_register = True + mock_external_service_dependencies[ + "feature_service" + ].get_system_features.return_value.is_allow_create_workspace = True + mock_external_service_dependencies[ + "feature_service" + ].get_system_features.return_value.license.workspaces.is_available.return_value = False + mock_external_service_dependencies["billing_service"].is_email_in_freeze.return_value = False + + # with pytest.raises(AccountRegisterError, match="Workspace is not allowed to create."): + account = RegisterService.register( + email=email, + name=name, + password=password, + language=language, + ) + + # Verify account was created with no tenant + assert account.email == email + assert account.name == name + assert account.status == "active" + assert account.initialized_at is not None + + # Verify tenant was created and linked + from extensions.ext_database import db + from models.account import TenantAccountJoin + + tenant_join = db.session.query(TenantAccountJoin).filter_by(account_id=account.id).first() + assert tenant_join is None + + def test_register_without_workspace(self, db_session_with_containers, mock_external_service_dependencies): + """ + Test account registration without workspace creation. + """ + fake = Faker() + email = fake.email() + name = fake.name() + password = fake.password(length=12) + language = fake.random_element(elements=("en-US", "zh-CN")) + # Setup mocks + mock_external_service_dependencies["feature_service"].get_system_features.return_value.is_allow_register = True + mock_external_service_dependencies["billing_service"].is_email_in_freeze.return_value = False + + # Execute registration without workspace creation + account = RegisterService.register( + email=email, + name=name, + password=password, + language=language, + create_workspace_required=False, + ) + + # Verify account was created + assert account.email == email + assert account.name == name + assert account.status == "active" + assert account.initialized_at is not None + + # Verify no tenant was created + from extensions.ext_database import db + from models.account import TenantAccountJoin + + tenant_join = db.session.query(TenantAccountJoin).filter_by(account_id=account.id).first() + assert tenant_join is None + + def test_invite_new_member_new_account(self, db_session_with_containers, mock_external_service_dependencies): + """ + Test inviting a new member who doesn't have an account yet. + """ + fake = Faker() + tenant_name = fake.company() + inviter_email = fake.email() + inviter_name = fake.name() + inviter_password = fake.password(length=12) + new_member_email = fake.email() + language = fake.random_element(elements=("en-US", "zh-CN")) + # Setup mocks + mock_external_service_dependencies["feature_service"].get_system_features.return_value.is_allow_register = True + mock_external_service_dependencies[ + "feature_service" + ].get_system_features.return_value.is_allow_create_workspace = True + mock_external_service_dependencies[ + "feature_service" + ].get_system_features.return_value.license.workspaces.is_available.return_value = True + mock_external_service_dependencies["billing_service"].is_email_in_freeze.return_value = False + + # Create tenant and inviter account + tenant = TenantService.create_tenant(name=tenant_name) + inviter = AccountService.create_account( + email=inviter_email, + name=inviter_name, + interface_language="en-US", + password=inviter_password, + ) + TenantService.create_tenant_member(tenant, inviter, role="owner") + + # Mock the email task + with patch("services.account_service.send_invite_member_mail_task") as mock_send_mail: + mock_send_mail.delay.return_value = None + + # Execute invitation + token = RegisterService.invite_new_member( + tenant=tenant, + email=new_member_email, + language=language, + role="normal", + inviter=inviter, + ) + + # Verify token was generated + assert token is not None + assert len(token) > 0 + + # Verify email task was called + mock_send_mail.delay.assert_called_once() + + # Verify new account was created with pending status + from extensions.ext_database import db + from models.account import Account, TenantAccountJoin + + new_account = db.session.query(Account).filter_by(email=new_member_email).first() + assert new_account is not None + assert new_account.name == new_member_email.split("@")[0] # Default name from email + assert new_account.status == "pending" + + # Verify tenant member was created + tenant_join = ( + db.session.query(TenantAccountJoin).filter_by(tenant_id=tenant.id, account_id=new_account.id).first() + ) + assert tenant_join is not None + assert tenant_join.role == "normal" + + def test_invite_new_member_existing_account(self, db_session_with_containers, mock_external_service_dependencies): + """ + Test inviting an existing member who is not in the tenant yet. + """ + fake = Faker() + tenant_name = fake.company() + inviter_email = fake.email() + inviter_name = fake.name() + inviter_password = fake.password(length=12) + existing_member_email = fake.email() + existing_member_name = fake.name() + existing_member_password = fake.password(length=12) + language = fake.random_element(elements=("en-US", "zh-CN")) + # Setup mocks + mock_external_service_dependencies["feature_service"].get_system_features.return_value.is_allow_register = True + mock_external_service_dependencies["billing_service"].is_email_in_freeze.return_value = False + + # Create tenant and inviter account + tenant = TenantService.create_tenant(name=tenant_name) + inviter = AccountService.create_account( + email=inviter_email, + name=inviter_name, + interface_language="en-US", + password=inviter_password, + ) + TenantService.create_tenant_member(tenant, inviter, role="owner") + + # Create existing account + existing_account = AccountService.create_account( + email=existing_member_email, + name=existing_member_name, + interface_language="en-US", + password=existing_member_password, + ) + + # Mock the email task + with patch("services.account_service.send_invite_member_mail_task") as mock_send_mail: + mock_send_mail.delay.return_value = None + with pytest.raises(AccountAlreadyInTenantError, match="Account already in tenant."): + # Execute invitation + token = RegisterService.invite_new_member( + tenant=tenant, + email=existing_member_email, + language=language, + role="admin", + inviter=inviter, + ) + + # Verify email task was not called + mock_send_mail.delay.assert_not_called() + + # Verify tenant member was created for existing account + from extensions.ext_database import db + from models.account import TenantAccountJoin + + tenant_join = ( + db.session.query(TenantAccountJoin).filter_by(tenant_id=tenant.id, account_id=existing_account.id).first() + ) + assert tenant_join is not None + assert tenant_join.role == "admin" + + def test_invite_new_member_existing_member(self, db_session_with_containers, mock_external_service_dependencies): + """ + Test inviting a member who is already in the tenant with pending status. + """ + fake = Faker() + tenant_name = fake.company() + inviter_email = fake.email() + inviter_name = fake.name() + inviter_password = fake.password(length=12) + existing_pending_member_email = fake.email() + existing_pending_member_name = fake.name() + existing_pending_member_password = fake.password(length=12) + language = fake.random_element(elements=("en-US", "zh-CN")) + # Setup mocks + mock_external_service_dependencies["feature_service"].get_system_features.return_value.is_allow_register = True + mock_external_service_dependencies["billing_service"].is_email_in_freeze.return_value = False + + # Create tenant and inviter account + tenant = TenantService.create_tenant(name=tenant_name) + inviter = AccountService.create_account( + email=inviter_email, + name=inviter_name, + interface_language="en-US", + password=inviter_password, + ) + TenantService.create_tenant_member(tenant, inviter, role="owner") + + # Create existing account with pending status + existing_account = AccountService.create_account( + email=existing_pending_member_email, + name=existing_pending_member_name, + interface_language="en-US", + password=existing_pending_member_password, + ) + existing_account.status = "pending" + from extensions.ext_database import db + + db.session.commit() + + # Add existing account to tenant + TenantService.create_tenant_member(tenant, existing_account, role="normal") + + # Mock the email task + with patch("services.account_service.send_invite_member_mail_task") as mock_send_mail: + mock_send_mail.delay.return_value = None + + # Execute invitation (should resend email for pending member) + token = RegisterService.invite_new_member( + tenant=tenant, + email=existing_pending_member_email, + language=language, + role="normal", + inviter=inviter, + ) + + # Verify token was generated + assert token is not None + assert len(token) > 0 + + # Verify email task was called + mock_send_mail.delay.assert_called_once() + + def test_invite_new_member_no_inviter(self, db_session_with_containers, mock_external_service_dependencies): + """ + Test inviting a member without providing an inviter. + """ + fake = Faker() + tenant_name = fake.company() + new_member_email = fake.email() + language = fake.random_element(elements=("en-US", "zh-CN")) + # Setup mocks + mock_external_service_dependencies["feature_service"].get_system_features.return_value.is_allow_register = True + mock_external_service_dependencies["billing_service"].is_email_in_freeze.return_value = False + + # Create tenant + tenant = TenantService.create_tenant(name=tenant_name) + + # Execute invitation without inviter (should fail) + with pytest.raises(ValueError, match="Inviter is required"): + RegisterService.invite_new_member( + tenant=tenant, + email=new_member_email, + language=language, + role="normal", + inviter=None, + ) + + def test_invite_new_member_account_already_in_tenant( + self, db_session_with_containers, mock_external_service_dependencies + ): + """ + Test inviting a member who is already in the tenant with active status. + """ + fake = Faker() + tenant_name = fake.company() + inviter_email = fake.email() + inviter_name = fake.name() + inviter_password = fake.password(length=12) + already_in_tenant_email = fake.email() + already_in_tenant_name = fake.name() + already_in_tenant_password = fake.password(length=12) + language = fake.random_element(elements=("en-US", "zh-CN")) + # Setup mocks + mock_external_service_dependencies["feature_service"].get_system_features.return_value.is_allow_register = True + mock_external_service_dependencies["billing_service"].is_email_in_freeze.return_value = False + + # Create tenant and inviter account + tenant = TenantService.create_tenant(name=tenant_name) + inviter = AccountService.create_account( + email=inviter_email, + name=inviter_name, + interface_language="en-US", + password=inviter_password, + ) + TenantService.create_tenant_member(tenant, inviter, role="owner") + + # Create existing account with active status + existing_account = AccountService.create_account( + email=already_in_tenant_email, + name=already_in_tenant_name, + interface_language="en-US", + password=already_in_tenant_password, + ) + existing_account.status = "active" + from extensions.ext_database import db + + db.session.commit() + + # Add existing account to tenant + TenantService.create_tenant_member(tenant, existing_account, role="normal") + + # Execute invitation (should fail for active member) + with pytest.raises(AccountAlreadyInTenantError, match="Account already in tenant."): + RegisterService.invite_new_member( + tenant=tenant, + email=already_in_tenant_email, + language=language, + role="normal", + inviter=inviter, + ) + + def test_generate_invite_token_success(self, db_session_with_containers, mock_external_service_dependencies): + """ + Test successful generation of invite token. + """ + fake = Faker() + tenant_name = fake.company() + email = fake.email() + name = fake.name() + password = fake.password(length=12) + # Setup mocks + mock_external_service_dependencies["feature_service"].get_system_features.return_value.is_allow_register = True + mock_external_service_dependencies["billing_service"].is_email_in_freeze.return_value = False + + # Create tenant and account + tenant = TenantService.create_tenant(name=tenant_name) + account = AccountService.create_account( + email=email, + name=name, + interface_language="en-US", + password=password, + ) + + # Execute token generation + token = RegisterService.generate_invite_token(tenant, account) + + # Verify token was generated + assert token is not None + assert len(token) > 0 + + # Verify token was stored in Redis + from extensions.ext_redis import redis_client + + token_key = RegisterService._get_invitation_token_key(token) + stored_data = redis_client.get(token_key) + assert stored_data is not None + + # Verify stored data contains correct information + import json + + invitation_data = json.loads(stored_data.decode("utf-8")) + assert invitation_data["account_id"] == str(account.id) + assert invitation_data["email"] == account.email + assert invitation_data["workspace_id"] == tenant.id + + def test_is_valid_invite_token_valid(self, db_session_with_containers, mock_external_service_dependencies): + """ + Test validation of valid invite token. + """ + fake = Faker() + tenant_name = fake.company() + email = fake.email() + name = fake.name() + password = fake.password(length=12) + # Setup mocks + mock_external_service_dependencies["feature_service"].get_system_features.return_value.is_allow_register = True + mock_external_service_dependencies["billing_service"].is_email_in_freeze.return_value = False + + # Create tenant and account + tenant = TenantService.create_tenant(name=tenant_name) + account = AccountService.create_account( + email=email, + name=name, + interface_language="en-US", + password=password, + ) + + # Generate a real token + token = RegisterService.generate_invite_token(tenant, account) + + # Execute validation + is_valid = RegisterService.is_valid_invite_token(token) + + # Verify token is valid + assert is_valid is True + + def test_is_valid_invite_token_invalid(self, db_session_with_containers, mock_external_service_dependencies): + """ + Test validation of invalid invite token. + """ + fake = Faker() + invalid_token = fake.uuid4() + # Execute validation with non-existent token + is_valid = RegisterService.is_valid_invite_token(invalid_token) + + # Verify token is invalid + assert is_valid is False + + def test_revoke_token_with_workspace_and_email( + self, db_session_with_containers, mock_external_service_dependencies + ): + """ + Test revoking token with workspace ID and email. + """ + fake = Faker() + tenant_name = fake.company() + email = fake.email() + name = fake.name() + password = fake.password(length=12) + # Setup mocks + mock_external_service_dependencies["feature_service"].get_system_features.return_value.is_allow_register = True + mock_external_service_dependencies["billing_service"].is_email_in_freeze.return_value = False + + # Create tenant and account + tenant = TenantService.create_tenant(name=tenant_name) + account = AccountService.create_account( + email=email, + name=name, + interface_language="en-US", + password=password, + ) + + # Generate a real token + token = RegisterService.generate_invite_token(tenant, account) + + # Verify token exists in Redis before revocation + from extensions.ext_redis import redis_client + + token_key = RegisterService._get_invitation_token_key(token) + assert redis_client.get(token_key) is not None + + # Execute token revocation + RegisterService.revoke_token( + workspace_id=tenant.id, + email=account.email, + token=token, + ) + + # Verify token was not deleted from Redis + assert redis_client.get(token_key) is not None + + def test_revoke_token_without_workspace_and_email( + self, db_session_with_containers, mock_external_service_dependencies + ): + """ + Test revoking token without workspace ID and email. + """ + fake = Faker() + tenant_name = fake.company() + email = fake.email() + name = fake.name() + password = fake.password(length=12) + # Setup mocks + mock_external_service_dependencies["feature_service"].get_system_features.return_value.is_allow_register = True + mock_external_service_dependencies["billing_service"].is_email_in_freeze.return_value = False + + # Create tenant and account + tenant = TenantService.create_tenant(name=tenant_name) + account = AccountService.create_account( + email=email, + name=name, + interface_language="en-US", + password=password, + ) + + # Generate a real token + token = RegisterService.generate_invite_token(tenant, account) + + # Verify token exists in Redis before revocation + from extensions.ext_redis import redis_client + + token_key = RegisterService._get_invitation_token_key(token) + assert redis_client.get(token_key) is not None + + # Execute token revocation without workspace and email + RegisterService.revoke_token( + workspace_id="", + email="", + token=token, + ) + + # Verify token was deleted from Redis + assert redis_client.get(token_key) is None + + def test_get_invitation_if_token_valid_success( + self, db_session_with_containers, mock_external_service_dependencies + ): + """ + Test getting invitation data with valid token. + """ + fake = Faker() + tenant_name = fake.company() + email = fake.email() + name = fake.name() + password = fake.password(length=12) + # Setup mocks + mock_external_service_dependencies["feature_service"].get_system_features.return_value.is_allow_register = True + mock_external_service_dependencies["billing_service"].is_email_in_freeze.return_value = False + + # Create tenant and account + tenant = TenantService.create_tenant(name=tenant_name) + account = AccountService.create_account( + email=email, + name=name, + interface_language="en-US", + password=password, + ) + TenantService.create_tenant_member(tenant, account, role="normal") + + # Generate a real token + token = RegisterService.generate_invite_token(tenant, account) + + email_hash = sha256(account.email.encode()).hexdigest() + cache_key = f"member_invite_token:{tenant.id}, {email_hash}:{token}" + from extensions.ext_redis import redis_client + + redis_client.setex(cache_key, 24 * 60 * 60, account.id) + + # Execute invitation retrieval + result = RegisterService.get_invitation_if_token_valid( + workspace_id=tenant.id, + email=account.email, + token=token, + ) + + # Verify result contains expected data + assert result is not None + assert result["account"].id == account.id + assert result["tenant"].id == tenant.id + assert result["data"]["account_id"] == str(account.id) + assert result["data"]["email"] == account.email + assert result["data"]["workspace_id"] == tenant.id + + def test_get_invitation_if_token_valid_invalid_token( + self, db_session_with_containers, mock_external_service_dependencies + ): + """ + Test getting invitation data with invalid token. + """ + fake = Faker() + workspace_id = fake.uuid4() + email = fake.email() + invalid_token = fake.uuid4() + # Execute invitation retrieval with invalid token + result = RegisterService.get_invitation_if_token_valid( + workspace_id=workspace_id, + email=email, + token=invalid_token, + ) + + # Verify result is None + assert result is None + + def test_get_invitation_if_token_valid_invalid_tenant( + self, db_session_with_containers, mock_external_service_dependencies + ): + """ + Test getting invitation data with invalid tenant. + """ + fake = Faker() + email = fake.email() + name = fake.name() + password = fake.password(length=12) + invalid_tenant_id = fake.uuid4() + token = fake.uuid4() + # Setup mocks + mock_external_service_dependencies["feature_service"].get_system_features.return_value.is_allow_register = True + mock_external_service_dependencies["billing_service"].is_email_in_freeze.return_value = False + + # Create account + account = AccountService.create_account( + email=email, + name=name, + interface_language="en-US", + password=password, + ) + + # Create a real token but with non-existent tenant ID + from extensions.ext_redis import redis_client + + invitation_data = { + "account_id": str(account.id), + "email": account.email, + "workspace_id": invalid_tenant_id, + } + token_key = RegisterService._get_invitation_token_key(token) + import json + + redis_client.setex(token_key, 24 * 60 * 60, json.dumps(invitation_data)) + + # Execute invitation retrieval + result = RegisterService.get_invitation_if_token_valid( + workspace_id=invalid_tenant_id, + email=account.email, + token=token, + ) + + # Verify result is None (tenant not found) + assert result is None + + # Clean up + redis_client.delete(token_key) + + def test_get_invitation_if_token_valid_account_mismatch( + self, db_session_with_containers, mock_external_service_dependencies + ): + """ + Test getting invitation data with account ID mismatch. + """ + fake = Faker() + tenant_name = fake.company() + email = fake.email() + name = fake.name() + password = fake.password(length=12) + token = fake.uuid4() + # Setup mocks + mock_external_service_dependencies["feature_service"].get_system_features.return_value.is_allow_register = True + mock_external_service_dependencies["billing_service"].is_email_in_freeze.return_value = False + + # Create tenant and account + tenant = TenantService.create_tenant(name=tenant_name) + account = AccountService.create_account( + email=email, + name=name, + interface_language="en-US", + password=password, + ) + TenantService.create_tenant_member(tenant, account, role="normal") + + # Create a real token but with mismatched account ID + from extensions.ext_redis import redis_client + + invitation_data = { + "account_id": "different-account-id", # Different from actual account ID + "email": account.email, + "workspace_id": tenant.id, + } + token_key = RegisterService._get_invitation_token_key(token) + redis_client.setex(token_key, 24 * 60 * 60, json.dumps(invitation_data)) + + # Execute invitation retrieval + result = RegisterService.get_invitation_if_token_valid( + workspace_id=tenant.id, + email=account.email, + token=token, + ) + + # Verify result is None (account ID mismatch) + assert result is None + + # Clean up + redis_client.delete(token_key) + + def test_get_invitation_if_token_valid_tenant_not_normal( + self, db_session_with_containers, mock_external_service_dependencies + ): + """ + Test getting invitation data with tenant not in normal status. + """ + fake = Faker() + tenant_name = fake.company() + email = fake.email() + name = fake.name() + password = fake.password(length=12) + token = fake.uuid4() + # Setup mocks + mock_external_service_dependencies["feature_service"].get_system_features.return_value.is_allow_register = True + mock_external_service_dependencies["billing_service"].is_email_in_freeze.return_value = False + + # Create tenant and account + tenant = TenantService.create_tenant(name=tenant_name) + account = AccountService.create_account( + email=email, + name=name, + interface_language="en-US", + password=password, + ) + TenantService.create_tenant_member(tenant, account, role="normal") + + # Change tenant status to non-normal + tenant.status = "suspended" + from extensions.ext_database import db + + db.session.commit() + + # Create a real token + from extensions.ext_redis import redis_client + + invitation_data = { + "account_id": str(account.id), + "email": account.email, + "workspace_id": tenant.id, + } + token_key = RegisterService._get_invitation_token_key(token) + import json + + redis_client.setex(token_key, 24 * 60 * 60, json.dumps(invitation_data)) + + # Execute invitation retrieval + result = RegisterService.get_invitation_if_token_valid( + workspace_id=tenant.id, + email=account.email, + token=token, + ) + + # Verify result is None (tenant not in normal status) + assert result is None + + # Clean up + redis_client.delete(token_key) + + def test_get_invitation_by_token_with_workspace_and_email( + self, db_session_with_containers, mock_external_service_dependencies + ): + """ + Test getting invitation by token with workspace ID and email. + """ + fake = Faker() + token = fake.uuid4() + workspace_id = fake.uuid4() + email = fake.email() + + # Create the cache key as the service does + from hashlib import sha256 + + from extensions.ext_redis import redis_client + + email_hash = sha256(email.encode()).hexdigest() + cache_key = f"member_invite_token:{workspace_id}, {email_hash}:{token}" + + # Store account ID in Redis + account_id = fake.uuid4() + redis_client.setex(cache_key, 24 * 60 * 60, account_id) + + # Execute invitation retrieval + result = RegisterService._get_invitation_by_token( + token=token, + workspace_id=workspace_id, + email=email, + ) + + # Verify result contains expected data + assert result is not None + assert result["account_id"] == account_id + assert result["email"] == email + assert result["workspace_id"] == workspace_id + + # Clean up + redis_client.delete(cache_key) + + def test_get_invitation_by_token_without_workspace_and_email( + self, db_session_with_containers, mock_external_service_dependencies + ): + """ + Test getting invitation by token without workspace ID and email. + """ + fake = Faker() + token = fake.uuid4() + invitation_data = { + "account_id": fake.uuid4(), + "email": fake.email(), + "workspace_id": fake.uuid4(), + } + + # Store invitation data in Redis using standard token key + from extensions.ext_redis import redis_client + + token_key = RegisterService._get_invitation_token_key(token) + import json + + redis_client.setex(token_key, 24 * 60 * 60, json.dumps(invitation_data)) + + # Execute invitation retrieval + result = RegisterService._get_invitation_by_token(token=token) + + # Verify result contains expected data + assert result is not None + assert result["account_id"] == invitation_data["account_id"] + assert result["email"] == invitation_data["email"] + assert result["workspace_id"] == invitation_data["workspace_id"] + + # Clean up + redis_client.delete(token_key) + + def test_get_invitation_token_key(self, db_session_with_containers, mock_external_service_dependencies): + """ + Test getting invitation token key. + """ + fake = Faker() + token = fake.uuid4() + # Execute token key generation + token_key = RegisterService._get_invitation_token_key(token) + + # Verify token key format + assert token_key == f"member_invite:token:{token}" From 2575eaf1d6891d69133974fb8c7a2c7931385244 Mon Sep 17 00:00:00 2001 From: winsonwhe <166315655+winsonwhe@users.noreply.github.com> Date: Tue, 5 Aug 2025 21:29:38 +0800 Subject: [PATCH 151/415] Update milvus version to LTS (#23393) Co-authored-by: crazywoola <427733928@qq.com> --- docker/docker-compose-template.yaml | 2 +- docker/docker-compose.yaml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/docker/docker-compose-template.yaml b/docker/docker-compose-template.yaml index fe8e4602b7..b5ae4a425c 100644 --- a/docker/docker-compose-template.yaml +++ b/docker/docker-compose-template.yaml @@ -538,7 +538,7 @@ services: milvus-standalone: container_name: milvus-standalone - image: milvusdb/milvus:v2.5.0-beta + image: milvusdb/milvus:v2.5.15 profiles: - milvus command: [ 'milvus', 'run', 'standalone' ] diff --git a/docker/docker-compose.yaml b/docker/docker-compose.yaml index 690dccb1a8..19910cca6f 100644 --- a/docker/docker-compose.yaml +++ b/docker/docker-compose.yaml @@ -1087,7 +1087,7 @@ services: milvus-standalone: container_name: milvus-standalone - image: milvusdb/milvus:v2.5.0-beta + image: milvusdb/milvus:v2.5.15 profiles: - milvus command: [ 'milvus', 'run', 'standalone' ] From ad622cea9e47c532546eb916115ab1f1c4b9d622 Mon Sep 17 00:00:00 2001 From: NeatGuyCoding <15627489+NeatGuyCoding@users.noreply.github.com> Date: Wed, 6 Aug 2025 09:47:56 +0800 Subject: [PATCH 152/415] Feat add testcontainers test for workflow draft variable service (#23466) --- .../test_workflow_draft_variable_service.py | 739 ++++++++++++++++++ 1 file changed, 739 insertions(+) create mode 100644 api/tests/test_containers_integration_tests/services/test_workflow_draft_variable_service.py diff --git a/api/tests/test_containers_integration_tests/services/test_workflow_draft_variable_service.py b/api/tests/test_containers_integration_tests/services/test_workflow_draft_variable_service.py new file mode 100644 index 0000000000..85a9355c79 --- /dev/null +++ b/api/tests/test_containers_integration_tests/services/test_workflow_draft_variable_service.py @@ -0,0 +1,739 @@ +import pytest +from faker import Faker + +from core.variables.segments import StringSegment +from core.workflow.constants import CONVERSATION_VARIABLE_NODE_ID, SYSTEM_VARIABLE_NODE_ID +from models import App, Workflow +from models.enums import DraftVariableType +from models.workflow import WorkflowDraftVariable +from services.workflow_draft_variable_service import ( + UpdateNotSupportedError, + WorkflowDraftVariableService, +) + + +class TestWorkflowDraftVariableService: + """ + Comprehensive integration tests for WorkflowDraftVariableService using testcontainers. + + This test class covers all major functionality of the WorkflowDraftVariableService: + - CRUD operations for workflow draft variables (Create, Read, Update, Delete) + - Variable listing and filtering by type (conversation, system, node) + - Variable updates and resets with proper validation + - Variable deletion operations at different scopes + - Special functionality like prefill and conversation ID retrieval + - Error handling for various edge cases and invalid operations + + All tests use the testcontainers infrastructure to ensure proper database isolation + and realistic testing environment with actual database interactions. + """ + + @pytest.fixture + def mock_external_service_dependencies(self): + """ + Mock setup for external service dependencies. + + WorkflowDraftVariableService doesn't have external dependencies that need mocking, + so this fixture returns an empty dictionary to maintain consistency with other test classes. + This ensures the test structure remains consistent across different service test files. + """ + # WorkflowDraftVariableService doesn't have external dependencies that need mocking + return {} + + def _create_test_app(self, db_session_with_containers, mock_external_service_dependencies, fake=None): + """ + Helper method to create a test app with realistic data for testing. + + This method creates a complete App instance with all required fields populated + using Faker for generating realistic test data. The app is configured for + workflow mode to support workflow draft variable testing. + + Args: + db_session_with_containers: Database session from testcontainers infrastructure + mock_external_service_dependencies: Mock dependencies (unused in this service) + fake: Faker instance for generating test data, creates new instance if not provided + + Returns: + App: Created test app instance with all required fields populated + """ + fake = fake or Faker() + app = App() + app.id = fake.uuid4() + app.tenant_id = fake.uuid4() + app.name = fake.company() + app.description = fake.text() + app.mode = "workflow" + app.icon_type = "emoji" + app.icon = "🤖" + app.icon_background = "#FFEAD5" + app.enable_site = True + app.enable_api = True + app.created_by = fake.uuid4() + app.updated_by = app.created_by + + from extensions.ext_database import db + + db.session.add(app) + db.session.commit() + return app + + def _create_test_workflow(self, db_session_with_containers, app, fake=None): + """ + Helper method to create a test workflow associated with an app. + + This method creates a Workflow instance using the proper factory method + to ensure all required fields are set correctly. The workflow is configured + as a draft version with basic graph structure for testing workflow variables. + + Args: + db_session_with_containers: Database session from testcontainers infrastructure + app: The app to associate the workflow with + fake: Faker instance for generating test data, creates new instance if not provided + + Returns: + Workflow: Created test workflow instance with proper configuration + """ + fake = fake or Faker() + workflow = Workflow.new( + tenant_id=app.tenant_id, + app_id=app.id, + type="workflow", + version="draft", + graph='{"nodes": [], "edges": []}', + features="{}", + created_by=app.created_by, + environment_variables=[], + conversation_variables=[], + ) + from extensions.ext_database import db + + db.session.add(workflow) + db.session.commit() + return workflow + + def _create_test_variable( + self, db_session_with_containers, app_id, node_id, name, value, variable_type="conversation", fake=None + ): + """ + Helper method to create a test workflow draft variable with proper configuration. + + This method creates different types of variables (conversation, system, node) using + the appropriate factory methods to ensure proper initialization. Each variable type + has specific requirements and this method handles the creation logic for all types. + + Args: + db_session_with_containers: Database session from testcontainers infrastructure + app_id: ID of the app to associate the variable with + node_id: ID of the node (or special constants like CONVERSATION_VARIABLE_NODE_ID) + name: Name of the variable for identification + value: StringSegment value for the variable content + variable_type: Type of variable ("conversation", "system", "node") determining creation method + fake: Faker instance for generating test data, creates new instance if not provided + + Returns: + WorkflowDraftVariable: Created test variable instance with proper type configuration + """ + fake = fake or Faker() + if variable_type == "conversation": + # Create conversation variable using the appropriate factory method + variable = WorkflowDraftVariable.new_conversation_variable( + app_id=app_id, + name=name, + value=value, + description=fake.text(max_nb_chars=20), + ) + elif variable_type == "system": + # Create system variable with editable flag and execution context + variable = WorkflowDraftVariable.new_sys_variable( + app_id=app_id, + name=name, + value=value, + node_execution_id=fake.uuid4(), + editable=True, + ) + else: # node variable + # Create node variable with visibility and editability settings + variable = WorkflowDraftVariable.new_node_variable( + app_id=app_id, + node_id=node_id, + name=name, + value=value, + node_execution_id=fake.uuid4(), + visible=True, + editable=True, + ) + from extensions.ext_database import db + + db.session.add(variable) + db.session.commit() + return variable + + def test_get_variable_success(self, db_session_with_containers, mock_external_service_dependencies): + """ + Test getting a single variable by ID successfully. + + This test verifies that the service can retrieve a specific variable + by its ID and that the returned variable contains the correct data. + It ensures the basic CRUD read operation works correctly for workflow draft variables. + """ + fake = Faker() + app = self._create_test_app(db_session_with_containers, mock_external_service_dependencies, fake=fake) + test_value = StringSegment(value=fake.word()) + variable = self._create_test_variable( + db_session_with_containers, app.id, CONVERSATION_VARIABLE_NODE_ID, "test_var", test_value, fake=fake + ) + service = WorkflowDraftVariableService(db_session_with_containers) + retrieved_variable = service.get_variable(variable.id) + assert retrieved_variable is not None + assert retrieved_variable.id == variable.id + assert retrieved_variable.name == "test_var" + assert retrieved_variable.app_id == app.id + assert retrieved_variable.get_value().value == test_value.value + + def test_get_variable_not_found(self, db_session_with_containers, mock_external_service_dependencies): + """ + Test getting a variable that doesn't exist. + + This test verifies that the service returns None when trying to + retrieve a variable with a non-existent ID. This ensures proper + handling of missing data scenarios. + """ + fake = Faker() + non_existent_id = fake.uuid4() + service = WorkflowDraftVariableService(db_session_with_containers) + retrieved_variable = service.get_variable(non_existent_id) + assert retrieved_variable is None + + def test_get_draft_variables_by_selectors_success( + self, db_session_with_containers, mock_external_service_dependencies + ): + """ + Test getting variables by selectors successfully. + + This test verifies that the service can retrieve multiple variables + using selector pairs (node_id, variable_name) and returns the correct + variables for each selector. This is useful for bulk variable retrieval + operations in workflow execution contexts. + """ + fake = Faker() + app = self._create_test_app(db_session_with_containers, mock_external_service_dependencies, fake=fake) + var1_value = StringSegment(value=fake.word()) + var2_value = StringSegment(value=fake.word()) + var3_value = StringSegment(value=fake.word()) + var1 = self._create_test_variable( + db_session_with_containers, app.id, CONVERSATION_VARIABLE_NODE_ID, "var1", var1_value, fake=fake + ) + var2 = self._create_test_variable( + db_session_with_containers, app.id, CONVERSATION_VARIABLE_NODE_ID, "var2", var2_value, fake=fake + ) + var3 = self._create_test_variable( + db_session_with_containers, app.id, "test_node_1", "var3", var3_value, "node", fake=fake + ) + selectors = [ + [CONVERSATION_VARIABLE_NODE_ID, "var1"], + [CONVERSATION_VARIABLE_NODE_ID, "var2"], + ["test_node_1", "var3"], + ] + service = WorkflowDraftVariableService(db_session_with_containers) + retrieved_variables = service.get_draft_variables_by_selectors(app.id, selectors) + assert len(retrieved_variables) == 3 + var_names = [var.name for var in retrieved_variables] + assert "var1" in var_names + assert "var2" in var_names + assert "var3" in var_names + for var in retrieved_variables: + if var.name == "var1": + assert var.get_value().value == var1_value.value + elif var.name == "var2": + assert var.get_value().value == var2_value.value + elif var.name == "var3": + assert var.get_value().value == var3_value.value + + def test_list_variables_without_values_success( + self, db_session_with_containers, mock_external_service_dependencies + ): + """ + Test listing variables without values successfully with pagination. + + This test verifies that the service can list variables with pagination + and that the returned variables don't include their values (for performance). + This is important for scenarios where only variable metadata is needed + without loading the actual content. + """ + fake = Faker() + app = self._create_test_app(db_session_with_containers, mock_external_service_dependencies, fake=fake) + for i in range(5): + test_value = StringSegment(value=fake.numerify("value##")) + self._create_test_variable( + db_session_with_containers, app.id, CONVERSATION_VARIABLE_NODE_ID, fake.word(), test_value, fake=fake + ) + service = WorkflowDraftVariableService(db_session_with_containers) + result = service.list_variables_without_values(app.id, page=1, limit=3) + assert result.total == 5 + assert len(result.variables) == 3 + assert result.variables[0].created_at >= result.variables[1].created_at + assert result.variables[1].created_at >= result.variables[2].created_at + for var in result.variables: + assert var.name is not None + assert var.app_id == app.id + + def test_list_node_variables_success(self, db_session_with_containers, mock_external_service_dependencies): + """ + Test listing variables for a specific node successfully. + + This test verifies that the service can filter and return only + variables associated with a specific node ID. This is crucial for + workflow execution where variables need to be scoped to specific nodes. + """ + fake = Faker() + app = self._create_test_app(db_session_with_containers, mock_external_service_dependencies, fake=fake) + node_id = fake.word() + var1_value = StringSegment(value=fake.word()) + var2_value = StringSegment(value=fake.word()) + var3_value = StringSegment(value=fake.word()) + self._create_test_variable(db_session_with_containers, app.id, node_id, "var1", var1_value, "node", fake=fake) + self._create_test_variable(db_session_with_containers, app.id, node_id, "var2", var3_value, "node", fake=fake) + self._create_test_variable( + db_session_with_containers, app.id, "other_node", "var3", var2_value, "node", fake=fake + ) + service = WorkflowDraftVariableService(db_session_with_containers) + result = service.list_node_variables(app.id, node_id) + assert len(result.variables) == 2 + for var in result.variables: + assert var.node_id == node_id + assert var.app_id == app.id + var_names = [var.name for var in result.variables] + assert "var1" in var_names + assert "var2" in var_names + assert "var3" not in var_names + + def test_list_conversation_variables_success(self, db_session_with_containers, mock_external_service_dependencies): + """ + Test listing conversation variables successfully. + + This test verifies that the service can filter and return only + conversation variables, excluding system and node variables. + Conversation variables are user-facing variables that can be + modified during conversation flows. + """ + fake = Faker() + app = self._create_test_app(db_session_with_containers, mock_external_service_dependencies, fake=fake) + conv_var1_value = StringSegment(value=fake.word()) + conv_var2_value = StringSegment(value=fake.word()) + conv_var1 = self._create_test_variable( + db_session_with_containers, app.id, CONVERSATION_VARIABLE_NODE_ID, "conv_var1", conv_var1_value, fake=fake + ) + conv_var2 = self._create_test_variable( + db_session_with_containers, app.id, CONVERSATION_VARIABLE_NODE_ID, "conv_var2", conv_var2_value, fake=fake + ) + sys_var_value = StringSegment(value=fake.word()) + self._create_test_variable( + db_session_with_containers, app.id, SYSTEM_VARIABLE_NODE_ID, "sys_var", sys_var_value, "system", fake=fake + ) + service = WorkflowDraftVariableService(db_session_with_containers) + result = service.list_conversation_variables(app.id) + assert len(result.variables) == 2 + for var in result.variables: + assert var.node_id == CONVERSATION_VARIABLE_NODE_ID + assert var.app_id == app.id + assert var.get_variable_type() == DraftVariableType.CONVERSATION + var_names = [var.name for var in result.variables] + assert "conv_var1" in var_names + assert "conv_var2" in var_names + assert "sys_var" not in var_names + + def test_update_variable_success(self, db_session_with_containers, mock_external_service_dependencies): + """ + Test updating a variable's name and value successfully. + + This test verifies that the service can update both the name and value + of an editable variable and that the changes are persisted correctly. + It also checks that the last_edited_at timestamp is updated appropriately. + """ + fake = Faker() + app = self._create_test_app(db_session_with_containers, mock_external_service_dependencies, fake=fake) + original_value = StringSegment(value=fake.word()) + new_value = StringSegment(value=fake.word()) + variable = self._create_test_variable( + db_session_with_containers, + app.id, + CONVERSATION_VARIABLE_NODE_ID, + "original_name", + original_value, + fake=fake, + ) + service = WorkflowDraftVariableService(db_session_with_containers) + updated_variable = service.update_variable(variable, name="new_name", value=new_value) + assert updated_variable.name == "new_name" + assert updated_variable.get_value().value == new_value.value + assert updated_variable.last_edited_at is not None + from extensions.ext_database import db + + db.session.refresh(variable) + assert variable.name == "new_name" + assert variable.get_value().value == new_value.value + assert variable.last_edited_at is not None + + def test_update_variable_not_editable(self, db_session_with_containers, mock_external_service_dependencies): + """ + Test that updating a non-editable variable raises an exception. + + This test verifies that the service properly prevents updates to + variables that are not marked as editable. This is important for + maintaining data integrity and preventing unauthorized modifications + to system-controlled variables. + """ + fake = Faker() + app = self._create_test_app(db_session_with_containers, mock_external_service_dependencies, fake=fake) + original_value = StringSegment(value=fake.word()) + new_value = StringSegment(value=fake.word()) + variable = WorkflowDraftVariable.new_sys_variable( + app_id=app.id, + name=fake.word(), # This is typically not editable + value=original_value, + node_execution_id=fake.uuid4(), + editable=False, # Set as non-editable + ) + from extensions.ext_database import db + + db.session.add(variable) + db.session.commit() + service = WorkflowDraftVariableService(db_session_with_containers) + with pytest.raises(UpdateNotSupportedError) as exc_info: + service.update_variable(variable, name="new_name", value=new_value) + assert "variable not support updating" in str(exc_info.value) + assert variable.id in str(exc_info.value) + + def test_reset_conversation_variable_success(self, db_session_with_containers, mock_external_service_dependencies): + """ + Test resetting conversation variable successfully. + + This test verifies that the service can reset a conversation variable + to its default value and clear the last_edited_at timestamp. + This functionality is useful for reverting user modifications + back to the original workflow configuration. + """ + fake = Faker() + app = self._create_test_app(db_session_with_containers, mock_external_service_dependencies, fake=fake) + workflow = self._create_test_workflow(db_session_with_containers, app, fake=fake) + from core.variables.variables import StringVariable + + conv_var = StringVariable( + id=fake.uuid4(), + name="test_conv_var", + value="default_value", + selector=[CONVERSATION_VARIABLE_NODE_ID, "test_conv_var"], + ) + workflow.conversation_variables = [conv_var] + from extensions.ext_database import db + + db.session.commit() + modified_value = StringSegment(value=fake.word()) + variable = self._create_test_variable( + db_session_with_containers, + app.id, + CONVERSATION_VARIABLE_NODE_ID, + "test_conv_var", + modified_value, + fake=fake, + ) + variable.last_edited_at = fake.date_time() + db.session.commit() + service = WorkflowDraftVariableService(db_session_with_containers) + reset_variable = service.reset_variable(workflow, variable) + assert reset_variable is not None + assert reset_variable.get_value().value == "default_value" + assert reset_variable.last_edited_at is None + db.session.refresh(variable) + assert variable.get_value().value == "default_value" + assert variable.last_edited_at is None + + def test_delete_variable_success(self, db_session_with_containers, mock_external_service_dependencies): + """ + Test deleting a single variable successfully. + + This test verifies that the service can delete a specific variable + and that it's properly removed from the database. It ensures that + the deletion operation is atomic and complete. + """ + fake = Faker() + app = self._create_test_app(db_session_with_containers, mock_external_service_dependencies, fake=fake) + test_value = StringSegment(value=fake.word()) + variable = self._create_test_variable( + db_session_with_containers, app.id, CONVERSATION_VARIABLE_NODE_ID, "test_var", test_value, fake=fake + ) + from extensions.ext_database import db + + assert db.session.query(WorkflowDraftVariable).filter_by(id=variable.id).first() is not None + service = WorkflowDraftVariableService(db_session_with_containers) + service.delete_variable(variable) + assert db.session.query(WorkflowDraftVariable).filter_by(id=variable.id).first() is None + + def test_delete_workflow_variables_success(self, db_session_with_containers, mock_external_service_dependencies): + """ + Test deleting all variables for a workflow successfully. + + This test verifies that the service can delete all variables + associated with a specific app/workflow. This is useful for + cleanup operations when workflows are deleted or reset. + """ + fake = Faker() + app = self._create_test_app(db_session_with_containers, mock_external_service_dependencies, fake=fake) + for i in range(3): + test_value = StringSegment(value=fake.numerify("value##")) + self._create_test_variable( + db_session_with_containers, app.id, CONVERSATION_VARIABLE_NODE_ID, fake.word(), test_value, fake=fake + ) + other_app = self._create_test_app(db_session_with_containers, mock_external_service_dependencies, fake=fake) + other_value = StringSegment(value=fake.word()) + self._create_test_variable( + db_session_with_containers, other_app.id, CONVERSATION_VARIABLE_NODE_ID, fake.word(), other_value, fake=fake + ) + from extensions.ext_database import db + + app_variables = db.session.query(WorkflowDraftVariable).filter_by(app_id=app.id).all() + other_app_variables = db.session.query(WorkflowDraftVariable).filter_by(app_id=other_app.id).all() + assert len(app_variables) == 3 + assert len(other_app_variables) == 1 + service = WorkflowDraftVariableService(db_session_with_containers) + service.delete_workflow_variables(app.id) + app_variables_after = db.session.query(WorkflowDraftVariable).filter_by(app_id=app.id).all() + other_app_variables_after = db.session.query(WorkflowDraftVariable).filter_by(app_id=other_app.id).all() + assert len(app_variables_after) == 0 + assert len(other_app_variables_after) == 1 + + def test_delete_node_variables_success(self, db_session_with_containers, mock_external_service_dependencies): + """ + Test deleting all variables for a specific node successfully. + + This test verifies that the service can delete all variables + associated with a specific node while preserving variables + for other nodes and conversation variables. This is important + for node-specific cleanup operations in workflow management. + """ + fake = Faker() + app = self._create_test_app(db_session_with_containers, mock_external_service_dependencies, fake=fake) + node_id = fake.word() + for i in range(2): + test_value = StringSegment(value=fake.numerify("node_value##")) + self._create_test_variable( + db_session_with_containers, app.id, node_id, fake.word(), test_value, "node", fake=fake + ) + other_node_value = StringSegment(value=fake.word()) + self._create_test_variable( + db_session_with_containers, app.id, "other_node", fake.word(), other_node_value, "node", fake=fake + ) + conv_value = StringSegment(value=fake.word()) + self._create_test_variable( + db_session_with_containers, app.id, CONVERSATION_VARIABLE_NODE_ID, fake.word(), conv_value, fake=fake + ) + from extensions.ext_database import db + + target_node_variables = db.session.query(WorkflowDraftVariable).filter_by(app_id=app.id, node_id=node_id).all() + other_node_variables = ( + db.session.query(WorkflowDraftVariable).filter_by(app_id=app.id, node_id="other_node").all() + ) + conv_variables = ( + db.session.query(WorkflowDraftVariable) + .filter_by(app_id=app.id, node_id=CONVERSATION_VARIABLE_NODE_ID) + .all() + ) + assert len(target_node_variables) == 2 + assert len(other_node_variables) == 1 + assert len(conv_variables) == 1 + service = WorkflowDraftVariableService(db_session_with_containers) + service.delete_node_variables(app.id, node_id) + target_node_variables_after = ( + db.session.query(WorkflowDraftVariable).filter_by(app_id=app.id, node_id=node_id).all() + ) + other_node_variables_after = ( + db.session.query(WorkflowDraftVariable).filter_by(app_id=app.id, node_id="other_node").all() + ) + conv_variables_after = ( + db.session.query(WorkflowDraftVariable) + .filter_by(app_id=app.id, node_id=CONVERSATION_VARIABLE_NODE_ID) + .all() + ) + assert len(target_node_variables_after) == 0 + assert len(other_node_variables_after) == 1 + assert len(conv_variables_after) == 1 + + def test_prefill_conversation_variable_default_values_success( + self, db_session_with_containers, mock_external_service_dependencies + ): + """ + Test prefill conversation variable default values successfully. + + This test verifies that the service can automatically create + conversation variables with default values based on the workflow + configuration when none exist. This is important for initializing + workflow variables with proper defaults from the workflow definition. + """ + fake = Faker() + app = self._create_test_app(db_session_with_containers, mock_external_service_dependencies, fake=fake) + workflow = self._create_test_workflow(db_session_with_containers, app, fake=fake) + from core.variables.variables import StringVariable + + conv_var1 = StringVariable( + id=fake.uuid4(), + name="conv_var1", + value="default_value1", + selector=[CONVERSATION_VARIABLE_NODE_ID, "conv_var1"], + ) + conv_var2 = StringVariable( + id=fake.uuid4(), + name="conv_var2", + value="default_value2", + selector=[CONVERSATION_VARIABLE_NODE_ID, "conv_var2"], + ) + workflow.conversation_variables = [conv_var1, conv_var2] + from extensions.ext_database import db + + db.session.commit() + service = WorkflowDraftVariableService(db_session_with_containers) + service.prefill_conversation_variable_default_values(workflow) + draft_variables = ( + db.session.query(WorkflowDraftVariable) + .filter_by(app_id=app.id, node_id=CONVERSATION_VARIABLE_NODE_ID) + .all() + ) + assert len(draft_variables) == 2 + var_names = [var.name for var in draft_variables] + assert "conv_var1" in var_names + assert "conv_var2" in var_names + for var in draft_variables: + assert var.app_id == app.id + assert var.node_id == CONVERSATION_VARIABLE_NODE_ID + assert var.editable is True + assert var.get_variable_type() == DraftVariableType.CONVERSATION + + def test_get_conversation_id_from_draft_variable_success( + self, db_session_with_containers, mock_external_service_dependencies + ): + """ + Test getting conversation ID from draft variable successfully. + + This test verifies that the service can extract the conversation ID + from a system variable named "conversation_id". This is important + for maintaining conversation context across workflow executions. + """ + fake = Faker() + app = self._create_test_app(db_session_with_containers, mock_external_service_dependencies, fake=fake) + conversation_id = fake.uuid4() + conv_id_value = StringSegment(value=conversation_id) + self._create_test_variable( + db_session_with_containers, + app.id, + SYSTEM_VARIABLE_NODE_ID, + "conversation_id", + conv_id_value, + "system", + fake=fake, + ) + service = WorkflowDraftVariableService(db_session_with_containers) + retrieved_conv_id = service._get_conversation_id_from_draft_variable(app.id) + assert retrieved_conv_id == conversation_id + + def test_get_conversation_id_from_draft_variable_not_found( + self, db_session_with_containers, mock_external_service_dependencies + ): + """ + Test getting conversation ID when it doesn't exist. + + This test verifies that the service returns None when no + conversation_id variable exists for the app. This ensures + proper handling of missing conversation context scenarios. + """ + fake = Faker() + app = self._create_test_app(db_session_with_containers, mock_external_service_dependencies, fake=fake) + service = WorkflowDraftVariableService(db_session_with_containers) + retrieved_conv_id = service._get_conversation_id_from_draft_variable(app.id) + assert retrieved_conv_id is None + + def test_list_system_variables_success(self, db_session_with_containers, mock_external_service_dependencies): + """ + Test listing system variables successfully. + + This test verifies that the service can filter and return only + system variables, excluding conversation and node variables. + System variables are internal variables used by the workflow + engine for maintaining state and context. + """ + fake = Faker() + app = self._create_test_app(db_session_with_containers, mock_external_service_dependencies, fake=fake) + sys_var1_value = StringSegment(value=fake.word()) + sys_var2_value = StringSegment(value=fake.word()) + sys_var1 = self._create_test_variable( + db_session_with_containers, app.id, SYSTEM_VARIABLE_NODE_ID, "sys_var1", sys_var1_value, "system", fake=fake + ) + sys_var2 = self._create_test_variable( + db_session_with_containers, app.id, SYSTEM_VARIABLE_NODE_ID, "sys_var2", sys_var2_value, "system", fake=fake + ) + conv_var_value = StringSegment(value=fake.word()) + self._create_test_variable( + db_session_with_containers, app.id, CONVERSATION_VARIABLE_NODE_ID, "conv_var", conv_var_value, fake=fake + ) + service = WorkflowDraftVariableService(db_session_with_containers) + result = service.list_system_variables(app.id) + assert len(result.variables) == 2 + for var in result.variables: + assert var.node_id == SYSTEM_VARIABLE_NODE_ID + assert var.app_id == app.id + assert var.get_variable_type() == DraftVariableType.SYS + var_names = [var.name for var in result.variables] + assert "sys_var1" in var_names + assert "sys_var2" in var_names + assert "conv_var" not in var_names + + def test_get_variable_by_name_success(self, db_session_with_containers, mock_external_service_dependencies): + """ + Test getting variables by name successfully for different types. + + This test verifies that the service can retrieve variables by name + for different variable types (conversation, system, node). This + functionality is important for variable lookup operations during + workflow execution and user interactions. + """ + fake = Faker() + app = self._create_test_app(db_session_with_containers, mock_external_service_dependencies, fake=fake) + test_value = StringSegment(value=fake.word()) + conv_var = self._create_test_variable( + db_session_with_containers, app.id, CONVERSATION_VARIABLE_NODE_ID, "test_conv_var", test_value, fake=fake + ) + sys_var = self._create_test_variable( + db_session_with_containers, app.id, SYSTEM_VARIABLE_NODE_ID, "test_sys_var", test_value, "system", fake=fake + ) + node_var = self._create_test_variable( + db_session_with_containers, app.id, "test_node", "test_node_var", test_value, "node", fake=fake + ) + service = WorkflowDraftVariableService(db_session_with_containers) + retrieved_conv_var = service.get_conversation_variable(app.id, "test_conv_var") + assert retrieved_conv_var is not None + assert retrieved_conv_var.name == "test_conv_var" + assert retrieved_conv_var.node_id == CONVERSATION_VARIABLE_NODE_ID + retrieved_sys_var = service.get_system_variable(app.id, "test_sys_var") + assert retrieved_sys_var is not None + assert retrieved_sys_var.name == "test_sys_var" + assert retrieved_sys_var.node_id == SYSTEM_VARIABLE_NODE_ID + retrieved_node_var = service.get_node_variable(app.id, "test_node", "test_node_var") + assert retrieved_node_var is not None + assert retrieved_node_var.name == "test_node_var" + assert retrieved_node_var.node_id == "test_node" + + def test_get_variable_by_name_not_found(self, db_session_with_containers, mock_external_service_dependencies): + """ + Test getting variables by name when they don't exist. + + This test verifies that the service returns None when trying to + retrieve variables by name that don't exist. This ensures proper + handling of missing variable scenarios for all variable types. + """ + fake = Faker() + app = self._create_test_app(db_session_with_containers, mock_external_service_dependencies, fake=fake) + service = WorkflowDraftVariableService(db_session_with_containers) + retrieved_conv_var = service.get_conversation_variable(app.id, "non_existent_conv_var") + assert retrieved_conv_var is None + retrieved_sys_var = service.get_system_variable(app.id, "non_existent_sys_var") + assert retrieved_sys_var is None + retrieved_node_var = service.get_node_variable(app.id, "test_node", "non_existent_node_var") + assert retrieved_node_var is None From eb12fd9461b8a548b508a1f2995ffd35e173f54d Mon Sep 17 00:00:00 2001 From: Good Wood Date: Wed, 6 Aug 2025 09:48:25 +0800 Subject: [PATCH 153/415] fix: fix wrong unicodechar regx (#23468) --- web/service/base.ts | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/web/service/base.ts b/web/service/base.ts index 8081899837..aa6c77716f 100644 --- a/web/service/base.ts +++ b/web/service/base.ts @@ -103,7 +103,7 @@ function unicodeToChar(text: string) { if (!text) return '' - return text.replace(/\\u[0-9a-f]{4}/g, (_match, p1) => { + return text.replace(/\\u([0-9a-f]{4})/g, (_match, p1) => { return String.fromCharCode(Number.parseInt(p1, 16)) }) } From 8aac402b24573752ca91a5fe1dd7ded6cb842323 Mon Sep 17 00:00:00 2001 From: crazywoola <100913391+crazywoola@users.noreply.github.com> Date: Tue, 5 Aug 2025 19:39:19 -0700 Subject: [PATCH 154/415] fix: can not find file (#23472) --- web/app/components/base/file-uploader/hooks.ts | 16 ++-------------- 1 file changed, 2 insertions(+), 14 deletions(-) diff --git a/web/app/components/base/file-uploader/hooks.ts b/web/app/components/base/file-uploader/hooks.ts index 8e1b2148c5..d3c79a9f45 100644 --- a/web/app/components/base/file-uploader/hooks.ts +++ b/web/app/components/base/file-uploader/hooks.ts @@ -68,6 +68,7 @@ export const useFile = (fileConfig: FileUpload) => { } return true } + case SupportUploadFileTypes.custom: case SupportUploadFileTypes.document: { if (fileSize > docSizeLimit) { notify({ @@ -107,19 +108,6 @@ export const useFile = (fileConfig: FileUpload) => { } return true } - case SupportUploadFileTypes.custom: { - if (fileSize > docSizeLimit) { - notify({ - type: 'error', - message: t('common.fileUploader.uploadFromComputerLimit', { - type: SupportUploadFileTypes.document, - size: formatFileSize(docSizeLimit), - }), - }) - return false - } - return true - } default: { return true } @@ -231,7 +219,7 @@ export const useFile = (fileConfig: FileUpload) => { url: res.url, } if (!isAllowedFileExtension(res.name, res.mime_type, fileConfig.allowed_file_types || [], fileConfig.allowed_file_extensions || [])) { - notify({ type: 'error', message: `${t('common.fileUploader.fileExtensionNotSupport')} ${file.type}` }) + notify({ type: 'error', message: `${t('common.fileUploader.fileExtensionNotSupport')} ${newFile.type}` }) handleRemoveFile(uploadingFile.id) } if (!checkSizeLimit(newFile.supportFileType, newFile.size)) From 724ec12bf37e6cfb5e05081da62bee028828c1de Mon Sep 17 00:00:00 2001 From: GuanMu Date: Wed, 6 Aug 2025 11:01:10 +0800 Subject: [PATCH 155/415] Feat workflow node align (#23451) --- .../hooks/use-selection-interactions.ts | 24 + web/app/components/workflow/index.tsx | 4 + .../workflow/selection-contextmenu.tsx | 433 ++++++++++++++++++ .../workflow/store/workflow/panel-slice.ts | 7 + web/i18n/en-US/workflow.ts | 12 + web/i18n/zh-Hans/workflow.ts | 12 + 6 files changed, 492 insertions(+) create mode 100644 web/app/components/workflow/selection-contextmenu.tsx diff --git a/web/app/components/workflow/hooks/use-selection-interactions.ts b/web/app/components/workflow/hooks/use-selection-interactions.ts index 36aa0485ae..0055549b7d 100644 --- a/web/app/components/workflow/hooks/use-selection-interactions.ts +++ b/web/app/components/workflow/hooks/use-selection-interactions.ts @@ -131,10 +131,34 @@ export const useSelectionInteractions = () => { setEdges(newEdges) }, [store]) + const handleSelectionContextMenu = useCallback((e: MouseEvent) => { + const target = e.target as HTMLElement + if (!target.classList.contains('react-flow__nodesselection-rect')) + return + + e.preventDefault() + const container = document.querySelector('#workflow-container') + const { x, y } = container!.getBoundingClientRect() + workflowStore.setState({ + selectionMenu: { + top: e.clientY - y, + left: e.clientX - x, + }, + }) + }, [workflowStore]) + + const handleSelectionContextmenuCancel = useCallback(() => { + workflowStore.setState({ + selectionMenu: undefined, + }) + }, [workflowStore]) + return { handleSelectionStart, handleSelectionChange, handleSelectionDrag, handleSelectionCancel, + handleSelectionContextMenu, + handleSelectionContextmenuCancel, } } diff --git a/web/app/components/workflow/index.tsx b/web/app/components/workflow/index.tsx index a5894451c1..2ebb040f07 100644 --- a/web/app/components/workflow/index.tsx +++ b/web/app/components/workflow/index.tsx @@ -65,6 +65,7 @@ import HelpLine from './help-line' import CandidateNode from './candidate-node' import PanelContextmenu from './panel-contextmenu' import NodeContextmenu from './node-contextmenu' +import SelectionContextmenu from './selection-contextmenu' import SyncingDataModal from './syncing-data-modal' import LimitTips from './limit-tips' import { @@ -263,6 +264,7 @@ export const Workflow: FC = memo(({ handleSelectionStart, handleSelectionChange, handleSelectionDrag, + handleSelectionContextMenu, } = useSelectionInteractions() const { handlePaneContextMenu, @@ -313,6 +315,7 @@ export const Workflow: FC = memo(({ + { !!showConfirm && ( @@ -349,6 +352,7 @@ export const Workflow: FC = memo(({ onSelectionChange={handleSelectionChange} onSelectionDrag={handleSelectionDrag} onPaneContextMenu={handlePaneContextMenu} + onSelectionContextMenu={handleSelectionContextMenu} connectionLineComponent={CustomConnectionLine} // TODO: For LOOP node, how to distinguish between ITERATION and LOOP here? Maybe both are the same? connectionLineContainerStyle={{ zIndex: ITERATION_CHILDREN_Z_INDEX }} diff --git a/web/app/components/workflow/selection-contextmenu.tsx b/web/app/components/workflow/selection-contextmenu.tsx new file mode 100644 index 0000000000..71c8e97ab7 --- /dev/null +++ b/web/app/components/workflow/selection-contextmenu.tsx @@ -0,0 +1,433 @@ +import { + memo, + useCallback, + useEffect, + useMemo, + useRef, +} from 'react' +import { useTranslation } from 'react-i18next' +import { useClickAway } from 'ahooks' +import { useStore as useReactFlowStore, useStoreApi } from 'reactflow' +import { + RiAlignBottom, + RiAlignCenter, + RiAlignJustify, + RiAlignLeft, + RiAlignRight, + RiAlignTop, +} from '@remixicon/react' +import { useNodesReadOnly, useNodesSyncDraft } from './hooks' +import produce from 'immer' +import { WorkflowHistoryEvent, useWorkflowHistory } from './hooks/use-workflow-history' +import { useStore } from './store' +import { useSelectionInteractions } from './hooks/use-selection-interactions' +import { useWorkflowStore } from './store' + +enum AlignType { + Left = 'left', + Center = 'center', + Right = 'right', + Top = 'top', + Middle = 'middle', + Bottom = 'bottom', + DistributeHorizontal = 'distributeHorizontal', + DistributeVertical = 'distributeVertical', +} + +const SelectionContextmenu = () => { + const { t } = useTranslation() + const ref = useRef(null) + const { getNodesReadOnly } = useNodesReadOnly() + const { handleSelectionContextmenuCancel } = useSelectionInteractions() + const selectionMenu = useStore(s => s.selectionMenu) + + // Access React Flow methods + const store = useStoreApi() + const workflowStore = useWorkflowStore() + + // Get selected nodes for alignment logic + const selectedNodes = useReactFlowStore(state => + state.getNodes().filter(node => node.selected), + ) + + const { handleSyncWorkflowDraft } = useNodesSyncDraft() + const { saveStateToHistory } = useWorkflowHistory() + + const menuRef = useRef(null) + + const menuPosition = useMemo(() => { + if (!selectionMenu) return { left: 0, top: 0 } + + let left = selectionMenu.left + let top = selectionMenu.top + + const container = document.querySelector('#workflow-container') + if (container) { + const { width: containerWidth, height: containerHeight } = container.getBoundingClientRect() + + const menuWidth = 240 + + const estimatedMenuHeight = 380 + + if (left + menuWidth > containerWidth) + left = left - menuWidth + + if (top + estimatedMenuHeight > containerHeight) + top = top - estimatedMenuHeight + + left = Math.max(0, left) + top = Math.max(0, top) + } + + return { left, top } + }, [selectionMenu]) + + useClickAway(() => { + handleSelectionContextmenuCancel() + }, ref) + + useEffect(() => { + if (selectionMenu && selectedNodes.length <= 1) + handleSelectionContextmenuCancel() + }, [selectionMenu, selectedNodes.length, handleSelectionContextmenuCancel]) + + // Handle align nodes logic + const handleAlignNode = useCallback((currentNode: any, nodeToAlign: any, alignType: AlignType, minX: number, maxX: number, minY: number, maxY: number) => { + const width = nodeToAlign.width + const height = nodeToAlign.height + + // Calculate new positions based on alignment type + switch (alignType) { + case AlignType.Left: + // For left alignment, align left edge of each node to minX + currentNode.position.x = minX + if (currentNode.positionAbsolute) + currentNode.positionAbsolute.x = minX + break + + case AlignType.Center: { + // For center alignment, center each node horizontally in the selection bounds + const centerX = minX + (maxX - minX) / 2 - width / 2 + currentNode.position.x = centerX + if (currentNode.positionAbsolute) + currentNode.positionAbsolute.x = centerX + break + } + + case AlignType.Right: { + // For right alignment, align right edge of each node to maxX + const rightX = maxX - width + currentNode.position.x = rightX + if (currentNode.positionAbsolute) + currentNode.positionAbsolute.x = rightX + break + } + + case AlignType.Top: { + // For top alignment, align top edge of each node to minY + currentNode.position.y = minY + if (currentNode.positionAbsolute) + currentNode.positionAbsolute.y = minY + break + } + + case AlignType.Middle: { + // For middle alignment, center each node vertically in the selection bounds + const middleY = minY + (maxY - minY) / 2 - height / 2 + currentNode.position.y = middleY + if (currentNode.positionAbsolute) + currentNode.positionAbsolute.y = middleY + break + } + + case AlignType.Bottom: { + // For bottom alignment, align bottom edge of each node to maxY + const newY = Math.round(maxY - height) + currentNode.position.y = newY + if (currentNode.positionAbsolute) + currentNode.positionAbsolute.y = newY + break + } + } + }, []) + + // Handle distribute nodes logic + const handleDistributeNodes = useCallback((nodesToAlign: any[], nodes: any[], alignType: AlignType) => { + // Sort nodes appropriately + const sortedNodes = [...nodesToAlign].sort((a, b) => { + if (alignType === AlignType.DistributeHorizontal) { + // Sort by left position for horizontal distribution + return a.position.x - b.position.x + } + else { + // Sort by top position for vertical distribution + return a.position.y - b.position.y + } + }) + + if (sortedNodes.length < 3) + return null // Need at least 3 nodes for distribution + + let totalGap = 0 + let fixedSpace = 0 + + if (alignType === AlignType.DistributeHorizontal) { + // Fixed positions - first node's left edge and last node's right edge + const firstNodeLeft = sortedNodes[0].position.x + const lastNodeRight = sortedNodes[sortedNodes.length - 1].position.x + (sortedNodes[sortedNodes.length - 1].width || 0) + + // Total available space + totalGap = lastNodeRight - firstNodeLeft + + // Space occupied by nodes themselves + fixedSpace = sortedNodes.reduce((sum, node) => sum + (node.width || 0), 0) + } + else { + // Fixed positions - first node's top edge and last node's bottom edge + const firstNodeTop = sortedNodes[0].position.y + const lastNodeBottom = sortedNodes[sortedNodes.length - 1].position.y + (sortedNodes[sortedNodes.length - 1].height || 0) + + // Total available space + totalGap = lastNodeBottom - firstNodeTop + + // Space occupied by nodes themselves + fixedSpace = sortedNodes.reduce((sum, node) => sum + (node.height || 0), 0) + } + + // Available space for gaps + const availableSpace = totalGap - fixedSpace + + // Calculate even spacing between node edges + const spacing = availableSpace / (sortedNodes.length - 1) + + if (spacing <= 0) + return null // Nodes are overlapping, can't distribute evenly + + return produce(nodes, (draft) => { + // Keep first node fixed, position others with even gaps + let currentPosition + + if (alignType === AlignType.DistributeHorizontal) { + // Start from first node's right edge + currentPosition = sortedNodes[0].position.x + (sortedNodes[0].width || 0) + } + else { + // Start from first node's bottom edge + currentPosition = sortedNodes[0].position.y + (sortedNodes[0].height || 0) + } + + // Skip first node (index 0), it stays in place + for (let i = 1; i < sortedNodes.length - 1; i++) { + const nodeToAlign = sortedNodes[i] + const currentNode = draft.find(n => n.id === nodeToAlign.id) + if (!currentNode) continue + + if (alignType === AlignType.DistributeHorizontal) { + // Position = previous right edge + spacing + const newX: number = currentPosition + spacing + currentNode.position.x = newX + if (currentNode.positionAbsolute) + currentNode.positionAbsolute.x = newX + + // Update for next iteration - current node's right edge + currentPosition = newX + (nodeToAlign.width || 0) + } + else { + // Position = previous bottom edge + spacing + const newY: number = currentPosition + spacing + currentNode.position.y = newY + if (currentNode.positionAbsolute) + currentNode.positionAbsolute.y = newY + + // Update for next iteration - current node's bottom edge + currentPosition = newY + (nodeToAlign.height || 0) + } + } + }) + }, []) + + const handleAlignNodes = useCallback((alignType: AlignType) => { + if (getNodesReadOnly() || selectedNodes.length <= 1) { + handleSelectionContextmenuCancel() + return + } + + // Disable node animation state - same as handleNodeDragStart + workflowStore.setState({ nodeAnimation: false }) + + // Get all current nodes + const nodes = store.getState().getNodes() + + // Get all selected nodes + const selectedNodeIds = selectedNodes.map(node => node.id) + const nodesToAlign = nodes.filter(node => selectedNodeIds.includes(node.id)) + + if (nodesToAlign.length <= 1) { + handleSelectionContextmenuCancel() + return + } + + // Calculate node boundaries for alignment + let minX = Number.MAX_SAFE_INTEGER + let maxX = Number.MIN_SAFE_INTEGER + let minY = Number.MAX_SAFE_INTEGER + let maxY = Number.MIN_SAFE_INTEGER + + // Calculate boundaries of selected nodes + const validNodes = nodesToAlign.filter(node => node.width && node.height) + validNodes.forEach((node) => { + const width = node.width! + const height = node.height! + minX = Math.min(minX, node.position.x) + maxX = Math.max(maxX, node.position.x + width) + minY = Math.min(minY, node.position.y) + maxY = Math.max(maxY, node.position.y + height) + }) + + // Handle distribute nodes logic + if (alignType === AlignType.DistributeHorizontal || alignType === AlignType.DistributeVertical) { + const distributeNodes = handleDistributeNodes(nodesToAlign, nodes, alignType) + if (distributeNodes) { + // Apply node distribution updates + store.getState().setNodes(distributeNodes) + handleSelectionContextmenuCancel() + + // Clear guide lines + const { setHelpLineHorizontal, setHelpLineVertical } = workflowStore.getState() + setHelpLineHorizontal() + setHelpLineVertical() + + // Sync workflow draft + handleSyncWorkflowDraft() + + // Save to history + saveStateToHistory(WorkflowHistoryEvent.NodeDragStop) + + return // End function execution + } + } + + const newNodes = produce(nodes, (draft) => { + // Iterate through all selected nodes + const validNodesToAlign = nodesToAlign.filter(node => node.width && node.height) + validNodesToAlign.forEach((nodeToAlign) => { + // Find the corresponding node in draft - consistent with handleNodeDrag + const currentNode = draft.find(n => n.id === nodeToAlign.id) + if (!currentNode) + return + + // Use the extracted alignment function + handleAlignNode(currentNode, nodeToAlign, alignType, minX, maxX, minY, maxY) + }) + }) + + // Apply node position updates - consistent with handleNodeDrag and handleNodeDragStop + try { + // Directly use setNodes to update nodes - consistent with handleNodeDrag + store.getState().setNodes(newNodes) + + // Close popup + handleSelectionContextmenuCancel() + + // Clear guide lines - consistent with handleNodeDragStop + const { setHelpLineHorizontal, setHelpLineVertical } = workflowStore.getState() + setHelpLineHorizontal() + setHelpLineVertical() + + // Sync workflow draft - consistent with handleNodeDragStop + handleSyncWorkflowDraft() + + // Save to history - consistent with handleNodeDragStop + saveStateToHistory(WorkflowHistoryEvent.NodeDragStop) + } + catch (err) { + console.error('Failed to update nodes:', err) + } + }, [store, workflowStore, selectedNodes, getNodesReadOnly, handleSyncWorkflowDraft, saveStateToHistory, handleSelectionContextmenuCancel, handleAlignNode, handleDistributeNodes]) + + if (!selectionMenu) + return null + + return ( +
    +
    +
    +
    + {t('workflow.operator.vertical')} +
    +
    handleAlignNodes(AlignType.Top)} + > + + {t('workflow.operator.alignTop')} +
    +
    handleAlignNodes(AlignType.Middle)} + > + + {t('workflow.operator.alignMiddle')} +
    +
    handleAlignNodes(AlignType.Bottom)} + > + + {t('workflow.operator.alignBottom')} +
    +
    handleAlignNodes(AlignType.DistributeVertical)} + > + + {t('workflow.operator.distributeVertical')} +
    +
    +
    +
    +
    + {t('workflow.operator.horizontal')} +
    +
    handleAlignNodes(AlignType.Left)} + > + + {t('workflow.operator.alignLeft')} +
    +
    handleAlignNodes(AlignType.Center)} + > + + {t('workflow.operator.alignCenter')} +
    +
    handleAlignNodes(AlignType.Right)} + > + + {t('workflow.operator.alignRight')} +
    +
    handleAlignNodes(AlignType.DistributeHorizontal)} + > + + {t('workflow.operator.distributeHorizontal')} +
    +
    +
    +
    + ) +} + +export default memo(SelectionContextmenu) diff --git a/web/app/components/workflow/store/workflow/panel-slice.ts b/web/app/components/workflow/store/workflow/panel-slice.ts index 855f45f264..4848beeac5 100644 --- a/web/app/components/workflow/store/workflow/panel-slice.ts +++ b/web/app/components/workflow/store/workflow/panel-slice.ts @@ -15,6 +15,11 @@ export type PanelSliceShape = { left: number } setPanelMenu: (panelMenu: PanelSliceShape['panelMenu']) => void + selectionMenu?: { + top: number + left: number + } + setSelectionMenu: (selectionMenu: PanelSliceShape['selectionMenu']) => void showVariableInspectPanel: boolean setShowVariableInspectPanel: (showVariableInspectPanel: boolean) => void initShowLastRunTab: boolean @@ -33,6 +38,8 @@ export const createPanelSlice: StateCreator = set => ({ setShowDebugAndPreviewPanel: showDebugAndPreviewPanel => set(() => ({ showDebugAndPreviewPanel })), panelMenu: undefined, setPanelMenu: panelMenu => set(() => ({ panelMenu })), + selectionMenu: undefined, + setSelectionMenu: selectionMenu => set(() => ({ selectionMenu })), showVariableInspectPanel: false, setShowVariableInspectPanel: showVariableInspectPanel => set(() => ({ showVariableInspectPanel })), initShowLastRunTab: false, diff --git a/web/i18n/en-US/workflow.ts b/web/i18n/en-US/workflow.ts index 10b74dadb3..2653303e63 100644 --- a/web/i18n/en-US/workflow.ts +++ b/web/i18n/en-US/workflow.ts @@ -287,6 +287,18 @@ const translation = { zoomTo50: 'Zoom to 50%', zoomTo100: 'Zoom to 100%', zoomToFit: 'Zoom to Fit', + alignNodes: 'Align Nodes', + alignLeft: 'Left', + alignCenter: 'Center', + alignRight: 'Right', + alignTop: 'Top', + alignMiddle: 'Middle', + alignBottom: 'Bottom', + vertical: 'Vertical', + horizontal: 'Horizontal', + distributeHorizontal: 'Space Horizontally', + distributeVertical: 'Space Vertically', + selectionAlignment: 'Selection Alignment', }, variableReference: { noAvailableVars: 'No available variables', diff --git a/web/i18n/zh-Hans/workflow.ts b/web/i18n/zh-Hans/workflow.ts index dbc37a7b38..e18c597306 100644 --- a/web/i18n/zh-Hans/workflow.ts +++ b/web/i18n/zh-Hans/workflow.ts @@ -287,6 +287,18 @@ const translation = { zoomTo50: '缩放到 50%', zoomTo100: '放大到 100%', zoomToFit: '自适应视图', + alignNodes: '对齐节点', + alignLeft: '左对齐', + alignCenter: '居中对齐', + alignRight: '右对齐', + alignTop: '顶部对齐', + alignMiddle: '中部对齐', + alignBottom: '底部对齐', + vertical: '垂直方向', + horizontal: '水平方向', + distributeHorizontal: '水平等间距', + distributeVertical: '垂直等间距', + selectionAlignment: '选择对齐', }, variableReference: { noAvailableVars: '没有可用变量', From 823872d294708eb5acb94b857a5a5ae44fad0252 Mon Sep 17 00:00:00 2001 From: lyzno1 <92089059+lyzno1@users.noreply.github.com> Date: Wed, 6 Aug 2025 11:19:47 +0800 Subject: [PATCH 156/415] Fix API documentation layout and dark mode contrast issues (#23462) --- web/app/(commonLayout)/datasets/Datasets.tsx | 2 +- web/app/(commonLayout)/datasets/Doc.tsx | 4 +- .../datasets/template/template.en.mdx | 80 +++++++++--------- .../datasets/template/template.ja.mdx | 74 ++++++++--------- .../datasets/template/template.zh.mdx | 82 +++++++++---------- web/app/components/develop/doc.tsx | 4 +- web/global.d.ts | 5 ++ 7 files changed, 128 insertions(+), 123 deletions(-) diff --git a/web/app/(commonLayout)/datasets/Datasets.tsx b/web/app/(commonLayout)/datasets/Datasets.tsx index 2d4848e92e..18a31d8e87 100644 --- a/web/app/(commonLayout)/datasets/Datasets.tsx +++ b/web/app/(commonLayout)/datasets/Datasets.tsx @@ -36,7 +36,7 @@ const getKey = ( } type Props = { - containerRef: React.RefObject + containerRef: React.RefObject tags: string[] keywords: string includeAll: boolean diff --git a/web/app/(commonLayout)/datasets/Doc.tsx b/web/app/(commonLayout)/datasets/Doc.tsx index 042a90f4af..78f767dbec 100644 --- a/web/app/(commonLayout)/datasets/Doc.tsx +++ b/web/app/(commonLayout)/datasets/Doc.tsx @@ -87,7 +87,7 @@ const Doc = ({ apiBaseUrl }: DocProps) => {
    {isTocExpanded ? ( -
    -
    +
    -
    +
    -
    +
    -
    +
    -
    +
    -
    +
    -
    +
    -
    +
    -
    +
    -
    +
    -
    +
    ___ -
    +
    -
    +
    -
    +
    -
    +
    -
    +
    -
    +
    -
    +
    -
    +
    -
    +
    -
    +
    -
    +
    -
    +
    -
    +
    -
    +
    -
    +
    -
    +
    -
    +
    -
    +
    -
    +
    -
    +
    -
    +
    -
    +
    -
    +
    -
    +
    -
    +
    -
    +
    diff --git a/web/app/(commonLayout)/datasets/template/template.zh.mdx b/web/app/(commonLayout)/datasets/template/template.zh.mdx index c21ce3bf5f..b7ea889a46 100644 --- a/web/app/(commonLayout)/datasets/template/template.zh.mdx +++ b/web/app/(commonLayout)/datasets/template/template.zh.mdx @@ -25,7 +25,7 @@ import { Row, Col, Properties, Property, Heading, SubProperty, PropertyInstructi -
    +
    -
    +
    -
    +
    -
    +
    -
    +
    -
    +
    -
    +
    -
    +
    -
    +
    -
    +
    -
    +
    -
    +
    -
    +
    ___ -
    +
    -
    +
    -
    +
    -
    +
    -
    +
    -
    +
    -
    +
    -
    +
    -
    +
    -
    +
    @@ -1915,7 +1915,7 @@ ___ -
    +
    -
    +
    -
    +
    -
    +
    -
    +
    -
    +
    -
    +
    -
    +
    -
    +
    -
    +
    -
    +
    -
    +
    -
    +
    -
    +
    -
    +
    -
    +
    -
    +
    -
    +
    diff --git a/web/app/components/develop/doc.tsx b/web/app/components/develop/doc.tsx index 65e6d4aec0..806ee72725 100644 --- a/web/app/components/develop/doc.tsx +++ b/web/app/components/develop/doc.tsx @@ -87,7 +87,7 @@ const Doc = ({ appDetail }: IDocProps) => {
    {isTocExpanded ? ( -
    diff --git a/web/app/components/develop/template/template.zh.mdx b/web/app/components/develop/template/template.zh.mdx index a5eea3d193..bdb54458e0 100755 --- a/web/app/components/develop/template/template.zh.mdx +++ b/web/app/components/develop/template/template.zh.mdx @@ -423,7 +423,7 @@ import { Row, Col, Properties, Property, Heading, SubProperty } from '../md.tsx' url='/text-to-audio' method='POST' title='文字转语音' - name='#audio' + name='#text-to-audio' /> diff --git a/web/app/components/develop/template/template_advanced_chat.en.mdx b/web/app/components/develop/template/template_advanced_chat.en.mdx index 66c45b6e4f..c534c3d6b7 100644 --- a/web/app/components/develop/template/template_advanced_chat.en.mdx +++ b/web/app/components/develop/template/template_advanced_chat.en.mdx @@ -1136,7 +1136,7 @@ Chat applications support session persistence, allowing previous chat history to url='/audio-to-text' method='POST' title='Speech to Text' - name='#audio' + name='#audio-to-text' /> @@ -1187,7 +1187,7 @@ Chat applications support session persistence, allowing previous chat history to url='/text-to-audio' method='POST' title='Text to Audio' - name='#audio' + name='#text-to-audio' /> diff --git a/web/app/components/develop/template/template_advanced_chat.ja.mdx b/web/app/components/develop/template/template_advanced_chat.ja.mdx index 849b58129e..28698d3e12 100644 --- a/web/app/components/develop/template/template_advanced_chat.ja.mdx +++ b/web/app/components/develop/template/template_advanced_chat.ja.mdx @@ -1136,7 +1136,7 @@ import { Row, Col, Properties, Property, Heading, SubProperty, Paragraph } from url='/audio-to-text' method='POST' title='音声からテキストへ' - name='#audio' + name='#audio-to-text' /> @@ -1187,7 +1187,7 @@ import { Row, Col, Properties, Property, Heading, SubProperty, Paragraph } from url='/text-to-audio' method='POST' title='テキストから音声へ' - name='#audio' + name='#text-to-audio' /> diff --git a/web/app/components/develop/template/template_advanced_chat.zh.mdx b/web/app/components/develop/template/template_advanced_chat.zh.mdx index 47c88bda90..08d8d54017 100755 --- a/web/app/components/develop/template/template_advanced_chat.zh.mdx +++ b/web/app/components/develop/template/template_advanced_chat.zh.mdx @@ -1174,7 +1174,7 @@ import { Row, Col, Properties, Property, Heading, SubProperty } from '../md.tsx' url='/audio-to-text' method='POST' title='语音转文字' - name='#audio' + name='#audio-to-text' /> @@ -1222,7 +1222,7 @@ import { Row, Col, Properties, Property, Heading, SubProperty } from '../md.tsx' url='/text-to-audio' method='POST' title='文字转语音' - name='#audio' + name='#text-to-audio' /> diff --git a/web/app/components/develop/template/template_chat.en.mdx b/web/app/components/develop/template/template_chat.en.mdx index 24efe62fda..1bdb884958 100644 --- a/web/app/components/develop/template/template_chat.en.mdx +++ b/web/app/components/develop/template/template_chat.en.mdx @@ -1170,7 +1170,7 @@ Chat applications support session persistence, allowing previous chat history to url='/audio-to-text' method='POST' title='Speech to Text' - name='#audio' + name='#audio-to-text' /> @@ -1221,7 +1221,7 @@ Chat applications support session persistence, allowing previous chat history to url='/text-to-audio' method='POST' title='Text to Audio' - name='#audio' + name='#text-to-audio' /> diff --git a/web/app/components/develop/template/template_chat.ja.mdx b/web/app/components/develop/template/template_chat.ja.mdx index d251bcd5cd..d82703944a 100644 --- a/web/app/components/develop/template/template_chat.ja.mdx +++ b/web/app/components/develop/template/template_chat.ja.mdx @@ -1169,7 +1169,7 @@ import { Row, Col, Properties, Property, Heading, SubProperty, Paragraph } from url='/audio-to-text' method='POST' title='音声からテキストへ' - name='#audio' + name='#audio-to-text' /> @@ -1220,7 +1220,7 @@ import { Row, Col, Properties, Property, Heading, SubProperty, Paragraph } from url='/text-to-audio' method='POST' title='テキストから音声へ' - name='#audio' + name='#text-to-audio' /> diff --git a/web/app/components/develop/template/template_chat.zh.mdx b/web/app/components/develop/template/template_chat.zh.mdx index 998d524b55..abfc718909 100644 --- a/web/app/components/develop/template/template_chat.zh.mdx +++ b/web/app/components/develop/template/template_chat.zh.mdx @@ -1185,7 +1185,7 @@ import { Row, Col, Properties, Property, Heading, SubProperty } from '../md.tsx' url='/audio-to-text' method='POST' title='语音转文字' - name='#audio' + name='#audio-to-text' /> @@ -1233,7 +1233,7 @@ import { Row, Col, Properties, Property, Heading, SubProperty } from '../md.tsx' url='/text-to-audio' method='POST' title='文字转语音' - name='#audio' + name='#text-to-audio' /> diff --git a/web/i18n/de-DE/workflow.ts b/web/i18n/de-DE/workflow.ts index 79e54b96c0..d9189ad5cc 100644 --- a/web/i18n/de-DE/workflow.ts +++ b/web/i18n/de-DE/workflow.ts @@ -287,6 +287,18 @@ const translation = { zoomTo50: 'Auf 50% vergrößern', zoomTo100: 'Auf 100% vergrößern', zoomToFit: 'An Bildschirm anpassen', + selectionAlignment: 'Ausrichtung der Auswahl', + alignLeft: 'Links', + alignTop: 'Nach oben', + distributeVertical: 'Vertikaler Raum', + alignBottom: 'Unteres', + distributeHorizontal: 'Horizontaler Raum', + vertical: 'Senkrecht', + alignMiddle: 'Mitte', + alignCenter: 'Mitte', + alignRight: 'Rechts', + alignNodes: 'Knoten ausrichten', + horizontal: 'Horizontal', }, panel: { userInputField: 'Benutzereingabefeld', diff --git a/web/i18n/es-ES/workflow.ts b/web/i18n/es-ES/workflow.ts index 5801647611..bac4b9c740 100644 --- a/web/i18n/es-ES/workflow.ts +++ b/web/i18n/es-ES/workflow.ts @@ -287,6 +287,18 @@ const translation = { zoomTo50: 'Zoom al 50%', zoomTo100: 'Zoom al 100%', zoomToFit: 'Ajustar al tamaño', + alignTop: 'Arriba', + alignBottom: 'Fondo', + alignNodes: 'Alinear nodos', + alignCenter: 'Centro', + selectionAlignment: 'Alineación de selección', + horizontal: 'Horizontal', + distributeHorizontal: 'Espaciar horizontalmente', + vertical: 'Vertical', + distributeVertical: 'Espaciar verticalmente', + alignMiddle: 'medio', + alignLeft: 'izquierdo', + alignRight: 'derecho', }, panel: { userInputField: 'Campo de entrada del usuario', diff --git a/web/i18n/fa-IR/workflow.ts b/web/i18n/fa-IR/workflow.ts index 55086a1a3b..dde4988dc4 100644 --- a/web/i18n/fa-IR/workflow.ts +++ b/web/i18n/fa-IR/workflow.ts @@ -287,6 +287,18 @@ const translation = { zoomTo50: 'بزرگ‌نمایی به 50%', zoomTo100: 'بزرگ‌نمایی به 100%', zoomToFit: 'تناسب با اندازه', + horizontal: 'افقی', + alignBottom: 'پایین', + alignRight: 'راست', + vertical: 'عمودی', + alignCenter: 'مرکز', + alignLeft: 'چپ', + distributeVertical: 'فضا عمودی', + distributeHorizontal: 'فضا به صورت افقی', + alignTop: 'بالا', + alignNodes: 'تراز کردن گره ها', + selectionAlignment: 'تراز انتخاب', + alignMiddle: 'میانه', }, panel: { userInputField: 'فیلد ورودی کاربر', diff --git a/web/i18n/fr-FR/workflow.ts b/web/i18n/fr-FR/workflow.ts index 20d5d23cee..bd801fb841 100644 --- a/web/i18n/fr-FR/workflow.ts +++ b/web/i18n/fr-FR/workflow.ts @@ -287,6 +287,18 @@ const translation = { zoomTo50: 'Zoomer à 50%', zoomTo100: 'Zoomer à 100%', zoomToFit: 'Zoomer pour ajuster', + alignBottom: 'Fond', + alignLeft: 'Gauche', + alignCenter: 'Centre', + alignTop: 'Retour au début', + alignNodes: 'Aligner les nœuds', + distributeHorizontal: 'Espace horizontal', + alignMiddle: 'Milieu', + horizontal: 'Horizontal', + selectionAlignment: 'Alignement de la sélection', + alignRight: 'Droite', + vertical: 'Vertical', + distributeVertical: 'Espace vertical', }, panel: { userInputField: 'Champ de saisie de l\'utilisateur', diff --git a/web/i18n/hi-IN/workflow.ts b/web/i18n/hi-IN/workflow.ts index 74c5e04097..baeed41d31 100644 --- a/web/i18n/hi-IN/workflow.ts +++ b/web/i18n/hi-IN/workflow.ts @@ -298,6 +298,18 @@ const translation = { zoomTo50: '50% पर ज़ूम करें', zoomTo100: '100% पर ज़ूम करें', zoomToFit: 'फिट करने के लिए ज़ूम करें', + alignRight: 'सही', + alignLeft: 'बाईं ओर', + alignTop: 'शीर्ष', + horizontal: 'क्षैतिज', + alignNodes: 'नोड्स को संरेखित करें', + selectionAlignment: 'चयन संरेखण', + alignCenter: 'केंद्र', + vertical: 'ऊर्ध्वाधर', + distributeHorizontal: 'क्षैतिज स्पेस', + alignBottom: 'तल', + distributeVertical: 'अंतरिक्ष को वर्टिकल रूप से', + alignMiddle: 'मध्य', }, panel: { userInputField: 'उपयोगकर्ता इनपुट फ़ील्ड', diff --git a/web/i18n/it-IT/workflow.ts b/web/i18n/it-IT/workflow.ts index d006eba2be..f1fdf5c3fb 100644 --- a/web/i18n/it-IT/workflow.ts +++ b/web/i18n/it-IT/workflow.ts @@ -301,6 +301,18 @@ const translation = { zoomTo50: 'Zoom al 50%', zoomTo100: 'Zoom al 100%', zoomToFit: 'Zoom per Adattare', + alignRight: 'A destra', + selectionAlignment: 'Allineamento della selezione', + alignBottom: 'Fondoschiena', + alignTop: 'In alto', + vertical: 'Verticale', + alignCenter: 'Centro', + alignLeft: 'A sinistra', + alignMiddle: 'Mezzo', + horizontal: 'Orizzontale', + alignNodes: 'Allinea nodi', + distributeHorizontal: 'Spazia orizzontalmente', + distributeVertical: 'Spazia verticalmente', }, panel: { userInputField: 'Campo di Input Utente', diff --git a/web/i18n/ja-JP/workflow.ts b/web/i18n/ja-JP/workflow.ts index a987efdfb1..fa53075585 100644 --- a/web/i18n/ja-JP/workflow.ts +++ b/web/i18n/ja-JP/workflow.ts @@ -287,6 +287,18 @@ const translation = { zoomTo50: '50% サイズ', zoomTo100: '等倍表示', zoomToFit: '画面に合わせる', + horizontal: '横', + alignBottom: '底', + alignNodes: 'ノードを整列させる', + vertical: '垂直', + alignLeft: '左', + alignTop: 'トップ', + alignRight: '右', + alignMiddle: '中間', + distributeVertical: '垂直にスペースを', + alignCenter: 'センター', + selectionAlignment: '選択の整列', + distributeHorizontal: '空間を水平方向に', }, variableReference: { noAvailableVars: '利用可能な変数がありません', diff --git a/web/i18n/ko-KR/workflow.ts b/web/i18n/ko-KR/workflow.ts index bc3ed580b6..ca83ae3b10 100644 --- a/web/i18n/ko-KR/workflow.ts +++ b/web/i18n/ko-KR/workflow.ts @@ -308,6 +308,18 @@ const translation = { zoomTo50: '50% 로 확대', zoomTo100: '100% 로 확대', zoomToFit: '화면에 맞게 확대', + alignCenter: '중', + alignRight: '오른쪽', + alignLeft: '왼쪽', + vertical: '세로', + alignTop: '맨 위로', + alignMiddle: '중간', + alignNodes: '노드 정렬', + distributeVertical: '수직 공간', + horizontal: '가로', + selectionAlignment: '선택 정렬', + alignBottom: '밑바닥', + distributeHorizontal: '수평 공간', }, panel: { userInputField: '사용자 입력 필드', diff --git a/web/i18n/pl-PL/workflow.ts b/web/i18n/pl-PL/workflow.ts index 29ab3ff182..132d050868 100644 --- a/web/i18n/pl-PL/workflow.ts +++ b/web/i18n/pl-PL/workflow.ts @@ -287,6 +287,18 @@ const translation = { zoomTo50: 'Powiększ do 50%', zoomTo100: 'Powiększ do 100%', zoomToFit: 'Dopasuj do ekranu', + alignMiddle: 'Środek', + alignTop: 'Do góry', + distributeHorizontal: 'Odstęp w poziomie', + alignCenter: 'Centrum', + alignRight: 'Prawy', + alignNodes: 'Wyrównywanie węzłów', + selectionAlignment: 'Wyrównanie zaznaczenia', + horizontal: 'Poziomy', + distributeVertical: 'Przestrzeń w pionie', + alignBottom: 'Dno', + alignLeft: 'Lewy', + vertical: 'Pionowy', }, panel: { userInputField: 'Pole wprowadzania użytkownika', diff --git a/web/i18n/pt-BR/workflow.ts b/web/i18n/pt-BR/workflow.ts index 8c6c3df90a..e705641666 100644 --- a/web/i18n/pt-BR/workflow.ts +++ b/web/i18n/pt-BR/workflow.ts @@ -287,6 +287,18 @@ const translation = { zoomTo50: 'Aproximar para 50%', zoomTo100: 'Aproximar para 100%', zoomToFit: 'Aproximar para ajustar', + vertical: 'Vertical', + alignNodes: 'Alinhar nós', + selectionAlignment: 'Alinhamento de seleção', + alignLeft: 'Esquerda', + alignBottom: 'Fundo', + distributeHorizontal: 'Espaço horizontalmente', + alignMiddle: 'Meio', + alignRight: 'Certo', + horizontal: 'Horizontal', + distributeVertical: 'Espaço Verticalmente', + alignCenter: 'Centro', + alignTop: 'Início', }, panel: { userInputField: 'Campo de entrada do usuário', diff --git a/web/i18n/ro-RO/workflow.ts b/web/i18n/ro-RO/workflow.ts index a994edd78f..5b90ce5abc 100644 --- a/web/i18n/ro-RO/workflow.ts +++ b/web/i18n/ro-RO/workflow.ts @@ -287,6 +287,18 @@ const translation = { zoomTo50: 'Mărește la 50%', zoomTo100: 'Mărește la 100%', zoomToFit: 'Mărește pentru a se potrivi', + horizontal: 'Orizontal', + selectionAlignment: 'Alinierea selecției', + vertical: 'Vertical', + alignRight: 'Dreapta', + alignLeft: 'Stânga', + alignMiddle: 'Mijloc', + distributeVertical: 'Spațiu vertical', + alignCenter: 'Centru', + distributeHorizontal: 'Spațiu orizontal', + alignBottom: 'Fund', + alignTop: 'Culme', + alignNodes: 'Alinierea nodurilor', }, panel: { userInputField: 'Câmp de introducere utilizator', diff --git a/web/i18n/ru-RU/workflow.ts b/web/i18n/ru-RU/workflow.ts index 81534334b4..0fff591b50 100644 --- a/web/i18n/ru-RU/workflow.ts +++ b/web/i18n/ru-RU/workflow.ts @@ -287,6 +287,18 @@ const translation = { zoomTo50: 'Масштаб 50%', zoomTo100: 'Масштаб 100%', zoomToFit: 'По размеру', + alignTop: 'Вверх', + alignBottom: 'Дно', + alignRight: 'Правильно', + distributeHorizontal: 'Пространство по горизонтали', + alignMiddle: 'Середина', + vertical: 'Вертикальный', + alignCenter: 'Центр', + alignLeft: 'Налево', + selectionAlignment: 'Выравнивание выделения', + horizontal: 'Горизонтальный', + alignNodes: 'Выравнивание узлов', + distributeVertical: 'Пространство по вертикали', }, panel: { userInputField: 'Поле ввода пользователя', diff --git a/web/i18n/sl-SI/workflow.ts b/web/i18n/sl-SI/workflow.ts index da8d19aa06..c544a9a35c 100644 --- a/web/i18n/sl-SI/workflow.ts +++ b/web/i18n/sl-SI/workflow.ts @@ -287,6 +287,18 @@ const translation = { zoomIn: 'Zoom in', zoomTo50: 'Povečaj na 50%', zoomTo100: 'Povečaj na 100%', + alignMiddle: 'Srednji', + alignBottom: 'Dno', + alignCenter: 'Center', + distributeVertical: 'Razmik navpično', + alignRight: 'Desno', + alignTop: 'Vrh', + vertical: 'Navpičen', + distributeHorizontal: 'Razmik vodoravno', + selectionAlignment: 'Poravnava izbora', + alignNodes: 'Poravnava vozlišč', + horizontal: 'Vodoraven', + alignLeft: 'Levo', }, variableReference: { conversationVars: 'pogovorne spremenljivke', diff --git a/web/i18n/th-TH/workflow.ts b/web/i18n/th-TH/workflow.ts index 78a49716b0..bcab73d7c4 100644 --- a/web/i18n/th-TH/workflow.ts +++ b/web/i18n/th-TH/workflow.ts @@ -287,6 +287,18 @@ const translation = { zoomTo50: 'ซูมไปที่ 50%', zoomTo100: 'ซูมไปที่ 100%', zoomToFit: 'ซูมให้พอดี', + alignBottom: 'ก้น', + alignCenter: 'ศูนย์กลาง', + alignMiddle: 'กลาง', + horizontal: 'แนวราบ', + vertical: 'ซึ่งตั้งตรง', + alignTop: 'ด้านบน', + distributeVertical: 'พื้นที่ในแนวตั้ง', + alignLeft: 'ซ้าย', + selectionAlignment: 'การจัดตําแหน่งการเลือก', + distributeHorizontal: 'ช่องว่างในแนวนอน', + alignRight: 'ขวา', + alignNodes: 'จัดตําแหน่งโหนด', }, panel: { userInputField: 'ฟิลด์ป้อนข้อมูลของผู้ใช้', diff --git a/web/i18n/tr-TR/workflow.ts b/web/i18n/tr-TR/workflow.ts index 00310524de..379c2c30e1 100644 --- a/web/i18n/tr-TR/workflow.ts +++ b/web/i18n/tr-TR/workflow.ts @@ -287,6 +287,18 @@ const translation = { zoomTo50: '%50 Yakınlaştır', zoomTo100: '%100 Yakınlaştır', zoomToFit: 'Sığdıracak Şekilde Yakınlaştır', + alignCenter: 'Orta', + alignMiddle: 'Orta', + alignLeft: 'Sol', + alignNodes: 'Düğümleri Hizala', + vertical: 'Dikey', + alignRight: 'Sağ', + alignTop: 'Sayfanın Üstü', + alignBottom: 'Dip', + selectionAlignment: 'Seçim Hizalama', + distributeHorizontal: 'Yatay Boşluk', + horizontal: 'Yatay', + distributeVertical: 'Dikey Boşluk', }, panel: { userInputField: 'Kullanıcı Giriş Alanı', diff --git a/web/i18n/uk-UA/workflow.ts b/web/i18n/uk-UA/workflow.ts index fd6e71adf4..e174b0f103 100644 --- a/web/i18n/uk-UA/workflow.ts +++ b/web/i18n/uk-UA/workflow.ts @@ -287,6 +287,18 @@ const translation = { zoomTo50: 'Збільшити до 50%', zoomTo100: 'Збільшити до 100%', zoomToFit: 'Збільшити для підгонки', + alignCenter: 'Центр', + alignRight: 'Праворуч', + vertical: 'Вертикальні', + alignBottom: 'Низ', + alignLeft: 'Ліворуч', + alignTop: 'Верх', + horizontal: 'Горизонтальні', + alignMiddle: 'Середній', + distributeVertical: 'Простір по вертикалі', + distributeHorizontal: 'Простір по горизонталі', + selectionAlignment: 'Вирівнювання вибору', + alignNodes: 'Вирівнювання вузлів', }, panel: { userInputField: 'Поле введення користувача', diff --git a/web/i18n/vi-VN/workflow.ts b/web/i18n/vi-VN/workflow.ts index 6898ec9a88..79265c3f72 100644 --- a/web/i18n/vi-VN/workflow.ts +++ b/web/i18n/vi-VN/workflow.ts @@ -287,6 +287,18 @@ const translation = { zoomTo50: 'Phóng to 50%', zoomTo100: 'Phóng to 100%', zoomToFit: 'Phóng to vừa màn hình', + alignBottom: 'Đáy', + alignMiddle: 'Trung', + alignRight: 'Bên phải', + alignNodes: 'Căn chỉnh các nút', + alignLeft: 'Bên trái', + horizontal: 'Ngang', + alignCenter: 'Trung tâm', + alignTop: 'Đỉnh', + distributeVertical: 'Không gian theo chiều dọc', + selectionAlignment: 'Căn chỉnh lựa chọn', + distributeHorizontal: 'Không gian theo chiều ngang', + vertical: 'Thẳng đứng', }, panel: { userInputField: 'Trường đầu vào của người dùng', diff --git a/web/i18n/zh-Hant/workflow.ts b/web/i18n/zh-Hant/workflow.ts index ce10ad387f..41d64f14d7 100644 --- a/web/i18n/zh-Hant/workflow.ts +++ b/web/i18n/zh-Hant/workflow.ts @@ -287,6 +287,18 @@ const translation = { zoomTo50: '縮放到 50%', zoomTo100: '放大到 100%', zoomToFit: '自適應視圖', + alignNodes: '對齊節點(Align Nodes)', + distributeVertical: '垂直空間', + alignLeft: '左', + distributeHorizontal: '水平空間', + vertical: '垂直', + alignTop: '返回頁首', + alignCenter: '中心', + horizontal: '水準', + selectionAlignment: '選擇對齊', + alignRight: '右', + alignBottom: '底', + alignMiddle: '中間', }, panel: { userInputField: '用戶輸入字段', From c95761f4e6234a27a31446bec768da72826d3324 Mon Sep 17 00:00:00 2001 From: lyzno1 <92089059+lyzno1@users.noreply.github.com> Date: Wed, 6 Aug 2025 15:59:26 +0800 Subject: [PATCH 161/415] fix: resolve i18n workflow permissions and naming issues (#23494) --- .github/workflows/translate-i18n-base-on-english.yml | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/.github/workflows/translate-i18n-base-on-english.yml b/.github/workflows/translate-i18n-base-on-english.yml index c79d58563f..1cb9c0967b 100644 --- a/.github/workflows/translate-i18n-base-on-english.yml +++ b/.github/workflows/translate-i18n-base-on-english.yml @@ -5,6 +5,10 @@ on: types: [closed] branches: [main] +permissions: + contents: write + pull-requests: write + jobs: check-and-update: if: github.event.pull_request.merged == true @@ -16,7 +20,7 @@ jobs: - uses: actions/checkout@v4 with: fetch-depth: 2 # last 2 commits - persist-credentials: false + token: ${{ secrets.GITHUB_TOKEN }} - name: Check for file changes in i18n/en-US id: check_files @@ -49,7 +53,7 @@ jobs: if: env.FILES_CHANGED == 'true' run: pnpm install --frozen-lockfile - - name: Run npm script + - name: Generate i18n translations if: env.FILES_CHANGED == 'true' run: pnpm run auto-gen-i18n @@ -57,6 +61,7 @@ jobs: if: env.FILES_CHANGED == 'true' uses: peter-evans/create-pull-request@v6 with: + token: ${{ secrets.GITHUB_TOKEN }} commit-message: Update i18n files based on en-US changes title: 'chore: translate i18n files' body: This PR was automatically created to update i18n files based on changes in en-US locale. From ad61b4249417583852bafcfeb4b4669acbf500ed Mon Sep 17 00:00:00 2001 From: ghmark675 <188834327+ghmark675@users.noreply.github.com> Date: Thu, 7 Aug 2025 09:04:09 +0800 Subject: [PATCH 162/415] fix(node): fix unexpected extra equals sign in HTTP params (#23474) --- api/core/workflow/nodes/http_request/executor.py | 5 +++-- .../nodes/http_request/test_http_request_executor.py | 8 ++++---- 2 files changed, 7 insertions(+), 6 deletions(-) diff --git a/api/core/workflow/nodes/http_request/executor.py b/api/core/workflow/nodes/http_request/executor.py index 2106369bd6..e45f63bbec 100644 --- a/api/core/workflow/nodes/http_request/executor.py +++ b/api/core/workflow/nodes/http_request/executor.py @@ -91,7 +91,7 @@ class Executor: self.auth = node_data.authorization self.timeout = timeout self.ssl_verify = node_data.ssl_verify - self.params = [] + self.params = None self.headers = {} self.content = None self.files = None @@ -139,7 +139,8 @@ class Executor: (self.variable_pool.convert_template(key).text, self.variable_pool.convert_template(value_str).text) ) - self.params = result + if result: + self.params = result def _init_headers(self): """ diff --git a/api/tests/unit_tests/core/workflow/nodes/http_request/test_http_request_executor.py b/api/tests/unit_tests/core/workflow/nodes/http_request/test_http_request_executor.py index bb6d72f51e..3101f7dd34 100644 --- a/api/tests/unit_tests/core/workflow/nodes/http_request/test_http_request_executor.py +++ b/api/tests/unit_tests/core/workflow/nodes/http_request/test_http_request_executor.py @@ -49,7 +49,7 @@ def test_executor_with_json_body_and_number_variable(): assert executor.method == "post" assert executor.url == "https://api.example.com/data" assert executor.headers == {"Content-Type": "application/json"} - assert executor.params == [] + assert executor.params is None assert executor.json == {"number": 42} assert executor.data is None assert executor.files is None @@ -102,7 +102,7 @@ def test_executor_with_json_body_and_object_variable(): assert executor.method == "post" assert executor.url == "https://api.example.com/data" assert executor.headers == {"Content-Type": "application/json"} - assert executor.params == [] + assert executor.params is None assert executor.json == {"name": "John Doe", "age": 30, "email": "john@example.com"} assert executor.data is None assert executor.files is None @@ -157,7 +157,7 @@ def test_executor_with_json_body_and_nested_object_variable(): assert executor.method == "post" assert executor.url == "https://api.example.com/data" assert executor.headers == {"Content-Type": "application/json"} - assert executor.params == [] + assert executor.params is None assert executor.json == {"object": {"name": "John Doe", "age": 30, "email": "john@example.com"}} assert executor.data is None assert executor.files is None @@ -245,7 +245,7 @@ def test_executor_with_form_data(): assert executor.url == "https://api.example.com/upload" assert "Content-Type" in executor.headers assert "multipart/form-data" in executor.headers["Content-Type"] - assert executor.params == [] + assert executor.params is None assert executor.json is None # '__multipart_placeholder__' is expected when no file inputs exist, # to ensure the request is treated as multipart/form-data by the backend. From 3ff52f1809d62f056b2c929c0ffbc2b8b1b67ea7 Mon Sep 17 00:00:00 2001 From: Guangdong Liu Date: Thu, 7 Aug 2025 09:04:51 +0800 Subject: [PATCH 163/415] feat: Enhance response validation and parsing in tool.py (#23456) Co-authored-by: autofix-ci[bot] <114827586+autofix-ci[bot]@users.noreply.github.com> --- api/core/tools/custom_tool/tool.py | 65 +++++++++++++++++++++++------- 1 file changed, 51 insertions(+), 14 deletions(-) diff --git a/api/core/tools/custom_tool/tool.py b/api/core/tools/custom_tool/tool.py index 333ef2834c..e112de9578 100644 --- a/api/core/tools/custom_tool/tool.py +++ b/api/core/tools/custom_tool/tool.py @@ -1,7 +1,8 @@ import json from collections.abc import Generator +from dataclasses import dataclass from os import getenv -from typing import Any, Optional +from typing import Any, Optional, Union from urllib.parse import urlencode import httpx @@ -20,6 +21,20 @@ API_TOOL_DEFAULT_TIMEOUT = ( ) +@dataclass +class ParsedResponse: + """Represents a parsed HTTP response with type information""" + + content: Union[str, dict] + is_json: bool + + def to_string(self) -> str: + """Convert response to string format for credential validation""" + if isinstance(self.content, dict): + return json.dumps(self.content, ensure_ascii=False) + return str(self.content) + + class ApiTool(Tool): """ Api tool @@ -58,7 +73,9 @@ class ApiTool(Tool): response = self.do_http_request(self.api_bundle.server_url, self.api_bundle.method, headers, parameters) # validate response - return self.validate_and_parse_response(response) + parsed_response = self.validate_and_parse_response(response) + # For credential validation, always return as string + return parsed_response.to_string() def tool_provider_type(self) -> ToolProviderType: return ToolProviderType.API @@ -112,23 +129,36 @@ class ApiTool(Tool): return headers - def validate_and_parse_response(self, response: httpx.Response) -> str: + def validate_and_parse_response(self, response: httpx.Response) -> ParsedResponse: """ - validate the response + validate the response and return parsed content with type information + + :return: ParsedResponse with content and is_json flag """ if isinstance(response, httpx.Response): if response.status_code >= 400: raise ToolInvokeError(f"Request failed with status code {response.status_code} and {response.text}") if not response.content: - return "Empty response from the tool, please check your parameters and try again." + return ParsedResponse( + "Empty response from the tool, please check your parameters and try again.", False + ) + + # Check content type + content_type = response.headers.get("content-type", "").lower() + is_json_content_type = "application/json" in content_type + + # Try to parse as JSON try: - response = response.json() - try: - return json.dumps(response, ensure_ascii=False) - except Exception: - return json.dumps(response) + json_response = response.json() + # If content-type indicates JSON, return as JSON object + if is_json_content_type: + return ParsedResponse(json_response, True) + else: + # If content-type doesn't indicate JSON, treat as text regardless of content + return ParsedResponse(response.text, False) except Exception: - return response.text + # Not valid JSON, return as text + return ParsedResponse(response.text, False) else: raise ValueError(f"Invalid response type {type(response)}") @@ -369,7 +399,14 @@ class ApiTool(Tool): response = self.do_http_request(self.api_bundle.server_url, self.api_bundle.method, headers, tool_parameters) # validate response - response = self.validate_and_parse_response(response) + parsed_response = self.validate_and_parse_response(response) - # assemble invoke message - yield self.create_text_message(response) + # assemble invoke message based on response type + if parsed_response.is_json and isinstance(parsed_response.content, dict): + yield self.create_json_message(parsed_response.content) + else: + # Convert to string if needed and create text message + text_response = ( + parsed_response.content if isinstance(parsed_response.content, str) else str(parsed_response.content) + ) + yield self.create_text_message(text_response) From e072b7dafab932e01b9cc89a6cb39456a95e9ae7 Mon Sep 17 00:00:00 2001 From: Yongtao Huang Date: Thu, 7 Aug 2025 10:06:17 +0900 Subject: [PATCH 164/415] Chore: remove unused variable pruned_memory (#23514) --- api/controllers/console/files.py | 2 +- api/controllers/console/workspace/workspace.py | 5 ++--- api/controllers/service_api/app/file.py | 9 ++++----- api/controllers/service_api/dataset/document.py | 4 ++-- api/controllers/web/files.py | 5 ++--- api/core/memory/token_buffer_memory.py | 3 +-- 6 files changed, 12 insertions(+), 16 deletions(-) diff --git a/api/controllers/console/files.py b/api/controllers/console/files.py index 66b6214f82..256ff24b3b 100644 --- a/api/controllers/console/files.py +++ b/api/controllers/console/files.py @@ -49,7 +49,6 @@ class FileApi(Resource): @marshal_with(file_fields) @cloud_edition_billing_resource_check("documents") def post(self): - file = request.files["file"] source_str = request.form.get("source") source: Literal["datasets"] | None = "datasets" if source_str == "datasets" else None @@ -58,6 +57,7 @@ class FileApi(Resource): if len(request.files) > 1: raise TooManyFilesError() + file = request.files["file"] if not file.filename: raise FilenameNotExistsError diff --git a/api/controllers/console/workspace/workspace.py b/api/controllers/console/workspace/workspace.py index 19999e7361..6012c9ecc8 100644 --- a/api/controllers/console/workspace/workspace.py +++ b/api/controllers/console/workspace/workspace.py @@ -191,9 +191,6 @@ class WebappLogoWorkspaceApi(Resource): @account_initialization_required @cloud_edition_billing_resource_check("workspace_custom") def post(self): - # get file from request - file = request.files["file"] - # check file if "file" not in request.files: raise NoFileUploadedError() @@ -201,6 +198,8 @@ class WebappLogoWorkspaceApi(Resource): if len(request.files) > 1: raise TooManyFilesError() + # get file from request + file = request.files["file"] if not file.filename: raise FilenameNotExistsError diff --git a/api/controllers/service_api/app/file.py b/api/controllers/service_api/app/file.py index b0fd8e65ef..f09d07bcb6 100644 --- a/api/controllers/service_api/app/file.py +++ b/api/controllers/service_api/app/file.py @@ -20,18 +20,17 @@ class FileApi(Resource): @validate_app_token(fetch_user_arg=FetchUserArg(fetch_from=WhereisUserArg.FORM)) @marshal_with(file_fields) def post(self, app_model: App, end_user: EndUser): - file = request.files["file"] - # check file if "file" not in request.files: raise NoFileUploadedError() - if not file.mimetype: - raise UnsupportedFileTypeError() - if len(request.files) > 1: raise TooManyFilesError() + file = request.files["file"] + if not file.mimetype: + raise UnsupportedFileTypeError() + if not file.filename: raise FilenameNotExistsError diff --git a/api/controllers/service_api/dataset/document.py b/api/controllers/service_api/dataset/document.py index 77600aa18c..2955d5d20d 100644 --- a/api/controllers/service_api/dataset/document.py +++ b/api/controllers/service_api/dataset/document.py @@ -234,8 +234,6 @@ class DocumentAddByFileApi(DatasetApiResource): args["retrieval_model"].get("reranking_model").get("reranking_model_name"), ) - # save file info - file = request.files["file"] # check file if "file" not in request.files: raise NoFileUploadedError() @@ -243,6 +241,8 @@ class DocumentAddByFileApi(DatasetApiResource): if len(request.files) > 1: raise TooManyFilesError() + # save file info + file = request.files["file"] if not file.filename: raise FilenameNotExistsError diff --git a/api/controllers/web/files.py b/api/controllers/web/files.py index df06a73a85..8e9317606e 100644 --- a/api/controllers/web/files.py +++ b/api/controllers/web/files.py @@ -12,18 +12,17 @@ from services.file_service import FileService class FileApi(WebApiResource): @marshal_with(file_fields) def post(self, app_model, end_user): - file = request.files["file"] - source = request.form.get("source") - if "file" not in request.files: raise NoFileUploadedError() if len(request.files) > 1: raise TooManyFilesError() + file = request.files["file"] if not file.filename: raise FilenameNotExistsError + source = request.form.get("source") if source not in ("datasets", None): source = None diff --git a/api/core/memory/token_buffer_memory.py b/api/core/memory/token_buffer_memory.py index 7ce124594a..91f17568b6 100644 --- a/api/core/memory/token_buffer_memory.py +++ b/api/core/memory/token_buffer_memory.py @@ -121,9 +121,8 @@ class TokenBufferMemory: curr_message_tokens = self.model_instance.get_llm_num_tokens(prompt_messages) if curr_message_tokens > max_token_limit: - pruned_memory = [] while curr_message_tokens > max_token_limit and len(prompt_messages) > 1: - pruned_memory.append(prompt_messages.pop(0)) + prompt_messages.pop(0) curr_message_tokens = self.model_instance.get_llm_num_tokens(prompt_messages) return prompt_messages From d253ca192a4323dc6914b2ab23bfbfee3df4cf5a Mon Sep 17 00:00:00 2001 From: NeatGuyCoding <15627489+NeatGuyCoding@users.noreply.github.com> Date: Thu, 7 Aug 2025 09:13:30 +0800 Subject: [PATCH 165/415] Feat add testcontainers test for app service (#23523) --- .../services/test_app_service.py | 928 ++++++++++++++++++ 1 file changed, 928 insertions(+) create mode 100644 api/tests/test_containers_integration_tests/services/test_app_service.py diff --git a/api/tests/test_containers_integration_tests/services/test_app_service.py b/api/tests/test_containers_integration_tests/services/test_app_service.py new file mode 100644 index 0000000000..69cd9fafee --- /dev/null +++ b/api/tests/test_containers_integration_tests/services/test_app_service.py @@ -0,0 +1,928 @@ +from unittest.mock import patch + +import pytest +from faker import Faker + +from constants.model_template import default_app_templates +from models.model import App, Site +from services.account_service import AccountService, TenantService +from services.app_service import AppService + + +class TestAppService: + """Integration tests for AppService using testcontainers.""" + + @pytest.fixture + def mock_external_service_dependencies(self): + """Mock setup for external service dependencies.""" + with ( + patch("services.app_service.FeatureService") as mock_feature_service, + patch("services.app_service.EnterpriseService") as mock_enterprise_service, + patch("services.app_service.ModelManager") as mock_model_manager, + patch("services.account_service.FeatureService") as mock_account_feature_service, + ): + # Setup default mock returns for app service + mock_feature_service.get_system_features.return_value.webapp_auth.enabled = False + mock_enterprise_service.WebAppAuth.update_app_access_mode.return_value = None + mock_enterprise_service.WebAppAuth.cleanup_webapp.return_value = None + + # Setup default mock returns for account service + mock_account_feature_service.get_system_features.return_value.is_allow_register = True + + # Mock ModelManager for model configuration + mock_model_instance = mock_model_manager.return_value + mock_model_instance.get_default_model_instance.return_value = None + mock_model_instance.get_default_provider_model_name.return_value = ("openai", "gpt-3.5-turbo") + + yield { + "feature_service": mock_feature_service, + "enterprise_service": mock_enterprise_service, + "model_manager": mock_model_manager, + "account_feature_service": mock_account_feature_service, + } + + def test_create_app_success(self, db_session_with_containers, mock_external_service_dependencies): + """ + Test successful app creation with basic parameters. + """ + fake = Faker() + + # Create account and tenant first + account = AccountService.create_account( + email=fake.email(), + name=fake.name(), + interface_language="en-US", + password=fake.password(length=12), + ) + TenantService.create_owner_tenant_if_not_exist(account, name=fake.company()) + tenant = account.current_tenant + + # Setup app creation arguments + app_args = { + "name": fake.company(), + "description": fake.text(max_nb_chars=100), + "mode": "chat", + "icon_type": "emoji", + "icon": "🤖", + "icon_background": "#FF6B6B", + "api_rph": 100, + "api_rpm": 10, + } + + # Create app + app_service = AppService() + app = app_service.create_app(tenant.id, app_args, account) + + # Verify app was created correctly + assert app.name == app_args["name"] + assert app.description == app_args["description"] + assert app.mode == app_args["mode"] + assert app.icon_type == app_args["icon_type"] + assert app.icon == app_args["icon"] + assert app.icon_background == app_args["icon_background"] + assert app.tenant_id == tenant.id + assert app.api_rph == app_args["api_rph"] + assert app.api_rpm == app_args["api_rpm"] + assert app.created_by == account.id + assert app.updated_by == account.id + assert app.status == "normal" + assert app.enable_site is True + assert app.enable_api is True + assert app.is_demo is False + assert app.is_public is False + assert app.is_universal is False + + def test_create_app_with_different_modes(self, db_session_with_containers, mock_external_service_dependencies): + """ + Test app creation with different app modes. + """ + fake = Faker() + + # Create account and tenant first + account = AccountService.create_account( + email=fake.email(), + name=fake.name(), + interface_language="en-US", + password=fake.password(length=12), + ) + TenantService.create_owner_tenant_if_not_exist(account, name=fake.company()) + tenant = account.current_tenant + + app_service = AppService() + + # Test different app modes + # from AppMode enum in default_app_model_template + app_modes = [v.value for v in default_app_templates] + + for mode in app_modes: + app_args = { + "name": f"{fake.company()} {mode}", + "description": f"Test app for {mode} mode", + "mode": mode, + "icon_type": "emoji", + "icon": "🚀", + "icon_background": "#4ECDC4", + } + + app = app_service.create_app(tenant.id, app_args, account) + + # Verify app mode was set correctly + assert app.mode == mode + assert app.name == app_args["name"] + assert app.tenant_id == tenant.id + assert app.created_by == account.id + + def test_get_app_success(self, db_session_with_containers, mock_external_service_dependencies): + """ + Test successful app retrieval. + """ + fake = Faker() + + # Create account and tenant first + account = AccountService.create_account( + email=fake.email(), + name=fake.name(), + interface_language="en-US", + password=fake.password(length=12), + ) + TenantService.create_owner_tenant_if_not_exist(account, name=fake.company()) + tenant = account.current_tenant + + # Create app first + app_args = { + "name": fake.company(), + "description": fake.text(max_nb_chars=100), + "mode": "chat", + "icon_type": "emoji", + "icon": "🎯", + "icon_background": "#45B7D1", + } + + app_service = AppService() + created_app = app_service.create_app(tenant.id, app_args, account) + + # Get app using the service + retrieved_app = app_service.get_app(created_app) + + # Verify retrieved app matches created app + assert retrieved_app.id == created_app.id + assert retrieved_app.name == created_app.name + assert retrieved_app.description == created_app.description + assert retrieved_app.mode == created_app.mode + assert retrieved_app.tenant_id == created_app.tenant_id + assert retrieved_app.created_by == created_app.created_by + + def test_get_paginate_apps_success(self, db_session_with_containers, mock_external_service_dependencies): + """ + Test successful paginated app list retrieval. + """ + fake = Faker() + + # Create account and tenant first + account = AccountService.create_account( + email=fake.email(), + name=fake.name(), + interface_language="en-US", + password=fake.password(length=12), + ) + TenantService.create_owner_tenant_if_not_exist(account, name=fake.company()) + tenant = account.current_tenant + + app_service = AppService() + + # Create multiple apps + app_names = [fake.company() for _ in range(5)] + for name in app_names: + app_args = { + "name": name, + "description": fake.text(max_nb_chars=100), + "mode": "chat", + "icon_type": "emoji", + "icon": "📱", + "icon_background": "#96CEB4", + } + app_service.create_app(tenant.id, app_args, account) + + # Get paginated apps + args = { + "page": 1, + "limit": 10, + "mode": "chat", + } + + paginated_apps = app_service.get_paginate_apps(account.id, tenant.id, args) + + # Verify pagination results + assert paginated_apps is not None + assert len(paginated_apps.items) >= 5 # Should have at least 5 apps + assert paginated_apps.page == 1 + assert paginated_apps.per_page == 10 + + # Verify all apps belong to the correct tenant + for app in paginated_apps.items: + assert app.tenant_id == tenant.id + assert app.mode == "chat" + + def test_get_paginate_apps_with_filters(self, db_session_with_containers, mock_external_service_dependencies): + """ + Test paginated app list with various filters. + """ + fake = Faker() + + # Create account and tenant first + account = AccountService.create_account( + email=fake.email(), + name=fake.name(), + interface_language="en-US", + password=fake.password(length=12), + ) + TenantService.create_owner_tenant_if_not_exist(account, name=fake.company()) + tenant = account.current_tenant + + app_service = AppService() + + # Create apps with different modes + chat_app_args = { + "name": "Chat App", + "description": "A chat application", + "mode": "chat", + "icon_type": "emoji", + "icon": "💬", + "icon_background": "#FF6B6B", + } + completion_app_args = { + "name": "Completion App", + "description": "A completion application", + "mode": "completion", + "icon_type": "emoji", + "icon": "✍️", + "icon_background": "#4ECDC4", + } + + chat_app = app_service.create_app(tenant.id, chat_app_args, account) + completion_app = app_service.create_app(tenant.id, completion_app_args, account) + + # Test filter by mode + chat_args = { + "page": 1, + "limit": 10, + "mode": "chat", + } + chat_apps = app_service.get_paginate_apps(account.id, tenant.id, chat_args) + assert len(chat_apps.items) == 1 + assert chat_apps.items[0].mode == "chat" + + # Test filter by name + name_args = { + "page": 1, + "limit": 10, + "mode": "chat", + "name": "Chat", + } + filtered_apps = app_service.get_paginate_apps(account.id, tenant.id, name_args) + assert len(filtered_apps.items) == 1 + assert "Chat" in filtered_apps.items[0].name + + # Test filter by created_by_me + created_by_me_args = { + "page": 1, + "limit": 10, + "mode": "completion", + "is_created_by_me": True, + } + my_apps = app_service.get_paginate_apps(account.id, tenant.id, created_by_me_args) + assert len(my_apps.items) == 1 + + def test_get_paginate_apps_with_tag_filters(self, db_session_with_containers, mock_external_service_dependencies): + """ + Test paginated app list with tag filters. + """ + fake = Faker() + + # Create account and tenant first + account = AccountService.create_account( + email=fake.email(), + name=fake.name(), + interface_language="en-US", + password=fake.password(length=12), + ) + TenantService.create_owner_tenant_if_not_exist(account, name=fake.company()) + tenant = account.current_tenant + + app_service = AppService() + + # Create an app + app_args = { + "name": fake.company(), + "description": fake.text(max_nb_chars=100), + "mode": "chat", + "icon_type": "emoji", + "icon": "🏷️", + "icon_background": "#FFEAA7", + } + app = app_service.create_app(tenant.id, app_args, account) + + # Mock TagService to return the app ID for tag filtering + with patch("services.app_service.TagService.get_target_ids_by_tag_ids") as mock_tag_service: + mock_tag_service.return_value = [app.id] + + # Test with tag filter + args = { + "page": 1, + "limit": 10, + "mode": "chat", + "tag_ids": ["tag1", "tag2"], + } + + paginated_apps = app_service.get_paginate_apps(account.id, tenant.id, args) + + # Verify tag service was called + mock_tag_service.assert_called_once_with("app", tenant.id, ["tag1", "tag2"]) + + # Verify results + assert paginated_apps is not None + assert len(paginated_apps.items) == 1 + assert paginated_apps.items[0].id == app.id + + # Test with tag filter that returns no results + with patch("services.app_service.TagService.get_target_ids_by_tag_ids") as mock_tag_service: + mock_tag_service.return_value = [] + + args = { + "page": 1, + "limit": 10, + "mode": "chat", + "tag_ids": ["nonexistent_tag"], + } + + paginated_apps = app_service.get_paginate_apps(account.id, tenant.id, args) + + # Should return None when no apps match tag filter + assert paginated_apps is None + + def test_update_app_success(self, db_session_with_containers, mock_external_service_dependencies): + """ + Test successful app update with all fields. + """ + fake = Faker() + + # Create account and tenant first + account = AccountService.create_account( + email=fake.email(), + name=fake.name(), + interface_language="en-US", + password=fake.password(length=12), + ) + TenantService.create_owner_tenant_if_not_exist(account, name=fake.company()) + tenant = account.current_tenant + + # Create app first + app_args = { + "name": fake.company(), + "description": fake.text(max_nb_chars=100), + "mode": "chat", + "icon_type": "emoji", + "icon": "🎯", + "icon_background": "#45B7D1", + } + + app_service = AppService() + app = app_service.create_app(tenant.id, app_args, account) + + # Store original values + original_name = app.name + original_description = app.description + original_icon = app.icon + original_icon_background = app.icon_background + original_use_icon_as_answer_icon = app.use_icon_as_answer_icon + + # Update app + update_args = { + "name": "Updated App Name", + "description": "Updated app description", + "icon_type": "emoji", + "icon": "🔄", + "icon_background": "#FF8C42", + "use_icon_as_answer_icon": True, + } + + with patch("flask_login.utils._get_user", return_value=account): + updated_app = app_service.update_app(app, update_args) + + # Verify updated fields + assert updated_app.name == update_args["name"] + assert updated_app.description == update_args["description"] + assert updated_app.icon == update_args["icon"] + assert updated_app.icon_background == update_args["icon_background"] + assert updated_app.use_icon_as_answer_icon is True + assert updated_app.updated_by == account.id + + # Verify other fields remain unchanged + assert updated_app.mode == app.mode + assert updated_app.tenant_id == app.tenant_id + assert updated_app.created_by == app.created_by + + def test_update_app_name_success(self, db_session_with_containers, mock_external_service_dependencies): + """ + Test successful app name update. + """ + fake = Faker() + + # Create account and tenant first + account = AccountService.create_account( + email=fake.email(), + name=fake.name(), + interface_language="en-US", + password=fake.password(length=12), + ) + TenantService.create_owner_tenant_if_not_exist(account, name=fake.company()) + tenant = account.current_tenant + + # Create app first + app_args = { + "name": fake.company(), + "description": fake.text(max_nb_chars=100), + "mode": "chat", + "icon_type": "emoji", + "icon": "🎯", + "icon_background": "#45B7D1", + } + + app_service = AppService() + app = app_service.create_app(tenant.id, app_args, account) + + # Store original name + original_name = app.name + + # Update app name + new_name = "New App Name" + with patch("flask_login.utils._get_user", return_value=account): + updated_app = app_service.update_app_name(app, new_name) + + assert updated_app.name == new_name + assert updated_app.updated_by == account.id + + # Verify other fields remain unchanged + assert updated_app.description == app.description + assert updated_app.mode == app.mode + assert updated_app.tenant_id == app.tenant_id + assert updated_app.created_by == app.created_by + + def test_update_app_icon_success(self, db_session_with_containers, mock_external_service_dependencies): + """ + Test successful app icon update. + """ + fake = Faker() + + # Create account and tenant first + account = AccountService.create_account( + email=fake.email(), + name=fake.name(), + interface_language="en-US", + password=fake.password(length=12), + ) + TenantService.create_owner_tenant_if_not_exist(account, name=fake.company()) + tenant = account.current_tenant + + # Create app first + app_args = { + "name": fake.company(), + "description": fake.text(max_nb_chars=100), + "mode": "chat", + "icon_type": "emoji", + "icon": "🎯", + "icon_background": "#45B7D1", + } + + app_service = AppService() + app = app_service.create_app(tenant.id, app_args, account) + + # Store original values + original_icon = app.icon + original_icon_background = app.icon_background + + # Update app icon + new_icon = "🌟" + new_icon_background = "#FFD93D" + with patch("flask_login.utils._get_user", return_value=account): + updated_app = app_service.update_app_icon(app, new_icon, new_icon_background) + + assert updated_app.icon == new_icon + assert updated_app.icon_background == new_icon_background + assert updated_app.updated_by == account.id + + # Verify other fields remain unchanged + assert updated_app.name == app.name + assert updated_app.description == app.description + assert updated_app.mode == app.mode + assert updated_app.tenant_id == app.tenant_id + assert updated_app.created_by == app.created_by + + def test_update_app_site_status_success(self, db_session_with_containers, mock_external_service_dependencies): + """ + Test successful app site status update. + """ + fake = Faker() + + # Create account and tenant first + account = AccountService.create_account( + email=fake.email(), + name=fake.name(), + interface_language="en-US", + password=fake.password(length=12), + ) + TenantService.create_owner_tenant_if_not_exist(account, name=fake.company()) + tenant = account.current_tenant + + # Create app first + app_args = { + "name": fake.company(), + "description": fake.text(max_nb_chars=100), + "mode": "chat", + "icon_type": "emoji", + "icon": "🌐", + "icon_background": "#74B9FF", + } + + app_service = AppService() + app = app_service.create_app(tenant.id, app_args, account) + + # Store original site status + original_site_status = app.enable_site + + # Update site status to disabled + with patch("flask_login.utils._get_user", return_value=account): + updated_app = app_service.update_app_site_status(app, False) + assert updated_app.enable_site is False + assert updated_app.updated_by == account.id + + # Update site status back to enabled + with patch("flask_login.utils._get_user", return_value=account): + updated_app = app_service.update_app_site_status(updated_app, True) + assert updated_app.enable_site is True + assert updated_app.updated_by == account.id + + # Verify other fields remain unchanged + assert updated_app.name == app.name + assert updated_app.description == app.description + assert updated_app.mode == app.mode + assert updated_app.tenant_id == app.tenant_id + assert updated_app.created_by == app.created_by + + def test_update_app_api_status_success(self, db_session_with_containers, mock_external_service_dependencies): + """ + Test successful app API status update. + """ + fake = Faker() + + # Create account and tenant first + account = AccountService.create_account( + email=fake.email(), + name=fake.name(), + interface_language="en-US", + password=fake.password(length=12), + ) + TenantService.create_owner_tenant_if_not_exist(account, name=fake.company()) + tenant = account.current_tenant + + # Create app first + app_args = { + "name": fake.company(), + "description": fake.text(max_nb_chars=100), + "mode": "chat", + "icon_type": "emoji", + "icon": "🔌", + "icon_background": "#A29BFE", + } + + app_service = AppService() + app = app_service.create_app(tenant.id, app_args, account) + + # Store original API status + original_api_status = app.enable_api + + # Update API status to disabled + with patch("flask_login.utils._get_user", return_value=account): + updated_app = app_service.update_app_api_status(app, False) + assert updated_app.enable_api is False + assert updated_app.updated_by == account.id + + # Update API status back to enabled + with patch("flask_login.utils._get_user", return_value=account): + updated_app = app_service.update_app_api_status(updated_app, True) + assert updated_app.enable_api is True + assert updated_app.updated_by == account.id + + # Verify other fields remain unchanged + assert updated_app.name == app.name + assert updated_app.description == app.description + assert updated_app.mode == app.mode + assert updated_app.tenant_id == app.tenant_id + assert updated_app.created_by == app.created_by + + def test_update_app_site_status_no_change(self, db_session_with_containers, mock_external_service_dependencies): + """ + Test app site status update when status doesn't change. + """ + fake = Faker() + + # Create account and tenant first + account = AccountService.create_account( + email=fake.email(), + name=fake.name(), + interface_language="en-US", + password=fake.password(length=12), + ) + TenantService.create_owner_tenant_if_not_exist(account, name=fake.company()) + tenant = account.current_tenant + + # Create app first + app_args = { + "name": fake.company(), + "description": fake.text(max_nb_chars=100), + "mode": "chat", + "icon_type": "emoji", + "icon": "🔄", + "icon_background": "#FD79A8", + } + + app_service = AppService() + app = app_service.create_app(tenant.id, app_args, account) + + # Store original values + original_site_status = app.enable_site + original_updated_at = app.updated_at + + # Update site status to the same value (no change) + updated_app = app_service.update_app_site_status(app, original_site_status) + + # Verify app is returned unchanged + assert updated_app.id == app.id + assert updated_app.enable_site == original_site_status + assert updated_app.updated_at == original_updated_at + + # Verify other fields remain unchanged + assert updated_app.name == app.name + assert updated_app.description == app.description + assert updated_app.mode == app.mode + assert updated_app.tenant_id == app.tenant_id + assert updated_app.created_by == app.created_by + + def test_delete_app_success(self, db_session_with_containers, mock_external_service_dependencies): + """ + Test successful app deletion. + """ + fake = Faker() + + # Create account and tenant first + account = AccountService.create_account( + email=fake.email(), + name=fake.name(), + interface_language="en-US", + password=fake.password(length=12), + ) + TenantService.create_owner_tenant_if_not_exist(account, name=fake.company()) + tenant = account.current_tenant + + # Create app first + app_args = { + "name": fake.company(), + "description": fake.text(max_nb_chars=100), + "mode": "chat", + "icon_type": "emoji", + "icon": "🗑️", + "icon_background": "#E17055", + } + + app_service = AppService() + app = app_service.create_app(tenant.id, app_args, account) + + # Store app ID for verification + app_id = app.id + + # Mock the async deletion task + with patch("services.app_service.remove_app_and_related_data_task") as mock_delete_task: + mock_delete_task.delay.return_value = None + + # Delete app + app_service.delete_app(app) + + # Verify async deletion task was called + mock_delete_task.delay.assert_called_once_with(tenant_id=tenant.id, app_id=app_id) + + # Verify app was deleted from database + from extensions.ext_database import db + + deleted_app = db.session.query(App).filter_by(id=app_id).first() + assert deleted_app is None + + def test_delete_app_with_related_data(self, db_session_with_containers, mock_external_service_dependencies): + """ + Test app deletion with related data cleanup. + """ + fake = Faker() + + # Create account and tenant first + account = AccountService.create_account( + email=fake.email(), + name=fake.name(), + interface_language="en-US", + password=fake.password(length=12), + ) + TenantService.create_owner_tenant_if_not_exist(account, name=fake.company()) + tenant = account.current_tenant + + # Create app first + app_args = { + "name": fake.company(), + "description": fake.text(max_nb_chars=100), + "mode": "chat", + "icon_type": "emoji", + "icon": "🧹", + "icon_background": "#00B894", + } + + app_service = AppService() + app = app_service.create_app(tenant.id, app_args, account) + + # Store app ID for verification + app_id = app.id + + # Mock webapp auth cleanup + mock_external_service_dependencies[ + "feature_service" + ].get_system_features.return_value.webapp_auth.enabled = True + + # Mock the async deletion task + with patch("services.app_service.remove_app_and_related_data_task") as mock_delete_task: + mock_delete_task.delay.return_value = None + + # Delete app + app_service.delete_app(app) + + # Verify webapp auth cleanup was called + mock_external_service_dependencies["enterprise_service"].WebAppAuth.cleanup_webapp.assert_called_once_with( + app_id + ) + + # Verify async deletion task was called + mock_delete_task.delay.assert_called_once_with(tenant_id=tenant.id, app_id=app_id) + + # Verify app was deleted from database + from extensions.ext_database import db + + deleted_app = db.session.query(App).filter_by(id=app_id).first() + assert deleted_app is None + + def test_get_app_meta_success(self, db_session_with_containers, mock_external_service_dependencies): + """ + Test successful app metadata retrieval. + """ + fake = Faker() + + # Create account and tenant first + account = AccountService.create_account( + email=fake.email(), + name=fake.name(), + interface_language="en-US", + password=fake.password(length=12), + ) + TenantService.create_owner_tenant_if_not_exist(account, name=fake.company()) + tenant = account.current_tenant + + # Create app first + app_args = { + "name": fake.company(), + "description": fake.text(max_nb_chars=100), + "mode": "chat", + "icon_type": "emoji", + "icon": "📊", + "icon_background": "#6C5CE7", + } + + app_service = AppService() + app = app_service.create_app(tenant.id, app_args, account) + + # Get app metadata + app_meta = app_service.get_app_meta(app) + + # Verify metadata contains expected fields + assert "tool_icons" in app_meta + # Note: get_app_meta currently only returns tool_icons + + def test_get_app_code_by_id_success(self, db_session_with_containers, mock_external_service_dependencies): + """ + Test successful app code retrieval by app ID. + """ + fake = Faker() + + # Create account and tenant first + account = AccountService.create_account( + email=fake.email(), + name=fake.name(), + interface_language="en-US", + password=fake.password(length=12), + ) + TenantService.create_owner_tenant_if_not_exist(account, name=fake.company()) + tenant = account.current_tenant + + # Create app first + app_args = { + "name": fake.company(), + "description": fake.text(max_nb_chars=100), + "mode": "chat", + "icon_type": "emoji", + "icon": "🔗", + "icon_background": "#FDCB6E", + } + + app_service = AppService() + app = app_service.create_app(tenant.id, app_args, account) + + # Get app code by ID + app_code = AppService.get_app_code_by_id(app.id) + + # Verify app code was retrieved correctly + # Note: Site would be created when App is created, site.code is auto-generated + assert app_code is not None + assert len(app_code) > 0 + + def test_get_app_id_by_code_success(self, db_session_with_containers, mock_external_service_dependencies): + """ + Test successful app ID retrieval by app code. + """ + fake = Faker() + + # Create account and tenant first + account = AccountService.create_account( + email=fake.email(), + name=fake.name(), + interface_language="en-US", + password=fake.password(length=12), + ) + TenantService.create_owner_tenant_if_not_exist(account, name=fake.company()) + tenant = account.current_tenant + + # Create app first + app_args = { + "name": fake.company(), + "description": fake.text(max_nb_chars=100), + "mode": "chat", + "icon_type": "emoji", + "icon": "🆔", + "icon_background": "#E84393", + } + + app_service = AppService() + app = app_service.create_app(tenant.id, app_args, account) + + # Create a site for the app + site = Site() + site.app_id = app.id + site.code = fake.postalcode() + site.title = fake.company() + site.status = "normal" + site.default_language = "en-US" + site.customize_token_strategy = "uuid" + from extensions.ext_database import db + + db.session.add(site) + db.session.commit() + + # Get app ID by code + app_id = AppService.get_app_id_by_code(site.code) + + # Verify app ID was retrieved correctly + assert app_id == app.id + + def test_create_app_invalid_mode(self, db_session_with_containers, mock_external_service_dependencies): + """ + Test app creation with invalid mode. + """ + fake = Faker() + + # Create account and tenant first + account = AccountService.create_account( + email=fake.email(), + name=fake.name(), + interface_language="en-US", + password=fake.password(length=12), + ) + TenantService.create_owner_tenant_if_not_exist(account, name=fake.company()) + tenant = account.current_tenant + + # Setup app creation arguments with invalid mode + app_args = { + "name": fake.company(), + "description": fake.text(max_nb_chars=100), + "mode": "invalid_mode", # Invalid mode + "icon_type": "emoji", + "icon": "❌", + "icon_background": "#D63031", + } + + app_service = AppService() + + # Attempt to create app with invalid mode + with pytest.raises(ValueError, match="invalid mode value"): + app_service.create_app(tenant.id, app_args, account) From f6c717582870692c48dfd6390176b08efad968a0 Mon Sep 17 00:00:00 2001 From: lyzno1 <92089059+lyzno1@users.noreply.github.com> Date: Thu, 7 Aug 2025 09:25:26 +0800 Subject: [PATCH 166/415] fix: make TagSelector always visible for accessibility and mobile support (#23515) --- .../datasets/{Container.tsx => container.tsx} | 6 +++--- .../datasets/{DatasetCard.tsx => dataset-card.tsx} | 11 +++-------- .../{DatasetFooter.tsx => dataset-footer.tsx} | 0 .../datasets/{Datasets.tsx => datasets.tsx} | 4 ++-- web/app/(commonLayout)/datasets/{Doc.tsx => doc.tsx} | 0 .../{NewDatasetCard.tsx => new-dataset-card.tsx} | 0 web/app/(commonLayout)/datasets/page.tsx | 2 +- web/app/components/apps/app-card.tsx | 12 +++--------- web/app/components/base/tag-management/selector.tsx | 6 +++++- web/service/access-control.ts | 4 ++-- 10 files changed, 19 insertions(+), 26 deletions(-) rename web/app/(commonLayout)/datasets/{Container.tsx => container.tsx} (98%) rename web/app/(commonLayout)/datasets/{DatasetCard.tsx => dataset-card.tsx} (94%) rename web/app/(commonLayout)/datasets/{DatasetFooter.tsx => dataset-footer.tsx} (100%) rename web/app/(commonLayout)/datasets/{Datasets.tsx => datasets.tsx} (96%) rename web/app/(commonLayout)/datasets/{Doc.tsx => doc.tsx} (100%) rename web/app/(commonLayout)/datasets/{NewDatasetCard.tsx => new-dataset-card.tsx} (100%) diff --git a/web/app/(commonLayout)/datasets/Container.tsx b/web/app/(commonLayout)/datasets/container.tsx similarity index 98% rename from web/app/(commonLayout)/datasets/Container.tsx rename to web/app/(commonLayout)/datasets/container.tsx index 112b6a752e..444119332b 100644 --- a/web/app/(commonLayout)/datasets/Container.tsx +++ b/web/app/(commonLayout)/datasets/container.tsx @@ -9,10 +9,10 @@ import { useQuery } from '@tanstack/react-query' // Components import ExternalAPIPanel from '../../components/datasets/external-api/external-api-panel' -import Datasets from './Datasets' -import DatasetFooter from './DatasetFooter' +import Datasets from './datasets' +import DatasetFooter from './dataset-footer' import ApiServer from '../../components/develop/ApiServer' -import Doc from './Doc' +import Doc from './doc' import TabSliderNew from '@/app/components/base/tab-slider-new' import TagManagementModal from '@/app/components/base/tag-management' import TagFilter from '@/app/components/base/tag-management/filter' diff --git a/web/app/(commonLayout)/datasets/DatasetCard.tsx b/web/app/(commonLayout)/datasets/dataset-card.tsx similarity index 94% rename from web/app/(commonLayout)/datasets/DatasetCard.tsx rename to web/app/(commonLayout)/datasets/dataset-card.tsx index 4b40be2c7f..58c82984ea 100644 --- a/web/app/(commonLayout)/datasets/DatasetCard.tsx +++ b/web/app/(commonLayout)/datasets/dataset-card.tsx @@ -162,24 +162,19 @@ const DatasetCard = ({
    {dataset.description}
    -
    +
    { e.stopPropagation() e.preventDefault() }}>
    { diff --git a/web/app/components/apps/app-card.tsx b/web/app/components/apps/app-card.tsx index 603b5922c5..a91c2edf1e 100644 --- a/web/app/components/apps/app-card.tsx +++ b/web/app/components/apps/app-card.tsx @@ -370,20 +370,14 @@ const AppCard = ({ app, onRefresh }: AppCardProps) => { {app.description}
    -
    +
    {isCurrentWorkspaceEditor && ( <>
    { e.stopPropagation() e.preventDefault() }}> -
    +
    { />
    -
    +
    } diff --git a/web/app/components/base/tag-management/selector.tsx b/web/app/components/base/tag-management/selector.tsx index 026543cfa7..cb53aaa7ef 100644 --- a/web/app/components/base/tag-management/selector.tsx +++ b/web/app/components/base/tag-management/selector.tsx @@ -238,12 +238,16 @@ const TagSelector: FC = ({ }, [selectedTags, tagList]) const Trigger = () => { + const hasNoTags = !triggerContent return (
    -
    +
    {!triggerContent ? t('common.tag.addTag') : triggerContent}
    diff --git a/web/service/access-control.ts b/web/service/access-control.ts index d4cc9eb792..18dc9e0001 100644 --- a/web/service/access-control.ts +++ b/web/service/access-control.ts @@ -70,7 +70,7 @@ export const useUpdateAccessMode = () => { }) } -export const useGetUserCanAccessApp = ({ appId, isInstalledApp = true }: { appId?: string; isInstalledApp?: boolean; }) => { +export const useGetUserCanAccessApp = ({ appId, isInstalledApp = true, enabled }: { appId?: string; isInstalledApp?: boolean; enabled?: boolean }) => { const systemFeatures = useGlobalPublicStore(s => s.systemFeatures) return useQuery({ queryKey: [NAME_SPACE, 'user-can-access-app', appId], @@ -80,7 +80,7 @@ export const useGetUserCanAccessApp = ({ appId, isInstalledApp = true }: { appId else return { result: true } }, - enabled: !!appId, + enabled: enabled !== undefined ? enabled : !!appId, staleTime: 0, gcTime: 0, }) From f3c98a274bb214d96b0c484f094773f161ea112c Mon Sep 17 00:00:00 2001 From: hangboss1761 <1240123692@qq.com> Date: Thu, 7 Aug 2025 10:06:04 +0800 Subject: [PATCH 167/415] fix: update the guiding text in the upload component (#23509) --- web/app/components/app/create-from-dsl-modal/uploader.tsx | 4 ++-- web/app/components/datasets/create/file-uploader/index.tsx | 2 +- web/i18n/de-DE/app.ts | 4 ++++ web/i18n/de-DE/dataset-creation.ts | 1 + web/i18n/en-US/app.ts | 4 ++++ web/i18n/en-US/dataset-creation.ts | 1 + web/i18n/es-ES/app.ts | 4 ++++ web/i18n/es-ES/dataset-creation.ts | 1 + web/i18n/fa-IR/app.ts | 4 ++++ web/i18n/fa-IR/dataset-creation.ts | 1 + web/i18n/fr-FR/app.ts | 6 +++++- web/i18n/fr-FR/dataset-creation.ts | 1 + web/i18n/hi-IN/app.ts | 4 ++++ web/i18n/hi-IN/dataset-creation.ts | 1 + web/i18n/it-IT/app.ts | 4 ++++ web/i18n/it-IT/dataset-creation.ts | 1 + web/i18n/ja-JP/app.ts | 4 ++++ web/i18n/ja-JP/dataset-creation.ts | 1 + web/i18n/ko-KR/app.ts | 4 ++++ web/i18n/ko-KR/dataset-creation.ts | 1 + web/i18n/pl-PL/app.ts | 4 ++++ web/i18n/pl-PL/dataset-creation.ts | 1 + web/i18n/pt-BR/app.ts | 4 ++++ web/i18n/pt-BR/dataset-creation.ts | 1 + web/i18n/ro-RO/app.ts | 4 ++++ web/i18n/ro-RO/dataset-creation.ts | 1 + web/i18n/ru-RU/app.ts | 4 ++++ web/i18n/ru-RU/dataset-creation.ts | 1 + web/i18n/sl-SI/app.ts | 4 ++++ web/i18n/sl-SI/dataset-creation.ts | 1 + web/i18n/th-TH/app.ts | 4 ++++ web/i18n/th-TH/dataset-creation.ts | 1 + web/i18n/tr-TR/app.ts | 4 ++++ web/i18n/tr-TR/dataset-creation.ts | 1 + web/i18n/uk-UA/app.ts | 4 ++++ web/i18n/uk-UA/dataset-creation.ts | 1 + web/i18n/vi-VN/app.ts | 4 ++++ web/i18n/vi-VN/dataset-creation.ts | 1 + web/i18n/zh-Hans/app.ts | 4 ++++ web/i18n/zh-Hans/dataset-creation.ts | 1 + web/i18n/zh-Hant/app.ts | 4 ++++ web/i18n/zh-Hant/dataset-creation.ts | 1 + 42 files changed, 104 insertions(+), 4 deletions(-) diff --git a/web/app/components/app/create-from-dsl-modal/uploader.tsx b/web/app/components/app/create-from-dsl-modal/uploader.tsx index 6ad4116dd6..3ab54733dc 100644 --- a/web/app/components/app/create-from-dsl-modal/uploader.tsx +++ b/web/app/components/app/create-from-dsl-modal/uploader.tsx @@ -106,8 +106,8 @@ const Uploader: FC = ({
    - {t('datasetCreation.stepOne.uploader.button')} - {t('datasetDocuments.list.batchModal.browse')} + {t('app.dslUploader.button')} + {t('app.dslUploader.browse')}
    {dragging &&
    } diff --git a/web/app/components/datasets/create/file-uploader/index.tsx b/web/app/components/datasets/create/file-uploader/index.tsx index 65b26e7458..fd7d3f4c0f 100644 --- a/web/app/components/datasets/create/file-uploader/index.tsx +++ b/web/app/components/datasets/create/file-uploader/index.tsx @@ -313,7 +313,7 @@ const FileUploader = ({ - {t('datasetCreation.stepOne.uploader.button')} + {notSupportBatchUpload ? t('datasetCreation.stepOne.uploader.buttonSingleFile') : t('datasetCreation.stepOne.uploader.button')} {supportTypes.length > 0 && ( )} diff --git a/web/i18n/de-DE/app.ts b/web/i18n/de-DE/app.ts index 0013a89561..c3826e658e 100644 --- a/web/i18n/de-DE/app.ts +++ b/web/i18n/de-DE/app.ts @@ -166,6 +166,10 @@ const translation = { description: 'Gibt an, ob das web app Symbol zum Ersetzen 🤖 in der freigegebenen Anwendung verwendet werden soll', }, importFromDSLUrlPlaceholder: 'DSL-Link hier einfügen', + dslUploader: { + button: 'Datei per Drag & Drop ablegen oder', + browse: 'Durchsuchen', + }, duplicate: 'Duplikat', importFromDSL: 'Import von DSL', importDSL: 'DSL-Datei importieren', diff --git a/web/i18n/de-DE/dataset-creation.ts b/web/i18n/de-DE/dataset-creation.ts index a26feb314a..bef64c7fb5 100644 --- a/web/i18n/de-DE/dataset-creation.ts +++ b/web/i18n/de-DE/dataset-creation.ts @@ -21,6 +21,7 @@ const translation = { uploader: { title: 'Textdatei hochladen', button: 'Dateien und Ordner hierher ziehen oder klicken', + buttonSingleFile: 'Datei hierher ziehen oder klicken', browse: 'Durchsuchen', tip: 'Unterstützt {{supportTypes}}. Maximal {{size}}MB pro Datei.', validation: { diff --git a/web/i18n/en-US/app.ts b/web/i18n/en-US/app.ts index 06b5e8ded1..28b8b84051 100644 --- a/web/i18n/en-US/app.ts +++ b/web/i18n/en-US/app.ts @@ -23,6 +23,10 @@ const translation = { importFromDSLFile: 'From DSL file', importFromDSLUrl: 'From URL', importFromDSLUrlPlaceholder: 'Paste DSL link here', + dslUploader: { + button: 'Drag and drop file, or', + browse: 'Browse', + }, deleteAppConfirmTitle: 'Delete this app?', deleteAppConfirmContent: 'Deleting the app is irreversible. Users will no longer be able to access your app, and all prompt configurations and logs will be permanently deleted.', diff --git a/web/i18n/en-US/dataset-creation.ts b/web/i18n/en-US/dataset-creation.ts index 90bac8dcb8..25f81547b8 100644 --- a/web/i18n/en-US/dataset-creation.ts +++ b/web/i18n/en-US/dataset-creation.ts @@ -36,6 +36,7 @@ const translation = { uploader: { title: 'Upload file', button: 'Drag and drop file or folder, or', + buttonSingleFile: 'Drag and drop file, or', browse: 'Browse', tip: 'Supports {{supportTypes}}. Max {{size}}MB each.', validation: { diff --git a/web/i18n/es-ES/app.ts b/web/i18n/es-ES/app.ts index d8f6a2cec4..f2df9a45a7 100644 --- a/web/i18n/es-ES/app.ts +++ b/web/i18n/es-ES/app.ts @@ -170,6 +170,10 @@ const translation = { }, importFromDSLUrl: 'URL de origen', importFromDSLUrlPlaceholder: 'Pegar enlace DSL aquí', + dslUploader: { + button: 'Arrastrar y soltar archivo, o', + browse: 'Examinar', + }, importFromDSL: 'Importar desde DSL', importFromDSLFile: 'Desde el archivo DSL', mermaid: { diff --git a/web/i18n/es-ES/dataset-creation.ts b/web/i18n/es-ES/dataset-creation.ts index c361884051..d27dda050b 100644 --- a/web/i18n/es-ES/dataset-creation.ts +++ b/web/i18n/es-ES/dataset-creation.ts @@ -26,6 +26,7 @@ const translation = { uploader: { title: 'Cargar archivo', button: 'Arrastre y suelte archivos o carpetas, o', + buttonSingleFile: 'Arrastre y suelte archivo, o', browse: 'Buscar', tip: 'Soporta {{supportTypes}}. Máximo {{size}}MB cada uno.', validation: { diff --git a/web/i18n/fa-IR/app.ts b/web/i18n/fa-IR/app.ts index e28aa1946c..cd848ec21d 100644 --- a/web/i18n/fa-IR/app.ts +++ b/web/i18n/fa-IR/app.ts @@ -19,6 +19,10 @@ const translation = { importFromDSLFile: 'از فایل DSL', importFromDSLUrl: 'از URL', importFromDSLUrlPlaceholder: 'لینک DSL را اینجا بچسبانید', + dslUploader: { + button: 'فایل را بکشید و رها کنید، یا', + browse: 'مرور', + }, deleteAppConfirmTitle: 'آیا این برنامه حذف شود؟', deleteAppConfirmContent: 'حذف برنامه غیرقابل برگشت است. کاربران دیگر قادر به دسترسی به برنامه شما نخواهند بود و تمام تنظیمات و گزارشات درخواست‌ها به صورت دائم حذف خواهند شد.', diff --git a/web/i18n/fa-IR/dataset-creation.ts b/web/i18n/fa-IR/dataset-creation.ts index bf3a03e0c2..105753a249 100644 --- a/web/i18n/fa-IR/dataset-creation.ts +++ b/web/i18n/fa-IR/dataset-creation.ts @@ -28,6 +28,7 @@ const translation = { uploader: { title: 'بارگذاری فایل', button: 'فایل ها یا پوشه ها را بکشید و رها کنید یا', + buttonSingleFile: 'فایل را بکشید و رها کنید یا', browse: 'مرور', tip: 'پشتیبانی از {{supportTypes}}. حداکثر {{size}}MB هر کدام.', validation: { diff --git a/web/i18n/fr-FR/app.ts b/web/i18n/fr-FR/app.ts index a34d6a31da..8c0e0f516c 100644 --- a/web/i18n/fr-FR/app.ts +++ b/web/i18n/fr-FR/app.ts @@ -169,7 +169,11 @@ const translation = { descriptionInExplore: 'Utilisation de l’icône web app pour remplacer 🤖 dans Explore', }, importFromDSLUrlPlaceholder: 'Collez le lien DSL ici', - importFromDSL: 'Importation à partir d’une DSL', + dslUploader: { + button: 'Glisser-déposer un fichier, ou', + browse: 'Parcourir', + }, + importFromDSL: 'Importation à partir d\'une DSL', importFromDSLUrl: 'À partir de l’URL', importFromDSLFile: 'À partir d’un fichier DSL', mermaid: { diff --git a/web/i18n/fr-FR/dataset-creation.ts b/web/i18n/fr-FR/dataset-creation.ts index e1a5a85a8b..e306589989 100644 --- a/web/i18n/fr-FR/dataset-creation.ts +++ b/web/i18n/fr-FR/dataset-creation.ts @@ -23,6 +23,7 @@ const translation = { uploader: { title: 'Télécharger le fichier texte', button: 'Faites glisser et déposez des fichiers ou des dossiers, ou', + buttonSingleFile: 'Faites glisser et déposez un fichier, ou', browse: 'Parcourir', tip: 'Prend en charge {{supportTypes}}. Max {{size}}MB chacun.', validation: { diff --git a/web/i18n/hi-IN/app.ts b/web/i18n/hi-IN/app.ts index fc60901452..cce3fe1f4d 100644 --- a/web/i18n/hi-IN/app.ts +++ b/web/i18n/hi-IN/app.ts @@ -172,6 +172,10 @@ const translation = { importFromDSLUrl: 'यूआरएल से', importFromDSL: 'DSL से आयात करें', importFromDSLUrlPlaceholder: 'डीएसएल लिंक यहां पेस्ट करें', + dslUploader: { + button: 'फ़ाइल खींचकर छोड़ें, या', + browse: 'ब्राउज़ करें', + }, mermaid: { handDrawn: 'हाथ खींचा', classic: 'क्लासिक', diff --git a/web/i18n/hi-IN/dataset-creation.ts b/web/i18n/hi-IN/dataset-creation.ts index 64aff7193f..c91946302c 100644 --- a/web/i18n/hi-IN/dataset-creation.ts +++ b/web/i18n/hi-IN/dataset-creation.ts @@ -28,6 +28,7 @@ const translation = { uploader: { title: 'फ़ाइल अपलोड करें', button: 'फ़ाइलों या फ़ोल्डरों को खींचें और छोड़ें, या', + buttonSingleFile: 'फ़ाइल को खींचें और छोड़ें, या', browse: 'ब्राउज़ करें', tip: 'समर्थित {{supportTypes}}। प्रत्येक अधिकतम {{size}}MB।', validation: { diff --git a/web/i18n/it-IT/app.ts b/web/i18n/it-IT/app.ts index 01ee29423e..9c58c5e803 100644 --- a/web/i18n/it-IT/app.ts +++ b/web/i18n/it-IT/app.ts @@ -178,6 +178,10 @@ const translation = { importFromDSLFile: 'Da file DSL', importFromDSL: 'Importazione da DSL', importFromDSLUrlPlaceholder: 'Incolla qui il link DSL', + dslUploader: { + button: 'Trascina e rilascia il file, o', + browse: 'Sfoglia', + }, mermaid: { handDrawn: 'Disegnato a mano', classic: 'Classico', diff --git a/web/i18n/it-IT/dataset-creation.ts b/web/i18n/it-IT/dataset-creation.ts index 18df6a505f..89b739a0ce 100644 --- a/web/i18n/it-IT/dataset-creation.ts +++ b/web/i18n/it-IT/dataset-creation.ts @@ -28,6 +28,7 @@ const translation = { uploader: { title: 'Carica file', button: 'Trascina e rilascia file o cartelle, oppure', + buttonSingleFile: 'Trascina e rilascia un file, oppure', browse: 'Sfoglia', tip: 'Supporta {{supportTypes}}. Max {{size}}MB ciascuno.', validation: { diff --git a/web/i18n/ja-JP/app.ts b/web/i18n/ja-JP/app.ts index d05bf6c353..3e399debc1 100644 --- a/web/i18n/ja-JP/app.ts +++ b/web/i18n/ja-JP/app.ts @@ -23,6 +23,10 @@ const translation = { importFromDSLFile: 'DSL ファイルから', importFromDSLUrl: 'URL から', importFromDSLUrlPlaceholder: 'DSL リンクをここに貼り付けます', + dslUploader: { + button: 'ファイルをドラッグ&ドロップするか、', + browse: '参照', + }, deleteAppConfirmTitle: 'このアプリを削除しますか?', deleteAppConfirmContent: 'アプリを削除すると、元に戻すことはできません。他のユーザーはもはやこのアプリにアクセスできず、すべてのプロンプトの設定とログが永久に削除されます。', diff --git a/web/i18n/ja-JP/dataset-creation.ts b/web/i18n/ja-JP/dataset-creation.ts index 262f0cf8d9..439dd75371 100644 --- a/web/i18n/ja-JP/dataset-creation.ts +++ b/web/i18n/ja-JP/dataset-creation.ts @@ -31,6 +31,7 @@ const translation = { uploader: { title: 'テキストファイルをアップロード', button: 'ファイルまたはフォルダをドラッグアンドドロップする', + buttonSingleFile: 'ファイルをドラッグアンドドロップする', browse: '参照', tip: '{{supportTypes}}をサポートしています。1 つあたりの最大サイズは{{size}}MB です。', validation: { diff --git a/web/i18n/ko-KR/app.ts b/web/i18n/ko-KR/app.ts index f0d666301a..5ddb823998 100644 --- a/web/i18n/ko-KR/app.ts +++ b/web/i18n/ko-KR/app.ts @@ -189,6 +189,10 @@ const translation = { importFromDSLFile: 'DSL 파일에서', importFromDSLUrl: 'URL 에서', importFromDSLUrlPlaceholder: '여기에 DSL 링크 붙여 넣기', + dslUploader: { + button: '파일을 드래그 앤 드롭하거나', + browse: '찾아보기', + }, mermaid: { handDrawn: '손으로 그린', classic: '고전', diff --git a/web/i18n/ko-KR/dataset-creation.ts b/web/i18n/ko-KR/dataset-creation.ts index 1c03b0ae7d..ffc00c48ea 100644 --- a/web/i18n/ko-KR/dataset-creation.ts +++ b/web/i18n/ko-KR/dataset-creation.ts @@ -21,6 +21,7 @@ const translation = { uploader: { title: '텍스트 파일 업로드', button: '파일이나 폴더를 끌어서 놓기', + buttonSingleFile: '파일을 끌어서 놓기', browse: '찾아보기', tip: '{{supportTypes}}을 (를) 지원합니다. 파일당 최대 크기는 {{size}}MB 입니다.', validation: { diff --git a/web/i18n/pl-PL/app.ts b/web/i18n/pl-PL/app.ts index 8751dedc99..0a5e6a56d1 100644 --- a/web/i18n/pl-PL/app.ts +++ b/web/i18n/pl-PL/app.ts @@ -173,6 +173,10 @@ const translation = { importFromDSLUrl: 'Z adresu URL', importFromDSLFile: 'Z pliku DSL', importFromDSLUrlPlaceholder: 'Wklej tutaj link DSL', + dslUploader: { + button: 'Przeciągnij i upuść plik, lub', + browse: 'Przeglądaj', + }, mermaid: { handDrawn: 'Ręcznie rysowane', classic: 'Klasyczny', diff --git a/web/i18n/pl-PL/dataset-creation.ts b/web/i18n/pl-PL/dataset-creation.ts index 83e6eab96b..28e400fd22 100644 --- a/web/i18n/pl-PL/dataset-creation.ts +++ b/web/i18n/pl-PL/dataset-creation.ts @@ -23,6 +23,7 @@ const translation = { uploader: { title: 'Prześlij plik tekstowy', button: 'Przeciągnij i upuść pliki lub foldery lub', + buttonSingleFile: 'Przeciągnij i upuść plik lub', browse: 'Przeglądaj', tip: 'Obsługuje {{supportTypes}}. Maksymalnie {{size}}MB każdy.', validation: { diff --git a/web/i18n/pt-BR/app.ts b/web/i18n/pt-BR/app.ts index 1f44ae9e5a..bbb420351c 100644 --- a/web/i18n/pt-BR/app.ts +++ b/web/i18n/pt-BR/app.ts @@ -169,6 +169,10 @@ const translation = { title: 'Use o ícone do web app para substituir 🤖', }, importFromDSLUrlPlaceholder: 'Cole o link DSL aqui', + dslUploader: { + button: 'Arraste e solte o arquivo, ou', + browse: 'Navegar', + }, importFromDSLUrl: 'Do URL', importFromDSLFile: 'Do arquivo DSL', importFromDSL: 'Importar de DSL', diff --git a/web/i18n/pt-BR/dataset-creation.ts b/web/i18n/pt-BR/dataset-creation.ts index db3b5af302..e2668c818f 100644 --- a/web/i18n/pt-BR/dataset-creation.ts +++ b/web/i18n/pt-BR/dataset-creation.ts @@ -23,6 +23,7 @@ const translation = { uploader: { title: 'Enviar arquivo de texto', button: 'Arraste e solte arquivos ou pastas, ou', + buttonSingleFile: 'Arraste e solte um arquivo, ou', browse: 'Navegar', tip: 'Suporta {{supportTypes}}. Máximo de {{size}}MB cada.', validation: { diff --git a/web/i18n/ro-RO/app.ts b/web/i18n/ro-RO/app.ts index 2559eea20f..73b01ac704 100644 --- a/web/i18n/ro-RO/app.ts +++ b/web/i18n/ro-RO/app.ts @@ -171,6 +171,10 @@ const translation = { importFromDSL: 'Import din DSL', importFromDSLUrl: 'De la URL', importFromDSLUrlPlaceholder: 'Lipiți linkul DSL aici', + dslUploader: { + button: 'Trageți și plasați fișierul, sau', + browse: 'Răsfoiți', + }, importFromDSLFile: 'Din fișierul DSL', mermaid: { handDrawn: 'Desenat de mână', diff --git a/web/i18n/ro-RO/dataset-creation.ts b/web/i18n/ro-RO/dataset-creation.ts index 77c3bce88a..0849d4dc87 100644 --- a/web/i18n/ro-RO/dataset-creation.ts +++ b/web/i18n/ro-RO/dataset-creation.ts @@ -23,6 +23,7 @@ const translation = { uploader: { title: 'Încărcați fișier text', button: 'Trageți și plasați fișiere sau foldere sau', + buttonSingleFile: 'Trageți și plasați un fișier sau', browse: 'Răsfoire', tip: 'Acceptă {{supportTypes}}. Maxim {{size}}MB fiecare.', validation: { diff --git a/web/i18n/ru-RU/app.ts b/web/i18n/ru-RU/app.ts index bc15d16ee1..fa7b79a377 100644 --- a/web/i18n/ru-RU/app.ts +++ b/web/i18n/ru-RU/app.ts @@ -19,6 +19,10 @@ const translation = { importFromDSLFile: 'Из файла DSL', importFromDSLUrl: 'Из URL', importFromDSLUrlPlaceholder: 'Вставьте ссылку DSL сюда', + dslUploader: { + button: 'Перетащите файл, или', + browse: 'Обзор', + }, deleteAppConfirmTitle: 'Удалить это приложение?', deleteAppConfirmContent: 'Удаление приложения необратимо. Пользователи больше не смогут получить доступ к вашему приложению, и все настройки подсказок и журналы будут безвозвратно удалены.', diff --git a/web/i18n/ru-RU/dataset-creation.ts b/web/i18n/ru-RU/dataset-creation.ts index 49b2f2087c..bf2532836c 100644 --- a/web/i18n/ru-RU/dataset-creation.ts +++ b/web/i18n/ru-RU/dataset-creation.ts @@ -28,6 +28,7 @@ const translation = { uploader: { title: 'Загрузить файл', button: 'Перетащите файлы или папки или', + buttonSingleFile: 'Перетащите файл или', browse: 'Обзор', tip: 'Поддерживаются {{supportTypes}}. Максимум {{size}} МБ каждый.', validation: { diff --git a/web/i18n/sl-SI/app.ts b/web/i18n/sl-SI/app.ts index 61c479e65f..0d286b4c13 100644 --- a/web/i18n/sl-SI/app.ts +++ b/web/i18n/sl-SI/app.ts @@ -19,6 +19,10 @@ const translation = { importFromDSLFile: 'Iz datoteke DSL', importFromDSLUrl: 'Iz URL-ja', importFromDSLUrlPlaceholder: 'Tukaj prilepi povezavo DSL', + dslUploader: { + button: 'Povlecite in spustite datoteko, ali', + browse: 'Prebrskaj', + }, deleteAppConfirmTitle: 'Izbrišem to aplikacijo?', deleteAppConfirmContent: 'Brisanje aplikacije je nepopravljivo. Uporabniki ne bodo več imeli dostopa do vaše aplikacije, vse konfiguracije in dnevniki pa bodo trajno izbrisani.', diff --git a/web/i18n/sl-SI/dataset-creation.ts b/web/i18n/sl-SI/dataset-creation.ts index bbe98799d2..08e65c2437 100644 --- a/web/i18n/sl-SI/dataset-creation.ts +++ b/web/i18n/sl-SI/dataset-creation.ts @@ -33,6 +33,7 @@ const translation = { uploader: { title: 'Naloži datoteko', button: 'Povleci in spusti datoteke ali mape oz', + buttonSingleFile: 'Povleci in spusti datoteko oz', browse: 'Prebrskaj', tip: 'Podprti tipi datotek: {{supportTypes}}. Največ {{size}}MB na datoteko.', validation: { diff --git a/web/i18n/th-TH/app.ts b/web/i18n/th-TH/app.ts index d0e3394ff8..adb377d6fc 100644 --- a/web/i18n/th-TH/app.ts +++ b/web/i18n/th-TH/app.ts @@ -19,6 +19,10 @@ const translation = { importFromDSLFile: 'จากไฟล์ DSL', importFromDSLUrl: 'จาก URL', importFromDSLUrlPlaceholder: 'วางลิงค์ DSL ที่นี่', + dslUploader: { + button: 'ลากและวางไฟล์ หรือ', + browse: 'เรียกดู', + }, deleteAppConfirmTitle: 'ลบโปรเจกต์นี้?', deleteAppConfirmContent: 'การลบโปรเจกนั้นไม่สามารถย้อนกลับได้ ผู้ใช้จะไม่สามารถเข้าถึงโปรเจกต์ของคุณอีกต่อไป และการกําหนดค่าต่างๆและบันทึกทั้งหมดจะถูกลบอย่างถาวร', appDeleted: 'โปรเจกต์ถูกลบ', diff --git a/web/i18n/th-TH/dataset-creation.ts b/web/i18n/th-TH/dataset-creation.ts index cd318984b1..795444cfab 100644 --- a/web/i18n/th-TH/dataset-creation.ts +++ b/web/i18n/th-TH/dataset-creation.ts @@ -33,6 +33,7 @@ const translation = { uploader: { title: 'อัปโหลดไฟล์', button: 'ลากและวางไฟล์หรือโฟลเดอร์หรือ', + buttonSingleFile: 'ลากและวางไฟล์หรือ', browse: 'เล็ม', tip: 'รองรับ {{supportTypes}} สูงสุด {{size}}MB แต่ละตัว', validation: { diff --git a/web/i18n/tr-TR/app.ts b/web/i18n/tr-TR/app.ts index 1852ee29d2..61a7eb926e 100644 --- a/web/i18n/tr-TR/app.ts +++ b/web/i18n/tr-TR/app.ts @@ -19,6 +19,10 @@ const translation = { importFromDSLFile: 'DSL dosyasından', importFromDSLUrl: 'URL\'den', importFromDSLUrlPlaceholder: 'DSL bağlantısını buraya yapıştır', + dslUploader: { + button: 'Dosyayı sürükleyip bırakın veya', + browse: 'Gözat', + }, deleteAppConfirmTitle: 'Bu uygulamayı silmek istiyor musunuz?', deleteAppConfirmContent: 'Uygulamanın silinmesi geri alınamaz. Kullanıcılar artık uygulamanıza erişemeyecek ve tüm prompt yapılandırmaları ile loglar kalıcı olarak silinecektir.', appDeleted: 'Uygulama silindi', diff --git a/web/i18n/tr-TR/dataset-creation.ts b/web/i18n/tr-TR/dataset-creation.ts index 41dc49b7ca..32fb8165eb 100644 --- a/web/i18n/tr-TR/dataset-creation.ts +++ b/web/i18n/tr-TR/dataset-creation.ts @@ -28,6 +28,7 @@ const translation = { uploader: { title: 'Dosya yükle', button: 'Dosyaları veya klasörleri sürükleyip bırakın veya', + buttonSingleFile: 'Dosyayı sürükleyip bırakın veya', browse: 'Göz atın', tip: 'Destekler {{supportTypes}}. Her biri en fazla {{size}}MB.', validation: { diff --git a/web/i18n/uk-UA/app.ts b/web/i18n/uk-UA/app.ts index 77b98beebe..aab4498acb 100644 --- a/web/i18n/uk-UA/app.ts +++ b/web/i18n/uk-UA/app.ts @@ -171,6 +171,10 @@ const translation = { importFromDSLUrl: 'З URL', importFromDSL: 'Імпорт з DSL', importFromDSLUrlPlaceholder: 'Вставте посилання на DSL тут', + dslUploader: { + button: 'Перетягніть файл, або', + browse: 'Огляд', + }, importFromDSLFile: 'З DSL-файлу', mermaid: { handDrawn: 'Намальовані від руки', diff --git a/web/i18n/uk-UA/dataset-creation.ts b/web/i18n/uk-UA/dataset-creation.ts index 72b7a8c05c..8ea32c0d81 100644 --- a/web/i18n/uk-UA/dataset-creation.ts +++ b/web/i18n/uk-UA/dataset-creation.ts @@ -23,6 +23,7 @@ const translation = { uploader: { title: 'Завантажити текстовий файл', button: 'Перетягніть файли або папки або', + buttonSingleFile: 'Перетягніть файл або', browse: 'Оберіть', tip: 'Підтримуються {{supportTypes}}. Максимум {{size}} МБ кожен.', validation: { diff --git a/web/i18n/vi-VN/app.ts b/web/i18n/vi-VN/app.ts index 7a992bef77..00e1a0bd5a 100644 --- a/web/i18n/vi-VN/app.ts +++ b/web/i18n/vi-VN/app.ts @@ -171,6 +171,10 @@ const translation = { importFromDSLFile: 'Từ tệp DSL', importFromDSL: 'Nhập từ DSL', importFromDSLUrlPlaceholder: 'Dán liên kết DSL vào đây', + dslUploader: { + button: 'Kéo và thả tệp, hoặc', + browse: 'Duyệt', + }, importFromDSLUrl: 'Từ URL', mermaid: { handDrawn: 'Vẽ tay', diff --git a/web/i18n/vi-VN/dataset-creation.ts b/web/i18n/vi-VN/dataset-creation.ts index 34ef374500..39215fde68 100644 --- a/web/i18n/vi-VN/dataset-creation.ts +++ b/web/i18n/vi-VN/dataset-creation.ts @@ -23,6 +23,7 @@ const translation = { uploader: { title: 'Tải lên tệp văn bản', button: 'Kéo và thả các tập tin hoặc thư mục, hoặc', + buttonSingleFile: 'Kéo và thả tệp hoặc', browse: 'Chọn tệp', tip: 'Hỗ trợ {{supportTypes}}. Tối đa {{size}}MB mỗi tệp.', validation: { diff --git a/web/i18n/zh-Hans/app.ts b/web/i18n/zh-Hans/app.ts index 7c8b292ce4..32cfc4cf36 100644 --- a/web/i18n/zh-Hans/app.ts +++ b/web/i18n/zh-Hans/app.ts @@ -23,6 +23,10 @@ const translation = { importFromDSLFile: '文件', importFromDSLUrl: 'URL', importFromDSLUrlPlaceholder: '输入 DSL 文件的 URL', + dslUploader: { + button: '拖拽文件至此,或者', + browse: '选择文件', + }, deleteAppConfirmTitle: '确认删除应用?', deleteAppConfirmContent: '删除应用将无法撤销。用户将不能访问你的应用,所有 Prompt 编排配置和日志均将一并被删除。', diff --git a/web/i18n/zh-Hans/dataset-creation.ts b/web/i18n/zh-Hans/dataset-creation.ts index c8d4a82244..48fb39fd45 100644 --- a/web/i18n/zh-Hans/dataset-creation.ts +++ b/web/i18n/zh-Hans/dataset-creation.ts @@ -36,6 +36,7 @@ const translation = { uploader: { title: '上传文本文件', button: '拖拽文件或文件夹至此,或者', + buttonSingleFile: '拖拽文件至此,或者', browse: '选择文件', tip: '已支持 {{supportTypes}},每个文件不超过 {{size}}MB。', validation: { diff --git a/web/i18n/zh-Hant/app.ts b/web/i18n/zh-Hant/app.ts index 0bf99d5067..8d1cc69f2a 100644 --- a/web/i18n/zh-Hant/app.ts +++ b/web/i18n/zh-Hant/app.ts @@ -171,6 +171,10 @@ const translation = { importFromDSL: '從 DSL 導入', importFromDSLFile: '從 DSL 檔', importFromDSLUrlPlaceholder: '在此處貼上 DSL 連結', + dslUploader: { + button: '拖拽檔案至此,或者', + browse: '選擇檔案', + }, mermaid: { handDrawn: '手繪', classic: '經典', diff --git a/web/i18n/zh-Hant/dataset-creation.ts b/web/i18n/zh-Hant/dataset-creation.ts index e99fb0c320..3f39691e0b 100644 --- a/web/i18n/zh-Hant/dataset-creation.ts +++ b/web/i18n/zh-Hant/dataset-creation.ts @@ -21,6 +21,7 @@ const translation = { uploader: { title: '上傳文字檔案', button: '拖拽檔案或檔案夾至此,或者', + buttonSingleFile: '拖拽檔案至此,或者', browse: '選擇檔案', tip: '已支援 {{supportTypes}},每個檔案不超過 {{size}}MB。', validation: { From 85f33fb73d154c5b8312dd06785baceb4d85b4ef Mon Sep 17 00:00:00 2001 From: crazywoola <100913391+crazywoola@users.noreply.github.com> Date: Wed, 6 Aug 2025 19:55:41 -0700 Subject: [PATCH 168/415] chore: add template for required fields (#23533) --- .github/ISSUE_TEMPLATE/chore.yaml | 17 +++++++++++++++++ 1 file changed, 17 insertions(+) diff --git a/.github/ISSUE_TEMPLATE/chore.yaml b/.github/ISSUE_TEMPLATE/chore.yaml index 43449ef942..cf74dcc546 100644 --- a/.github/ISSUE_TEMPLATE/chore.yaml +++ b/.github/ISSUE_TEMPLATE/chore.yaml @@ -4,6 +4,23 @@ title: "[Chore/Refactor] " labels: - refactor body: + - type: checkboxes + attributes: + label: Self Checks + description: "To make sure we get to you in time, please check the following :)" + options: + - label: I have read the [Contributing Guide](https://github.com/langgenius/dify/blob/main/CONTRIBUTING.md) and [Language Policy](https://github.com/langgenius/dify/issues/1542). + required: true + - label: This is only for refactoring, if you would like to ask a question, please head to [Discussions](https://github.com/langgenius/dify/discussions/categories/general). + required: true + - label: I have searched for existing issues [search for existing issues](https://github.com/langgenius/dify/issues), including closed ones. + required: true + - label: I confirm that I am using English to submit this report, otherwise it will be closed. + required: true + - label: 【中文用户 & Non English User】请使用英语提交,否则会被关闭 :) + required: true + - label: "Please do not modify this template :) and fill in all the required fields." + required: true - type: textarea id: description attributes: From ad1b1193faeccfc5f4d565d545000490235081ac Mon Sep 17 00:00:00 2001 From: goofy <38034027+goofy-z@users.noreply.github.com> Date: Thu, 7 Aug 2025 11:14:45 +0800 Subject: [PATCH 169/415] fix localtime_to_timestamp tool throws 'no attribute localize error' when it executes without specifying a timezone parameter (#23517) --- .../providers/time/tools/localtime_to_timestamp.py | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/api/core/tools/builtin_tool/providers/time/tools/localtime_to_timestamp.py b/api/core/tools/builtin_tool/providers/time/tools/localtime_to_timestamp.py index 1639dd687f..a8fd6ec2cd 100644 --- a/api/core/tools/builtin_tool/providers/time/tools/localtime_to_timestamp.py +++ b/api/core/tools/builtin_tool/providers/time/tools/localtime_to_timestamp.py @@ -37,12 +37,12 @@ class LocaltimeToTimestampTool(BuiltinTool): @staticmethod def localtime_to_timestamp(localtime: str, time_format: str, local_tz=None) -> int | None: try: - if local_tz is None: - local_tz = datetime.now().astimezone().tzinfo - if isinstance(local_tz, str): - local_tz = pytz.timezone(local_tz) local_time = datetime.strptime(localtime, time_format) - localtime = local_tz.localize(local_time) # type: ignore + if local_tz is None: + localtime = local_time.astimezone() # type: ignore + elif isinstance(local_tz, str): + local_tz = pytz.timezone(local_tz) + localtime = local_tz.localize(local_time) # type: ignore timestamp = int(localtime.timestamp()) # type: ignore return timestamp except Exception as e: From 2931c891a7b5f2d106eb3cbb0722f9e7206b7669 Mon Sep 17 00:00:00 2001 From: HyaCinth <88471803+HyaCiovo@users.noreply.github.com> Date: Thu, 7 Aug 2025 14:19:38 +0800 Subject: [PATCH 170/415] chore: Optimize component styles and interactions (#23250) (#23543) --- web/app/components/base/chat/chat/index.tsx | 6 +++--- .../components/workflow/panel/debug-and-preview/index.tsx | 2 +- web/app/components/workflow/shortcuts-name.tsx | 2 +- 3 files changed, 5 insertions(+), 5 deletions(-) diff --git a/web/app/components/base/chat/chat/index.tsx b/web/app/components/base/chat/chat/index.tsx index 16717c53f9..bee37cf2cd 100644 --- a/web/app/components/base/chat/chat/index.tsx +++ b/web/app/components/base/chat/chat/index.tsx @@ -284,9 +284,9 @@ const Chat: FC = ({ { !noStopResponding && isResponding && (
    -
    ) diff --git a/web/app/components/workflow/panel/debug-and-preview/index.tsx b/web/app/components/workflow/panel/debug-and-preview/index.tsx index baf4c21dcd..ed3f29e871 100644 --- a/web/app/components/workflow/panel/debug-and-preview/index.tsx +++ b/web/app/components/workflow/panel/debug-and-preview/index.tsx @@ -90,7 +90,7 @@ const DebugAndPreview = () => {
    diff --git a/web/app/components/workflow/shortcuts-name.tsx b/web/app/components/workflow/shortcuts-name.tsx index 8d96bdfe6e..e7122c5ad5 100644 --- a/web/app/components/workflow/shortcuts-name.tsx +++ b/web/app/components/workflow/shortcuts-name.tsx @@ -19,7 +19,7 @@ const ShortcutsName = ({ keys.map(key => (
    {getKeyboardKeyNameBySystem(key)}
    From e01510e2a6b64ac6de2eef39495bcd08e5bf5d6d Mon Sep 17 00:00:00 2001 From: yunqiqiliang <132561395+yunqiqiliang@users.noreply.github.com> Date: Thu, 7 Aug 2025 14:21:46 +0800 Subject: [PATCH 171/415] feat: Add Clickzetta Lakehouse vector database integration (#22551) Co-authored-by: Claude --- .env.example | 1197 +++++++++++++++++ .gitignore | 7 + api/configs/middleware/__init__.py | 10 +- .../clickzetta_volume_storage_config.py | 65 + .../middleware/vdb/clickzetta_config.py | 69 + api/controllers/console/datasets/datasets.py | 2 + .../rag/datasource/vdb/clickzetta/README.md | 190 +++ .../rag/datasource/vdb/clickzetta/__init__.py | 1 + .../vdb/clickzetta/clickzetta_vector.py | 834 ++++++++++++ api/core/rag/datasource/vdb/vector_factory.py | 4 + api/core/rag/datasource/vdb/vector_type.py | 1 + api/extensions/ext_storage.py | 13 + .../storage/clickzetta_volume/__init__.py | 5 + .../clickzetta_volume_storage.py | 530 ++++++++ .../clickzetta_volume/file_lifecycle.py | 516 +++++++ .../clickzetta_volume/volume_permissions.py | 646 +++++++++ api/extensions/storage/storage_type.py | 1 + api/pyproject.toml | 2 + .../storage/test_clickzetta_volume.py | 168 +++ .../vdb/clickzetta/README.md | 25 + .../vdb/clickzetta/test_clickzetta.py | 237 ++++ .../vdb/clickzetta/test_docker_integration.py | 165 +++ api/uv.lock | 58 +- docker/.env.example | 35 +- docker/docker-compose.yaml | 16 + 25 files changed, 4788 insertions(+), 9 deletions(-) create mode 100644 .env.example create mode 100644 api/configs/middleware/storage/clickzetta_volume_storage_config.py create mode 100644 api/configs/middleware/vdb/clickzetta_config.py create mode 100644 api/core/rag/datasource/vdb/clickzetta/README.md create mode 100644 api/core/rag/datasource/vdb/clickzetta/__init__.py create mode 100644 api/core/rag/datasource/vdb/clickzetta/clickzetta_vector.py create mode 100644 api/extensions/storage/clickzetta_volume/__init__.py create mode 100644 api/extensions/storage/clickzetta_volume/clickzetta_volume_storage.py create mode 100644 api/extensions/storage/clickzetta_volume/file_lifecycle.py create mode 100644 api/extensions/storage/clickzetta_volume/volume_permissions.py create mode 100644 api/tests/integration_tests/storage/test_clickzetta_volume.py create mode 100644 api/tests/integration_tests/vdb/clickzetta/README.md create mode 100644 api/tests/integration_tests/vdb/clickzetta/test_clickzetta.py create mode 100644 api/tests/integration_tests/vdb/clickzetta/test_docker_integration.py diff --git a/.env.example b/.env.example new file mode 100644 index 0000000000..3e95f2e982 --- /dev/null +++ b/.env.example @@ -0,0 +1,1197 @@ +# ------------------------------ +# Environment Variables for API service & worker +# ------------------------------ + +# ------------------------------ +# Common Variables +# ------------------------------ + +# The backend URL of the console API, +# used to concatenate the authorization callback. +# If empty, it is the same domain. +# Example: https://api.console.dify.ai +CONSOLE_API_URL= + +# The front-end URL of the console web, +# used to concatenate some front-end addresses and for CORS configuration use. +# If empty, it is the same domain. +# Example: https://console.dify.ai +CONSOLE_WEB_URL= + +# Service API Url, +# used to display Service API Base Url to the front-end. +# If empty, it is the same domain. +# Example: https://api.dify.ai +SERVICE_API_URL= + +# WebApp API backend Url, +# used to declare the back-end URL for the front-end API. +# If empty, it is the same domain. +# Example: https://api.app.dify.ai +APP_API_URL= + +# WebApp Url, +# used to display WebAPP API Base Url to the front-end. +# If empty, it is the same domain. +# Example: https://app.dify.ai +APP_WEB_URL= + +# File preview or download Url prefix. +# used to display File preview or download Url to the front-end or as Multi-model inputs; +# Url is signed and has expiration time. +# Setting FILES_URL is required for file processing plugins. +# - For https://example.com, use FILES_URL=https://example.com +# - For http://example.com, use FILES_URL=http://example.com +# Recommendation: use a dedicated domain (e.g., https://upload.example.com). +# Alternatively, use http://:5001 or http://api:5001, +# ensuring port 5001 is externally accessible (see docker-compose.yaml). +FILES_URL= + +# INTERNAL_FILES_URL is used for plugin daemon communication within Docker network. +# Set this to the internal Docker service URL for proper plugin file access. +# Example: INTERNAL_FILES_URL=http://api:5001 +INTERNAL_FILES_URL= + +# ------------------------------ +# Server Configuration +# ------------------------------ + +# The log level for the application. +# Supported values are `DEBUG`, `INFO`, `WARNING`, `ERROR`, `CRITICAL` +LOG_LEVEL=INFO +# Log file path +LOG_FILE=/app/logs/server.log +# Log file max size, the unit is MB +LOG_FILE_MAX_SIZE=20 +# Log file max backup count +LOG_FILE_BACKUP_COUNT=5 +# Log dateformat +LOG_DATEFORMAT=%Y-%m-%d %H:%M:%S +# Log Timezone +LOG_TZ=UTC + +# Debug mode, default is false. +# It is recommended to turn on this configuration for local development +# to prevent some problems caused by monkey patch. +DEBUG=false + +# Flask debug mode, it can output trace information at the interface when turned on, +# which is convenient for debugging. +FLASK_DEBUG=false + +# Enable request logging, which will log the request and response information. +# And the log level is DEBUG +ENABLE_REQUEST_LOGGING=False + +# A secret key that is used for securely signing the session cookie +# and encrypting sensitive information on the database. +# You can generate a strong key using `openssl rand -base64 42`. +SECRET_KEY=sk-9f73s3ljTXVcMT3Blb3ljTqtsKiGHXVcMT3BlbkFJLK7U + +# Password for admin user initialization. +# If left unset, admin user will not be prompted for a password +# when creating the initial admin account. +# The length of the password cannot exceed 30 characters. +INIT_PASSWORD= + +# Deployment environment. +# Supported values are `PRODUCTION`, `TESTING`. Default is `PRODUCTION`. +# Testing environment. There will be a distinct color label on the front-end page, +# indicating that this environment is a testing environment. +DEPLOY_ENV=PRODUCTION + +# Whether to enable the version check policy. +# If set to empty, https://updates.dify.ai will be called for version check. +CHECK_UPDATE_URL=https://updates.dify.ai + +# Used to change the OpenAI base address, default is https://api.openai.com/v1. +# When OpenAI cannot be accessed in China, replace it with a domestic mirror address, +# or when a local model provides OpenAI compatible API, it can be replaced. +OPENAI_API_BASE=https://api.openai.com/v1 + +# When enabled, migrations will be executed prior to application startup +# and the application will start after the migrations have completed. +MIGRATION_ENABLED=true + +# File Access Time specifies a time interval in seconds for the file to be accessed. +# The default value is 300 seconds. +FILES_ACCESS_TIMEOUT=300 + +# Access token expiration time in minutes +ACCESS_TOKEN_EXPIRE_MINUTES=60 + +# Refresh token expiration time in days +REFRESH_TOKEN_EXPIRE_DAYS=30 + +# The maximum number of active requests for the application, where 0 means unlimited, should be a non-negative integer. +APP_MAX_ACTIVE_REQUESTS=0 +APP_MAX_EXECUTION_TIME=1200 + +# ------------------------------ +# Container Startup Related Configuration +# Only effective when starting with docker image or docker-compose. +# ------------------------------ + +# API service binding address, default: 0.0.0.0, i.e., all addresses can be accessed. +DIFY_BIND_ADDRESS=0.0.0.0 + +# API service binding port number, default 5001. +DIFY_PORT=5001 + +# The number of API server workers, i.e., the number of workers. +# Formula: number of cpu cores x 2 + 1 for sync, 1 for Gevent +# Reference: https://docs.gunicorn.org/en/stable/design.html#how-many-workers +SERVER_WORKER_AMOUNT=1 + +# Defaults to gevent. If using windows, it can be switched to sync or solo. +SERVER_WORKER_CLASS=gevent + +# Default number of worker connections, the default is 10. +SERVER_WORKER_CONNECTIONS=10 + +# Similar to SERVER_WORKER_CLASS. +# If using windows, it can be switched to sync or solo. +CELERY_WORKER_CLASS= + +# Request handling timeout. The default is 200, +# it is recommended to set it to 360 to support a longer sse connection time. +GUNICORN_TIMEOUT=360 + +# The number of Celery workers. The default is 1, and can be set as needed. +CELERY_WORKER_AMOUNT= + +# Flag indicating whether to enable autoscaling of Celery workers. +# +# Autoscaling is useful when tasks are CPU intensive and can be dynamically +# allocated and deallocated based on the workload. +# +# When autoscaling is enabled, the maximum and minimum number of workers can +# be specified. The autoscaling algorithm will dynamically adjust the number +# of workers within the specified range. +# +# Default is false (i.e., autoscaling is disabled). +# +# Example: +# CELERY_AUTO_SCALE=true +CELERY_AUTO_SCALE=false + +# The maximum number of Celery workers that can be autoscaled. +# This is optional and only used when autoscaling is enabled. +# Default is not set. +CELERY_MAX_WORKERS= + +# The minimum number of Celery workers that can be autoscaled. +# This is optional and only used when autoscaling is enabled. +# Default is not set. +CELERY_MIN_WORKERS= + +# API Tool configuration +API_TOOL_DEFAULT_CONNECT_TIMEOUT=10 +API_TOOL_DEFAULT_READ_TIMEOUT=60 + +# ------------------------------- +# Datasource Configuration +# -------------------------------- +ENABLE_WEBSITE_JINAREADER=true +ENABLE_WEBSITE_FIRECRAWL=true +ENABLE_WEBSITE_WATERCRAWL=true + +# ------------------------------ +# Database Configuration +# The database uses PostgreSQL. Please use the public schema. +# It is consistent with the configuration in the 'db' service below. +# ------------------------------ + +DB_USERNAME=postgres +DB_PASSWORD=difyai123456 +DB_HOST=db +DB_PORT=5432 +DB_DATABASE=dify +# The size of the database connection pool. +# The default is 30 connections, which can be appropriately increased. +SQLALCHEMY_POOL_SIZE=30 +# Database connection pool recycling time, the default is 3600 seconds. +SQLALCHEMY_POOL_RECYCLE=3600 +# Whether to print SQL, default is false. +SQLALCHEMY_ECHO=false +# If True, will test connections for liveness upon each checkout +SQLALCHEMY_POOL_PRE_PING=false +# Whether to enable the Last in first out option or use default FIFO queue if is false +SQLALCHEMY_POOL_USE_LIFO=false + +# Maximum number of connections to the database +# Default is 100 +# +# Reference: https://www.postgresql.org/docs/current/runtime-config-connection.html#GUC-MAX-CONNECTIONS +POSTGRES_MAX_CONNECTIONS=100 + +# Sets the amount of shared memory used for postgres's shared buffers. +# Default is 128MB +# Recommended value: 25% of available memory +# Reference: https://www.postgresql.org/docs/current/runtime-config-resource.html#GUC-SHARED-BUFFERS +POSTGRES_SHARED_BUFFERS=128MB + +# Sets the amount of memory used by each database worker for working space. +# Default is 4MB +# +# Reference: https://www.postgresql.org/docs/current/runtime-config-resource.html#GUC-WORK-MEM +POSTGRES_WORK_MEM=4MB + +# Sets the amount of memory reserved for maintenance activities. +# Default is 64MB +# +# Reference: https://www.postgresql.org/docs/current/runtime-config-resource.html#GUC-MAINTENANCE-WORK-MEM +POSTGRES_MAINTENANCE_WORK_MEM=64MB + +# Sets the planner's assumption about the effective cache size. +# Default is 4096MB +# +# Reference: https://www.postgresql.org/docs/current/runtime-config-query.html#GUC-EFFECTIVE-CACHE-SIZE +POSTGRES_EFFECTIVE_CACHE_SIZE=4096MB + +# ------------------------------ +# Redis Configuration +# This Redis configuration is used for caching and for pub/sub during conversation. +# ------------------------------ + +REDIS_HOST=redis +REDIS_PORT=6379 +REDIS_USERNAME= +REDIS_PASSWORD=difyai123456 +REDIS_USE_SSL=false +REDIS_DB=0 + +# Whether to use Redis Sentinel mode. +# If set to true, the application will automatically discover and connect to the master node through Sentinel. +REDIS_USE_SENTINEL=false + +# List of Redis Sentinel nodes. If Sentinel mode is enabled, provide at least one Sentinel IP and port. +# Format: `:,:,:` +REDIS_SENTINELS= +REDIS_SENTINEL_SERVICE_NAME= +REDIS_SENTINEL_USERNAME= +REDIS_SENTINEL_PASSWORD= +REDIS_SENTINEL_SOCKET_TIMEOUT=0.1 + +# List of Redis Cluster nodes. If Cluster mode is enabled, provide at least one Cluster IP and port. +# Format: `:,:,:` +REDIS_USE_CLUSTERS=false +REDIS_CLUSTERS= +REDIS_CLUSTERS_PASSWORD= + +# ------------------------------ +# Celery Configuration +# ------------------------------ + +# Use redis as the broker, and redis db 1 for celery broker. +# Format as follows: `redis://:@:/` +# Example: redis://:difyai123456@redis:6379/1 +# If use Redis Sentinel, format as follows: `sentinel://:@:/` +# Example: sentinel://localhost:26379/1;sentinel://localhost:26380/1;sentinel://localhost:26381/1 +CELERY_BROKER_URL=redis://:difyai123456@redis:6379/1 +BROKER_USE_SSL=false + +# If you are using Redis Sentinel for high availability, configure the following settings. +CELERY_USE_SENTINEL=false +CELERY_SENTINEL_MASTER_NAME= +CELERY_SENTINEL_PASSWORD= +CELERY_SENTINEL_SOCKET_TIMEOUT=0.1 + +# ------------------------------ +# CORS Configuration +# Used to set the front-end cross-domain access policy. +# ------------------------------ + +# Specifies the allowed origins for cross-origin requests to the Web API, +# e.g. https://dify.app or * for all origins. +WEB_API_CORS_ALLOW_ORIGINS=* + +# Specifies the allowed origins for cross-origin requests to the console API, +# e.g. https://cloud.dify.ai or * for all origins. +CONSOLE_CORS_ALLOW_ORIGINS=* + +# ------------------------------ +# File Storage Configuration +# ------------------------------ + +# The type of storage to use for storing user files. +STORAGE_TYPE=opendal + +# Apache OpenDAL Configuration +# The configuration for OpenDAL consists of the following format: OPENDAL__. +# You can find all the service configurations (CONFIG_NAME) in the repository at: https://github.com/apache/opendal/tree/main/core/src/services. +# Dify will scan configurations starting with OPENDAL_ and automatically apply them. +# The scheme name for the OpenDAL storage. +OPENDAL_SCHEME=fs +# Configurations for OpenDAL Local File System. +OPENDAL_FS_ROOT=storage + +# ClickZetta Volume Configuration (for storage backend) +# To use ClickZetta Volume as storage backend, set STORAGE_TYPE=clickzetta-volume +# Note: ClickZetta Volume will reuse the existing CLICKZETTA_* connection parameters + +# Volume type selection (three types available): +# - user: Personal/small team use, simple config, user-level permissions +# - table: Enterprise multi-tenant, smart routing, table-level + user-level permissions +# - external: Data lake integration, external storage connection, volume-level + storage-level permissions +CLICKZETTA_VOLUME_TYPE=user + +# External Volume name (required only when TYPE=external) +CLICKZETTA_VOLUME_NAME= + +# Table Volume table prefix (used only when TYPE=table) +CLICKZETTA_VOLUME_TABLE_PREFIX=dataset_ + +# Dify file directory prefix (isolates from other apps, recommended to keep default) +CLICKZETTA_VOLUME_DIFY_PREFIX=dify_km + +# S3 Configuration +# +S3_ENDPOINT= +S3_REGION=us-east-1 +S3_BUCKET_NAME=difyai +S3_ACCESS_KEY= +S3_SECRET_KEY= +# Whether to use AWS managed IAM roles for authenticating with the S3 service. +# If set to false, the access key and secret key must be provided. +S3_USE_AWS_MANAGED_IAM=false + +# Azure Blob Configuration +# +AZURE_BLOB_ACCOUNT_NAME=difyai +AZURE_BLOB_ACCOUNT_KEY=difyai +AZURE_BLOB_CONTAINER_NAME=difyai-container +AZURE_BLOB_ACCOUNT_URL=https://.blob.core.windows.net + +# Google Storage Configuration +# +GOOGLE_STORAGE_BUCKET_NAME=your-bucket-name +GOOGLE_STORAGE_SERVICE_ACCOUNT_JSON_BASE64= + +# The Alibaba Cloud OSS configurations, +# +ALIYUN_OSS_BUCKET_NAME=your-bucket-name +ALIYUN_OSS_ACCESS_KEY=your-access-key +ALIYUN_OSS_SECRET_KEY=your-secret-key +ALIYUN_OSS_ENDPOINT=https://oss-ap-southeast-1-internal.aliyuncs.com +ALIYUN_OSS_REGION=ap-southeast-1 +ALIYUN_OSS_AUTH_VERSION=v4 +# Don't start with '/'. OSS doesn't support leading slash in object names. +ALIYUN_OSS_PATH=your-path + +# Tencent COS Configuration +# +TENCENT_COS_BUCKET_NAME=your-bucket-name +TENCENT_COS_SECRET_KEY=your-secret-key +TENCENT_COS_SECRET_ID=your-secret-id +TENCENT_COS_REGION=your-region +TENCENT_COS_SCHEME=your-scheme + +# Oracle Storage Configuration +# +OCI_ENDPOINT=https://your-object-storage-namespace.compat.objectstorage.us-ashburn-1.oraclecloud.com +OCI_BUCKET_NAME=your-bucket-name +OCI_ACCESS_KEY=your-access-key +OCI_SECRET_KEY=your-secret-key +OCI_REGION=us-ashburn-1 + +# Huawei OBS Configuration +# +HUAWEI_OBS_BUCKET_NAME=your-bucket-name +HUAWEI_OBS_SECRET_KEY=your-secret-key +HUAWEI_OBS_ACCESS_KEY=your-access-key +HUAWEI_OBS_SERVER=your-server-url + +# Volcengine TOS Configuration +# +VOLCENGINE_TOS_BUCKET_NAME=your-bucket-name +VOLCENGINE_TOS_SECRET_KEY=your-secret-key +VOLCENGINE_TOS_ACCESS_KEY=your-access-key +VOLCENGINE_TOS_ENDPOINT=your-server-url +VOLCENGINE_TOS_REGION=your-region + +# Baidu OBS Storage Configuration +# +BAIDU_OBS_BUCKET_NAME=your-bucket-name +BAIDU_OBS_SECRET_KEY=your-secret-key +BAIDU_OBS_ACCESS_KEY=your-access-key +BAIDU_OBS_ENDPOINT=your-server-url + +# Supabase Storage Configuration +# +SUPABASE_BUCKET_NAME=your-bucket-name +SUPABASE_API_KEY=your-access-key +SUPABASE_URL=your-server-url + +# ------------------------------ +# Vector Database Configuration +# ------------------------------ + +# The type of vector store to use. +# Supported values are `weaviate`, `qdrant`, `milvus`, `myscale`, `relyt`, `pgvector`, `pgvecto-rs`, `chroma`, `opensearch`, `oracle`, `tencent`, `elasticsearch`, `elasticsearch-ja`, `analyticdb`, `couchbase`, `vikingdb`, `oceanbase`, `opengauss`, `tablestore`,`vastbase`,`tidb`,`tidb_on_qdrant`,`baidu`,`lindorm`,`huawei_cloud`,`upstash`, `matrixone`. +VECTOR_STORE=weaviate + +# The Weaviate endpoint URL. Only available when VECTOR_STORE is `weaviate`. +WEAVIATE_ENDPOINT=http://weaviate:8080 +WEAVIATE_API_KEY=WVF5YThaHlkYwhGUSmCRgsX3tD5ngdN8pkih + +# The Qdrant endpoint URL. Only available when VECTOR_STORE is `qdrant`. +QDRANT_URL=http://qdrant:6333 +QDRANT_API_KEY=difyai123456 +QDRANT_CLIENT_TIMEOUT=20 +QDRANT_GRPC_ENABLED=false +QDRANT_GRPC_PORT=6334 +QDRANT_REPLICATION_FACTOR=1 + +# Milvus configuration. Only available when VECTOR_STORE is `milvus`. +# The milvus uri. +MILVUS_URI=http://host.docker.internal:19530 +MILVUS_DATABASE= +MILVUS_TOKEN= +MILVUS_USER= +MILVUS_PASSWORD= +MILVUS_ENABLE_HYBRID_SEARCH=False +MILVUS_ANALYZER_PARAMS= + +# MyScale configuration, only available when VECTOR_STORE is `myscale` +# For multi-language support, please set MYSCALE_FTS_PARAMS with referring to: +# https://myscale.com/docs/en/text-search/#understanding-fts-index-parameters +MYSCALE_HOST=myscale +MYSCALE_PORT=8123 +MYSCALE_USER=default +MYSCALE_PASSWORD= +MYSCALE_DATABASE=dify +MYSCALE_FTS_PARAMS= + +# Couchbase configurations, only available when VECTOR_STORE is `couchbase` +# The connection string must include hostname defined in the docker-compose file (couchbase-server in this case) +COUCHBASE_CONNECTION_STRING=couchbase://couchbase-server +COUCHBASE_USER=Administrator +COUCHBASE_PASSWORD=password +COUCHBASE_BUCKET_NAME=Embeddings +COUCHBASE_SCOPE_NAME=_default + +# pgvector configurations, only available when VECTOR_STORE is `pgvector` +PGVECTOR_HOST=pgvector +PGVECTOR_PORT=5432 +PGVECTOR_USER=postgres +PGVECTOR_PASSWORD=difyai123456 +PGVECTOR_DATABASE=dify +PGVECTOR_MIN_CONNECTION=1 +PGVECTOR_MAX_CONNECTION=5 +PGVECTOR_PG_BIGM=false +PGVECTOR_PG_BIGM_VERSION=1.2-20240606 + +# vastbase configurations, only available when VECTOR_STORE is `vastbase` +VASTBASE_HOST=vastbase +VASTBASE_PORT=5432 +VASTBASE_USER=dify +VASTBASE_PASSWORD=Difyai123456 +VASTBASE_DATABASE=dify +VASTBASE_MIN_CONNECTION=1 +VASTBASE_MAX_CONNECTION=5 + +# pgvecto-rs configurations, only available when VECTOR_STORE is `pgvecto-rs` +PGVECTO_RS_HOST=pgvecto-rs +PGVECTO_RS_PORT=5432 +PGVECTO_RS_USER=postgres +PGVECTO_RS_PASSWORD=difyai123456 +PGVECTO_RS_DATABASE=dify + +# analyticdb configurations, only available when VECTOR_STORE is `analyticdb` +ANALYTICDB_KEY_ID=your-ak +ANALYTICDB_KEY_SECRET=your-sk +ANALYTICDB_REGION_ID=cn-hangzhou +ANALYTICDB_INSTANCE_ID=gp-ab123456 +ANALYTICDB_ACCOUNT=testaccount +ANALYTICDB_PASSWORD=testpassword +ANALYTICDB_NAMESPACE=dify +ANALYTICDB_NAMESPACE_PASSWORD=difypassword +ANALYTICDB_HOST=gp-test.aliyuncs.com +ANALYTICDB_PORT=5432 +ANALYTICDB_MIN_CONNECTION=1 +ANALYTICDB_MAX_CONNECTION=5 + +# TiDB vector configurations, only available when VECTOR_STORE is `tidb_vector` +TIDB_VECTOR_HOST=tidb +TIDB_VECTOR_PORT=4000 +TIDB_VECTOR_USER= +TIDB_VECTOR_PASSWORD= +TIDB_VECTOR_DATABASE=dify + +# Matrixone vector configurations. +MATRIXONE_HOST=matrixone +MATRIXONE_PORT=6001 +MATRIXONE_USER=dump +MATRIXONE_PASSWORD=111 +MATRIXONE_DATABASE=dify + +# Tidb on qdrant configuration, only available when VECTOR_STORE is `tidb_on_qdrant` +TIDB_ON_QDRANT_URL=http://127.0.0.1 +TIDB_ON_QDRANT_API_KEY=dify +TIDB_ON_QDRANT_CLIENT_TIMEOUT=20 +TIDB_ON_QDRANT_GRPC_ENABLED=false +TIDB_ON_QDRANT_GRPC_PORT=6334 +TIDB_PUBLIC_KEY=dify +TIDB_PRIVATE_KEY=dify +TIDB_API_URL=http://127.0.0.1 +TIDB_IAM_API_URL=http://127.0.0.1 +TIDB_REGION=regions/aws-us-east-1 +TIDB_PROJECT_ID=dify +TIDB_SPEND_LIMIT=100 + +# Chroma configuration, only available when VECTOR_STORE is `chroma` +CHROMA_HOST=127.0.0.1 +CHROMA_PORT=8000 +CHROMA_TENANT=default_tenant +CHROMA_DATABASE=default_database +CHROMA_AUTH_PROVIDER=chromadb.auth.token_authn.TokenAuthClientProvider +CHROMA_AUTH_CREDENTIALS= + +# Oracle configuration, only available when VECTOR_STORE is `oracle` +ORACLE_USER=dify +ORACLE_PASSWORD=dify +ORACLE_DSN=oracle:1521/FREEPDB1 +ORACLE_CONFIG_DIR=/app/api/storage/wallet +ORACLE_WALLET_LOCATION=/app/api/storage/wallet +ORACLE_WALLET_PASSWORD=dify +ORACLE_IS_AUTONOMOUS=false + +# relyt configurations, only available when VECTOR_STORE is `relyt` +RELYT_HOST=db +RELYT_PORT=5432 +RELYT_USER=postgres +RELYT_PASSWORD=difyai123456 +RELYT_DATABASE=postgres + +# open search configuration, only available when VECTOR_STORE is `opensearch` +OPENSEARCH_HOST=opensearch +OPENSEARCH_PORT=9200 +OPENSEARCH_SECURE=true +OPENSEARCH_VERIFY_CERTS=true +OPENSEARCH_AUTH_METHOD=basic +OPENSEARCH_USER=admin +OPENSEARCH_PASSWORD=admin +# If using AWS managed IAM, e.g. Managed Cluster or OpenSearch Serverless +OPENSEARCH_AWS_REGION=ap-southeast-1 +OPENSEARCH_AWS_SERVICE=aoss + +# tencent vector configurations, only available when VECTOR_STORE is `tencent` +TENCENT_VECTOR_DB_URL=http://127.0.0.1 +TENCENT_VECTOR_DB_API_KEY=dify +TENCENT_VECTOR_DB_TIMEOUT=30 +TENCENT_VECTOR_DB_USERNAME=dify +TENCENT_VECTOR_DB_DATABASE=dify +TENCENT_VECTOR_DB_SHARD=1 +TENCENT_VECTOR_DB_REPLICAS=2 +TENCENT_VECTOR_DB_ENABLE_HYBRID_SEARCH=false + +# ElasticSearch configuration, only available when VECTOR_STORE is `elasticsearch` +ELASTICSEARCH_HOST=0.0.0.0 +ELASTICSEARCH_PORT=9200 +ELASTICSEARCH_USERNAME=elastic +ELASTICSEARCH_PASSWORD=elastic +KIBANA_PORT=5601 + +# baidu vector configurations, only available when VECTOR_STORE is `baidu` +BAIDU_VECTOR_DB_ENDPOINT=http://127.0.0.1:5287 +BAIDU_VECTOR_DB_CONNECTION_TIMEOUT_MS=30000 +BAIDU_VECTOR_DB_ACCOUNT=root +BAIDU_VECTOR_DB_API_KEY=dify +BAIDU_VECTOR_DB_DATABASE=dify +BAIDU_VECTOR_DB_SHARD=1 +BAIDU_VECTOR_DB_REPLICAS=3 + +# VikingDB configurations, only available when VECTOR_STORE is `vikingdb` +VIKINGDB_ACCESS_KEY=your-ak +VIKINGDB_SECRET_KEY=your-sk +VIKINGDB_REGION=cn-shanghai +VIKINGDB_HOST=api-vikingdb.xxx.volces.com +VIKINGDB_SCHEMA=http +VIKINGDB_CONNECTION_TIMEOUT=30 +VIKINGDB_SOCKET_TIMEOUT=30 + +# Lindorm configuration, only available when VECTOR_STORE is `lindorm` +LINDORM_URL=http://lindorm:30070 +LINDORM_USERNAME=lindorm +LINDORM_PASSWORD=lindorm +LINDORM_QUERY_TIMEOUT=1 + +# OceanBase Vector configuration, only available when VECTOR_STORE is `oceanbase` +OCEANBASE_VECTOR_HOST=oceanbase +OCEANBASE_VECTOR_PORT=2881 +OCEANBASE_VECTOR_USER=root@test +OCEANBASE_VECTOR_PASSWORD=difyai123456 +OCEANBASE_VECTOR_DATABASE=test +OCEANBASE_CLUSTER_NAME=difyai +OCEANBASE_MEMORY_LIMIT=6G +OCEANBASE_ENABLE_HYBRID_SEARCH=false + +# opengauss configurations, only available when VECTOR_STORE is `opengauss` +OPENGAUSS_HOST=opengauss +OPENGAUSS_PORT=6600 +OPENGAUSS_USER=postgres +OPENGAUSS_PASSWORD=Dify@123 +OPENGAUSS_DATABASE=dify +OPENGAUSS_MIN_CONNECTION=1 +OPENGAUSS_MAX_CONNECTION=5 +OPENGAUSS_ENABLE_PQ=false + +# huawei cloud search service vector configurations, only available when VECTOR_STORE is `huawei_cloud` +HUAWEI_CLOUD_HOSTS=https://127.0.0.1:9200 +HUAWEI_CLOUD_USER=admin +HUAWEI_CLOUD_PASSWORD=admin + +# Upstash Vector configuration, only available when VECTOR_STORE is `upstash` +UPSTASH_VECTOR_URL=https://xxx-vector.upstash.io +UPSTASH_VECTOR_TOKEN=dify + +# TableStore Vector configuration +# (only used when VECTOR_STORE is tablestore) +TABLESTORE_ENDPOINT=https://instance-name.cn-hangzhou.ots.aliyuncs.com +TABLESTORE_INSTANCE_NAME=instance-name +TABLESTORE_ACCESS_KEY_ID=xxx +TABLESTORE_ACCESS_KEY_SECRET=xxx + +# Clickzetta configuration, only available when VECTOR_STORE is `clickzetta` +CLICKZETTA_USERNAME= +CLICKZETTA_PASSWORD= +CLICKZETTA_INSTANCE= +CLICKZETTA_SERVICE=api.clickzetta.com +CLICKZETTA_WORKSPACE=quick_start +CLICKZETTA_VCLUSTER=default_ap +CLICKZETTA_SCHEMA=dify +CLICKZETTA_BATCH_SIZE=100 +CLICKZETTA_ENABLE_INVERTED_INDEX=true +CLICKZETTA_ANALYZER_TYPE=chinese +CLICKZETTA_ANALYZER_MODE=smart +CLICKZETTA_VECTOR_DISTANCE_FUNCTION=cosine_distance + +# ------------------------------ +# Knowledge Configuration +# ------------------------------ + +# Upload file size limit, default 15M. +UPLOAD_FILE_SIZE_LIMIT=15 + +# The maximum number of files that can be uploaded at a time, default 5. +UPLOAD_FILE_BATCH_LIMIT=5 + +# ETL type, support: `dify`, `Unstructured` +# `dify` Dify's proprietary file extraction scheme +# `Unstructured` Unstructured.io file extraction scheme +ETL_TYPE=dify + +# Unstructured API path and API key, needs to be configured when ETL_TYPE is Unstructured +# Or using Unstructured for document extractor node for pptx. +# For example: http://unstructured:8000/general/v0/general +UNSTRUCTURED_API_URL= +UNSTRUCTURED_API_KEY= +SCARF_NO_ANALYTICS=true + +# ------------------------------ +# Model Configuration +# ------------------------------ + +# The maximum number of tokens allowed for prompt generation. +# This setting controls the upper limit of tokens that can be used by the LLM +# when generating a prompt in the prompt generation tool. +# Default: 512 tokens. +PROMPT_GENERATION_MAX_TOKENS=512 + +# The maximum number of tokens allowed for code generation. +# This setting controls the upper limit of tokens that can be used by the LLM +# when generating code in the code generation tool. +# Default: 1024 tokens. +CODE_GENERATION_MAX_TOKENS=1024 + +# Enable or disable plugin based token counting. If disabled, token counting will return 0. +# This can improve performance by skipping token counting operations. +# Default: false (disabled). +PLUGIN_BASED_TOKEN_COUNTING_ENABLED=false + +# ------------------------------ +# Multi-modal Configuration +# ------------------------------ + +# The format of the image/video/audio/document sent when the multi-modal model is input, +# the default is base64, optional url. +# The delay of the call in url mode will be lower than that in base64 mode. +# It is generally recommended to use the more compatible base64 mode. +# If configured as url, you need to configure FILES_URL as an externally accessible address so that the multi-modal model can access the image/video/audio/document. +MULTIMODAL_SEND_FORMAT=base64 +# Upload image file size limit, default 10M. +UPLOAD_IMAGE_FILE_SIZE_LIMIT=10 +# Upload video file size limit, default 100M. +UPLOAD_VIDEO_FILE_SIZE_LIMIT=100 +# Upload audio file size limit, default 50M. +UPLOAD_AUDIO_FILE_SIZE_LIMIT=50 + +# ------------------------------ +# Sentry Configuration +# Used for application monitoring and error log tracking. +# ------------------------------ +SENTRY_DSN= + +# API Service Sentry DSN address, default is empty, when empty, +# all monitoring information is not reported to Sentry. +# If not set, Sentry error reporting will be disabled. +API_SENTRY_DSN= +# API Service The reporting ratio of Sentry events, if it is 0.01, it is 1%. +API_SENTRY_TRACES_SAMPLE_RATE=1.0 +# API Service The reporting ratio of Sentry profiles, if it is 0.01, it is 1%. +API_SENTRY_PROFILES_SAMPLE_RATE=1.0 + +# Web Service Sentry DSN address, default is empty, when empty, +# all monitoring information is not reported to Sentry. +# If not set, Sentry error reporting will be disabled. +WEB_SENTRY_DSN= + +# ------------------------------ +# Notion Integration Configuration +# Variables can be obtained by applying for Notion integration: https://www.notion.so/my-integrations +# ------------------------------ + +# Configure as "public" or "internal". +# Since Notion's OAuth redirect URL only supports HTTPS, +# if deploying locally, please use Notion's internal integration. +NOTION_INTEGRATION_TYPE=public +# Notion OAuth client secret (used for public integration type) +NOTION_CLIENT_SECRET= +# Notion OAuth client id (used for public integration type) +NOTION_CLIENT_ID= +# Notion internal integration secret. +# If the value of NOTION_INTEGRATION_TYPE is "internal", +# you need to configure this variable. +NOTION_INTERNAL_SECRET= + +# ------------------------------ +# Mail related configuration +# ------------------------------ + +# Mail type, support: resend, smtp, sendgrid +MAIL_TYPE=resend + +# Default send from email address, if not specified +# If using SendGrid, use the 'from' field for authentication if necessary. +MAIL_DEFAULT_SEND_FROM= + +# API-Key for the Resend email provider, used when MAIL_TYPE is `resend`. +RESEND_API_URL=https://api.resend.com +RESEND_API_KEY=your-resend-api-key + + +# SMTP server configuration, used when MAIL_TYPE is `smtp` +SMTP_SERVER= +SMTP_PORT=465 +SMTP_USERNAME= +SMTP_PASSWORD= +SMTP_USE_TLS=true +SMTP_OPPORTUNISTIC_TLS=false + +# Sendgid configuration +SENDGRID_API_KEY= + +# ------------------------------ +# Others Configuration +# ------------------------------ + +# Maximum length of segmentation tokens for indexing +INDEXING_MAX_SEGMENTATION_TOKENS_LENGTH=4000 + +# Member invitation link valid time (hours), +# Default: 72. +INVITE_EXPIRY_HOURS=72 + +# Reset password token valid time (minutes), +RESET_PASSWORD_TOKEN_EXPIRY_MINUTES=5 + +# The sandbox service endpoint. +CODE_EXECUTION_ENDPOINT=http://sandbox:8194 +CODE_EXECUTION_API_KEY=dify-sandbox +CODE_MAX_NUMBER=9223372036854775807 +CODE_MIN_NUMBER=-9223372036854775808 +CODE_MAX_DEPTH=5 +CODE_MAX_PRECISION=20 +CODE_MAX_STRING_LENGTH=80000 +CODE_MAX_STRING_ARRAY_LENGTH=30 +CODE_MAX_OBJECT_ARRAY_LENGTH=30 +CODE_MAX_NUMBER_ARRAY_LENGTH=1000 +CODE_EXECUTION_CONNECT_TIMEOUT=10 +CODE_EXECUTION_READ_TIMEOUT=60 +CODE_EXECUTION_WRITE_TIMEOUT=10 +TEMPLATE_TRANSFORM_MAX_LENGTH=80000 + +# Workflow runtime configuration +WORKFLOW_MAX_EXECUTION_STEPS=500 +WORKFLOW_MAX_EXECUTION_TIME=1200 +WORKFLOW_CALL_MAX_DEPTH=5 +MAX_VARIABLE_SIZE=204800 +WORKFLOW_PARALLEL_DEPTH_LIMIT=3 +WORKFLOW_FILE_UPLOAD_LIMIT=10 + +# Workflow storage configuration +# Options: rdbms, hybrid +# rdbms: Use only the relational database (default) +# hybrid: Save new data to object storage, read from both object storage and RDBMS +WORKFLOW_NODE_EXECUTION_STORAGE=rdbms + +# Repository configuration +# Core workflow execution repository implementation +CORE_WORKFLOW_EXECUTION_REPOSITORY=core.repositories.sqlalchemy_workflow_execution_repository.SQLAlchemyWorkflowExecutionRepository + +# Core workflow node execution repository implementation +CORE_WORKFLOW_NODE_EXECUTION_REPOSITORY=core.repositories.sqlalchemy_workflow_node_execution_repository.SQLAlchemyWorkflowNodeExecutionRepository + +# API workflow node execution repository implementation +API_WORKFLOW_NODE_EXECUTION_REPOSITORY=repositories.sqlalchemy_api_workflow_node_execution_repository.DifyAPISQLAlchemyWorkflowNodeExecutionRepository + +# API workflow run repository implementation +API_WORKFLOW_RUN_REPOSITORY=repositories.sqlalchemy_api_workflow_run_repository.DifyAPISQLAlchemyWorkflowRunRepository + +# HTTP request node in workflow configuration +HTTP_REQUEST_NODE_MAX_BINARY_SIZE=10485760 +HTTP_REQUEST_NODE_MAX_TEXT_SIZE=1048576 +HTTP_REQUEST_NODE_SSL_VERIFY=True + +# Respect X-* headers to redirect clients +RESPECT_XFORWARD_HEADERS_ENABLED=false + +# SSRF Proxy server HTTP URL +SSRF_PROXY_HTTP_URL=http://ssrf_proxy:3128 +# SSRF Proxy server HTTPS URL +SSRF_PROXY_HTTPS_URL=http://ssrf_proxy:3128 + +# Maximum loop count in the workflow +LOOP_NODE_MAX_COUNT=100 + +# The maximum number of tools that can be used in the agent. +MAX_TOOLS_NUM=10 + +# Maximum number of Parallelism branches in the workflow +MAX_PARALLEL_LIMIT=10 + +# The maximum number of iterations for agent setting +MAX_ITERATIONS_NUM=99 + +# ------------------------------ +# Environment Variables for web Service +# ------------------------------ + +# The timeout for the text generation in millisecond +TEXT_GENERATION_TIMEOUT_MS=60000 + +# Allow rendering unsafe URLs which have "data:" scheme. +ALLOW_UNSAFE_DATA_SCHEME=false + +# ------------------------------ +# Environment Variables for db Service +# ------------------------------ + +# The name of the default postgres user. +POSTGRES_USER=${DB_USERNAME} +# The password for the default postgres user. +POSTGRES_PASSWORD=${DB_PASSWORD} +# The name of the default postgres database. +POSTGRES_DB=${DB_DATABASE} +# postgres data directory +PGDATA=/var/lib/postgresql/data/pgdata + +# ------------------------------ +# Environment Variables for sandbox Service +# ------------------------------ + +# The API key for the sandbox service +SANDBOX_API_KEY=dify-sandbox +# The mode in which the Gin framework runs +SANDBOX_GIN_MODE=release +# The timeout for the worker in seconds +SANDBOX_WORKER_TIMEOUT=15 +# Enable network for the sandbox service +SANDBOX_ENABLE_NETWORK=true +# HTTP proxy URL for SSRF protection +SANDBOX_HTTP_PROXY=http://ssrf_proxy:3128 +# HTTPS proxy URL for SSRF protection +SANDBOX_HTTPS_PROXY=http://ssrf_proxy:3128 +# The port on which the sandbox service runs +SANDBOX_PORT=8194 + +# ------------------------------ +# Environment Variables for weaviate Service +# (only used when VECTOR_STORE is weaviate) +# ------------------------------ +WEAVIATE_PERSISTENCE_DATA_PATH=/var/lib/weaviate +WEAVIATE_QUERY_DEFAULTS_LIMIT=25 +WEAVIATE_AUTHENTICATION_ANONYMOUS_ACCESS_ENABLED=true +WEAVIATE_DEFAULT_VECTORIZER_MODULE=none +WEAVIATE_CLUSTER_HOSTNAME=node1 +WEAVIATE_AUTHENTICATION_APIKEY_ENABLED=true +WEAVIATE_AUTHENTICATION_APIKEY_ALLOWED_KEYS=WVF5YThaHlkYwhGUSmCRgsX3tD5ngdN8pkih +WEAVIATE_AUTHENTICATION_APIKEY_USERS=hello@dify.ai +WEAVIATE_AUTHORIZATION_ADMINLIST_ENABLED=true +WEAVIATE_AUTHORIZATION_ADMINLIST_USERS=hello@dify.ai + +# ------------------------------ +# Environment Variables for Chroma +# (only used when VECTOR_STORE is chroma) +# ------------------------------ + +# Authentication credentials for Chroma server +CHROMA_SERVER_AUTHN_CREDENTIALS=difyai123456 +# Authentication provider for Chroma server +CHROMA_SERVER_AUTHN_PROVIDER=chromadb.auth.token_authn.TokenAuthenticationServerProvider +# Persistence setting for Chroma server +CHROMA_IS_PERSISTENT=TRUE + +# ------------------------------ +# Environment Variables for Oracle Service +# (only used when VECTOR_STORE is oracle) +# ------------------------------ +ORACLE_PWD=Dify123456 +ORACLE_CHARACTERSET=AL32UTF8 + +# ------------------------------ +# Environment Variables for milvus Service +# (only used when VECTOR_STORE is milvus) +# ------------------------------ +# ETCD configuration for auto compaction mode +ETCD_AUTO_COMPACTION_MODE=revision +# ETCD configuration for auto compaction retention in terms of number of revisions +ETCD_AUTO_COMPACTION_RETENTION=1000 +# ETCD configuration for backend quota in bytes +ETCD_QUOTA_BACKEND_BYTES=4294967296 +# ETCD configuration for the number of changes before triggering a snapshot +ETCD_SNAPSHOT_COUNT=50000 +# MinIO access key for authentication +MINIO_ACCESS_KEY=minioadmin +# MinIO secret key for authentication +MINIO_SECRET_KEY=minioadmin +# ETCD service endpoints +ETCD_ENDPOINTS=etcd:2379 +# MinIO service address +MINIO_ADDRESS=minio:9000 +# Enable or disable security authorization +MILVUS_AUTHORIZATION_ENABLED=true + +# ------------------------------ +# Environment Variables for pgvector / pgvector-rs Service +# (only used when VECTOR_STORE is pgvector / pgvector-rs) +# ------------------------------ +PGVECTOR_PGUSER=postgres +# The password for the default postgres user. +PGVECTOR_POSTGRES_PASSWORD=difyai123456 +# The name of the default postgres database. +PGVECTOR_POSTGRES_DB=dify +# postgres data directory +PGVECTOR_PGDATA=/var/lib/postgresql/data/pgdata + +# ------------------------------ +# Environment Variables for opensearch +# (only used when VECTOR_STORE is opensearch) +# ------------------------------ +OPENSEARCH_DISCOVERY_TYPE=single-node +OPENSEARCH_BOOTSTRAP_MEMORY_LOCK=true +OPENSEARCH_JAVA_OPTS_MIN=512m +OPENSEARCH_JAVA_OPTS_MAX=1024m +OPENSEARCH_INITIAL_ADMIN_PASSWORD=Qazwsxedc!@#123 +OPENSEARCH_MEMLOCK_SOFT=-1 +OPENSEARCH_MEMLOCK_HARD=-1 +OPENSEARCH_NOFILE_SOFT=65536 +OPENSEARCH_NOFILE_HARD=65536 + +# ------------------------------ +# Environment Variables for Nginx reverse proxy +# ------------------------------ +NGINX_SERVER_NAME=_ +NGINX_HTTPS_ENABLED=false +# HTTP port +NGINX_PORT=80 +# SSL settings are only applied when HTTPS_ENABLED is true +NGINX_SSL_PORT=443 +# if HTTPS_ENABLED is true, you're required to add your own SSL certificates/keys to the `./nginx/ssl` directory +# and modify the env vars below accordingly. +NGINX_SSL_CERT_FILENAME=dify.crt +NGINX_SSL_CERT_KEY_FILENAME=dify.key +NGINX_SSL_PROTOCOLS=TLSv1.1 TLSv1.2 TLSv1.3 + +# Nginx performance tuning +NGINX_WORKER_PROCESSES=auto +NGINX_CLIENT_MAX_BODY_SIZE=100M +NGINX_KEEPALIVE_TIMEOUT=65 + +# Proxy settings +NGINX_PROXY_READ_TIMEOUT=3600s +NGINX_PROXY_SEND_TIMEOUT=3600s + +# Set true to accept requests for /.well-known/acme-challenge/ +NGINX_ENABLE_CERTBOT_CHALLENGE=false + +# ------------------------------ +# Certbot Configuration +# ------------------------------ + +# Email address (required to get certificates from Let's Encrypt) +CERTBOT_EMAIL=your_email@example.com + +# Domain name +CERTBOT_DOMAIN=your_domain.com + +# certbot command options +# i.e: --force-renewal --dry-run --test-cert --debug +CERTBOT_OPTIONS= + +# ------------------------------ +# Environment Variables for SSRF Proxy +# ------------------------------ +SSRF_HTTP_PORT=3128 +SSRF_COREDUMP_DIR=/var/spool/squid +SSRF_REVERSE_PROXY_PORT=8194 +SSRF_SANDBOX_HOST=sandbox +SSRF_DEFAULT_TIME_OUT=5 +SSRF_DEFAULT_CONNECT_TIME_OUT=5 +SSRF_DEFAULT_READ_TIME_OUT=5 +SSRF_DEFAULT_WRITE_TIME_OUT=5 + +# ------------------------------ +# docker env var for specifying vector db type at startup +# (based on the vector db type, the corresponding docker +# compose profile will be used) +# if you want to use unstructured, add ',unstructured' to the end +# ------------------------------ +COMPOSE_PROFILES=${VECTOR_STORE:-weaviate} + +# ------------------------------ +# Docker Compose Service Expose Host Port Configurations +# ------------------------------ +EXPOSE_NGINX_PORT=80 +EXPOSE_NGINX_SSL_PORT=443 + +# ---------------------------------------------------------------------------- +# ModelProvider & Tool Position Configuration +# Used to specify the model providers and tools that can be used in the app. +# ---------------------------------------------------------------------------- + +# Pin, include, and exclude tools +# Use comma-separated values with no spaces between items. +# Example: POSITION_TOOL_PINS=bing,google +POSITION_TOOL_PINS= +POSITION_TOOL_INCLUDES= +POSITION_TOOL_EXCLUDES= + +# Pin, include, and exclude model providers +# Use comma-separated values with no spaces between items. +# Example: POSITION_PROVIDER_PINS=openai,openllm +POSITION_PROVIDER_PINS= +POSITION_PROVIDER_INCLUDES= +POSITION_PROVIDER_EXCLUDES= + +# CSP https://developer.mozilla.org/en-US/docs/Web/HTTP/CSP +CSP_WHITELIST= + +# Enable or disable create tidb service job +CREATE_TIDB_SERVICE_JOB_ENABLED=false + +# Maximum number of submitted thread count in a ThreadPool for parallel node execution +MAX_SUBMIT_COUNT=100 + +# The maximum number of top-k value for RAG. +TOP_K_MAX_VALUE=10 + +# ------------------------------ +# Plugin Daemon Configuration +# ------------------------------ + +DB_PLUGIN_DATABASE=dify_plugin +EXPOSE_PLUGIN_DAEMON_PORT=5002 +PLUGIN_DAEMON_PORT=5002 +PLUGIN_DAEMON_KEY=lYkiYYT6owG+71oLerGzA7GXCgOT++6ovaezWAjpCjf+Sjc3ZtU+qUEi +PLUGIN_DAEMON_URL=http://plugin_daemon:5002 +PLUGIN_MAX_PACKAGE_SIZE=52428800 +PLUGIN_PPROF_ENABLED=false + +PLUGIN_DEBUGGING_HOST=0.0.0.0 +PLUGIN_DEBUGGING_PORT=5003 +EXPOSE_PLUGIN_DEBUGGING_HOST=localhost +EXPOSE_PLUGIN_DEBUGGING_PORT=5003 + +# If this key is changed, DIFY_INNER_API_KEY in plugin_daemon service must also be updated or agent node will fail. +PLUGIN_DIFY_INNER_API_KEY=QaHbTe77CtuXmsfyhR7+vRjI/+XbV1AaFy691iy+kGDv2Jvy0/eAh8Y1 +PLUGIN_DIFY_INNER_API_URL=http://api:5001 + +ENDPOINT_URL_TEMPLATE=http://localhost/e/{hook_id} + +MARKETPLACE_ENABLED=true +MARKETPLACE_API_URL=https://marketplace.dify.ai + +FORCE_VERIFYING_SIGNATURE=true + +PLUGIN_PYTHON_ENV_INIT_TIMEOUT=120 +PLUGIN_MAX_EXECUTION_TIMEOUT=600 +# PIP_MIRROR_URL=https://pypi.tuna.tsinghua.edu.cn/simple +PIP_MIRROR_URL= + +# https://github.com/langgenius/dify-plugin-daemon/blob/main/.env.example +# Plugin storage type, local aws_s3 tencent_cos azure_blob aliyun_oss volcengine_tos +PLUGIN_STORAGE_TYPE=local +PLUGIN_STORAGE_LOCAL_ROOT=/app/storage +PLUGIN_WORKING_PATH=/app/storage/cwd +PLUGIN_INSTALLED_PATH=plugin +PLUGIN_PACKAGE_CACHE_PATH=plugin_packages +PLUGIN_MEDIA_CACHE_PATH=assets +# Plugin oss bucket +PLUGIN_STORAGE_OSS_BUCKET= +# Plugin oss s3 credentials +PLUGIN_S3_USE_AWS=false +PLUGIN_S3_USE_AWS_MANAGED_IAM=false +PLUGIN_S3_ENDPOINT= +PLUGIN_S3_USE_PATH_STYLE=false +PLUGIN_AWS_ACCESS_KEY= +PLUGIN_AWS_SECRET_KEY= +PLUGIN_AWS_REGION= +# Plugin oss azure blob +PLUGIN_AZURE_BLOB_STORAGE_CONTAINER_NAME= +PLUGIN_AZURE_BLOB_STORAGE_CONNECTION_STRING= +# Plugin oss tencent cos +PLUGIN_TENCENT_COS_SECRET_KEY= +PLUGIN_TENCENT_COS_SECRET_ID= +PLUGIN_TENCENT_COS_REGION= +# Plugin oss aliyun oss +PLUGIN_ALIYUN_OSS_REGION= +PLUGIN_ALIYUN_OSS_ENDPOINT= +PLUGIN_ALIYUN_OSS_ACCESS_KEY_ID= +PLUGIN_ALIYUN_OSS_ACCESS_KEY_SECRET= +PLUGIN_ALIYUN_OSS_AUTH_VERSION=v4 +PLUGIN_ALIYUN_OSS_PATH= +# Plugin oss volcengine tos +PLUGIN_VOLCENGINE_TOS_ENDPOINT= +PLUGIN_VOLCENGINE_TOS_ACCESS_KEY= +PLUGIN_VOLCENGINE_TOS_SECRET_KEY= +PLUGIN_VOLCENGINE_TOS_REGION= + +# ------------------------------ +# OTLP Collector Configuration +# ------------------------------ +ENABLE_OTEL=false +OTLP_TRACE_ENDPOINT= +OTLP_METRIC_ENDPOINT= +OTLP_BASE_ENDPOINT=http://localhost:4318 +OTLP_API_KEY= +OTEL_EXPORTER_OTLP_PROTOCOL= +OTEL_EXPORTER_TYPE=otlp +OTEL_SAMPLING_RATE=0.1 +OTEL_BATCH_EXPORT_SCHEDULE_DELAY=5000 +OTEL_MAX_QUEUE_SIZE=2048 +OTEL_MAX_EXPORT_BATCH_SIZE=512 +OTEL_METRIC_EXPORT_INTERVAL=60000 +OTEL_BATCH_EXPORT_TIMEOUT=10000 +OTEL_METRIC_EXPORT_TIMEOUT=30000 + +# Prevent Clickjacking +ALLOW_EMBED=false + +# Dataset queue monitor configuration +QUEUE_MONITOR_THRESHOLD=200 +# You can configure multiple ones, separated by commas. eg: test1@dify.ai,test2@dify.ai +QUEUE_MONITOR_ALERT_EMAILS= +# Monitor interval in minutes, default is 30 minutes +QUEUE_MONITOR_INTERVAL=30 diff --git a/.gitignore b/.gitignore index dd4673a3d2..c60957db72 100644 --- a/.gitignore +++ b/.gitignore @@ -215,3 +215,10 @@ mise.toml # AI Assistant .roo/ api/.env.backup + +# Clickzetta test credentials +.env.clickzetta +.env.clickzetta.test + +# Clickzetta plugin development folder (keep local, ignore for PR) +clickzetta/ diff --git a/api/configs/middleware/__init__.py b/api/configs/middleware/__init__.py index ff290ff99d..4e228ab932 100644 --- a/api/configs/middleware/__init__.py +++ b/api/configs/middleware/__init__.py @@ -10,6 +10,7 @@ from .storage.aliyun_oss_storage_config import AliyunOSSStorageConfig from .storage.amazon_s3_storage_config import S3StorageConfig from .storage.azure_blob_storage_config import AzureBlobStorageConfig from .storage.baidu_obs_storage_config import BaiduOBSStorageConfig +from .storage.clickzetta_volume_storage_config import ClickZettaVolumeStorageConfig from .storage.google_cloud_storage_config import GoogleCloudStorageConfig from .storage.huawei_obs_storage_config import HuaweiCloudOBSStorageConfig from .storage.oci_storage_config import OCIStorageConfig @@ -20,6 +21,7 @@ from .storage.volcengine_tos_storage_config import VolcengineTOSStorageConfig from .vdb.analyticdb_config import AnalyticdbConfig from .vdb.baidu_vector_config import BaiduVectorDBConfig from .vdb.chroma_config import ChromaConfig +from .vdb.clickzetta_config import ClickzettaConfig from .vdb.couchbase_config import CouchbaseConfig from .vdb.elasticsearch_config import ElasticsearchConfig from .vdb.huawei_cloud_config import HuaweiCloudConfig @@ -52,6 +54,7 @@ class StorageConfig(BaseSettings): "aliyun-oss", "azure-blob", "baidu-obs", + "clickzetta-volume", "google-storage", "huawei-obs", "oci-storage", @@ -61,8 +64,9 @@ class StorageConfig(BaseSettings): "local", ] = Field( description="Type of storage to use." - " Options: 'opendal', '(deprecated) local', 's3', 'aliyun-oss', 'azure-blob', 'baidu-obs', 'google-storage', " - "'huawei-obs', 'oci-storage', 'tencent-cos', 'volcengine-tos', 'supabase'. Default is 'opendal'.", + " Options: 'opendal', '(deprecated) local', 's3', 'aliyun-oss', 'azure-blob', 'baidu-obs', " + "'clickzetta-volume', 'google-storage', 'huawei-obs', 'oci-storage', 'tencent-cos', " + "'volcengine-tos', 'supabase'. Default is 'opendal'.", default="opendal", ) @@ -303,6 +307,7 @@ class MiddlewareConfig( AliyunOSSStorageConfig, AzureBlobStorageConfig, BaiduOBSStorageConfig, + ClickZettaVolumeStorageConfig, GoogleCloudStorageConfig, HuaweiCloudOBSStorageConfig, OCIStorageConfig, @@ -315,6 +320,7 @@ class MiddlewareConfig( VectorStoreConfig, AnalyticdbConfig, ChromaConfig, + ClickzettaConfig, HuaweiCloudConfig, MilvusConfig, MyScaleConfig, diff --git a/api/configs/middleware/storage/clickzetta_volume_storage_config.py b/api/configs/middleware/storage/clickzetta_volume_storage_config.py new file mode 100644 index 0000000000..56e1b6a957 --- /dev/null +++ b/api/configs/middleware/storage/clickzetta_volume_storage_config.py @@ -0,0 +1,65 @@ +"""ClickZetta Volume Storage Configuration""" + +from typing import Optional + +from pydantic import Field +from pydantic_settings import BaseSettings + + +class ClickZettaVolumeStorageConfig(BaseSettings): + """Configuration for ClickZetta Volume storage.""" + + CLICKZETTA_VOLUME_USERNAME: Optional[str] = Field( + description="Username for ClickZetta Volume authentication", + default=None, + ) + + CLICKZETTA_VOLUME_PASSWORD: Optional[str] = Field( + description="Password for ClickZetta Volume authentication", + default=None, + ) + + CLICKZETTA_VOLUME_INSTANCE: Optional[str] = Field( + description="ClickZetta instance identifier", + default=None, + ) + + CLICKZETTA_VOLUME_SERVICE: str = Field( + description="ClickZetta service endpoint", + default="api.clickzetta.com", + ) + + CLICKZETTA_VOLUME_WORKSPACE: str = Field( + description="ClickZetta workspace name", + default="quick_start", + ) + + CLICKZETTA_VOLUME_VCLUSTER: str = Field( + description="ClickZetta virtual cluster name", + default="default_ap", + ) + + CLICKZETTA_VOLUME_SCHEMA: str = Field( + description="ClickZetta schema name", + default="dify", + ) + + CLICKZETTA_VOLUME_TYPE: str = Field( + description="ClickZetta volume type (table|user|external)", + default="user", + ) + + CLICKZETTA_VOLUME_NAME: Optional[str] = Field( + description="ClickZetta volume name for external volumes", + default=None, + ) + + CLICKZETTA_VOLUME_TABLE_PREFIX: str = Field( + description="Prefix for ClickZetta volume table names", + default="dataset_", + ) + + CLICKZETTA_VOLUME_DIFY_PREFIX: str = Field( + description="Directory prefix for User Volume to organize Dify files", + default="dify_km", + ) diff --git a/api/configs/middleware/vdb/clickzetta_config.py b/api/configs/middleware/vdb/clickzetta_config.py new file mode 100644 index 0000000000..04f81e25fc --- /dev/null +++ b/api/configs/middleware/vdb/clickzetta_config.py @@ -0,0 +1,69 @@ +from typing import Optional + +from pydantic import BaseModel, Field + + +class ClickzettaConfig(BaseModel): + """ + Clickzetta Lakehouse vector database configuration + """ + + CLICKZETTA_USERNAME: Optional[str] = Field( + description="Username for authenticating with Clickzetta Lakehouse", + default=None, + ) + + CLICKZETTA_PASSWORD: Optional[str] = Field( + description="Password for authenticating with Clickzetta Lakehouse", + default=None, + ) + + CLICKZETTA_INSTANCE: Optional[str] = Field( + description="Clickzetta Lakehouse instance ID", + default=None, + ) + + CLICKZETTA_SERVICE: Optional[str] = Field( + description="Clickzetta API service endpoint (e.g., 'api.clickzetta.com')", + default="api.clickzetta.com", + ) + + CLICKZETTA_WORKSPACE: Optional[str] = Field( + description="Clickzetta workspace name", + default="default", + ) + + CLICKZETTA_VCLUSTER: Optional[str] = Field( + description="Clickzetta virtual cluster name", + default="default_ap", + ) + + CLICKZETTA_SCHEMA: Optional[str] = Field( + description="Database schema name in Clickzetta", + default="public", + ) + + CLICKZETTA_BATCH_SIZE: Optional[int] = Field( + description="Batch size for bulk insert operations", + default=100, + ) + + CLICKZETTA_ENABLE_INVERTED_INDEX: Optional[bool] = Field( + description="Enable inverted index for full-text search capabilities", + default=True, + ) + + CLICKZETTA_ANALYZER_TYPE: Optional[str] = Field( + description="Analyzer type for full-text search: keyword, english, chinese, unicode", + default="chinese", + ) + + CLICKZETTA_ANALYZER_MODE: Optional[str] = Field( + description="Analyzer mode for tokenization: max_word (fine-grained) or smart (intelligent)", + default="smart", + ) + + CLICKZETTA_VECTOR_DISTANCE_FUNCTION: Optional[str] = Field( + description="Distance function for vector similarity: l2_distance or cosine_distance", + default="cosine_distance", + ) diff --git a/api/controllers/console/datasets/datasets.py b/api/controllers/console/datasets/datasets.py index f551bc2432..93f82e8e24 100644 --- a/api/controllers/console/datasets/datasets.py +++ b/api/controllers/console/datasets/datasets.py @@ -683,6 +683,7 @@ class DatasetRetrievalSettingApi(Resource): | VectorType.HUAWEI_CLOUD | VectorType.TENCENT | VectorType.MATRIXONE + | VectorType.CLICKZETTA ): return { "retrieval_method": [ @@ -731,6 +732,7 @@ class DatasetRetrievalSettingMockApi(Resource): | VectorType.TENCENT | VectorType.HUAWEI_CLOUD | VectorType.MATRIXONE + | VectorType.CLICKZETTA ): return { "retrieval_method": [ diff --git a/api/core/rag/datasource/vdb/clickzetta/README.md b/api/core/rag/datasource/vdb/clickzetta/README.md new file mode 100644 index 0000000000..40229f8d44 --- /dev/null +++ b/api/core/rag/datasource/vdb/clickzetta/README.md @@ -0,0 +1,190 @@ +# Clickzetta Vector Database Integration + +This module provides integration with Clickzetta Lakehouse as a vector database for Dify. + +## Features + +- **Vector Storage**: Store and retrieve high-dimensional vectors using Clickzetta's native VECTOR type +- **Vector Search**: Efficient similarity search using HNSW algorithm +- **Full-Text Search**: Leverage Clickzetta's inverted index for powerful text search capabilities +- **Hybrid Search**: Combine vector similarity and full-text search for better results +- **Multi-language Support**: Built-in support for Chinese, English, and Unicode text processing +- **Scalable**: Leverage Clickzetta's distributed architecture for large-scale deployments + +## Configuration + +### Required Environment Variables + +All seven configuration parameters are required: + +```bash +# Authentication +CLICKZETTA_USERNAME=your_username +CLICKZETTA_PASSWORD=your_password + +# Instance configuration +CLICKZETTA_INSTANCE=your_instance_id +CLICKZETTA_SERVICE=api.clickzetta.com +CLICKZETTA_WORKSPACE=your_workspace +CLICKZETTA_VCLUSTER=your_vcluster +CLICKZETTA_SCHEMA=your_schema +``` + +### Optional Configuration + +```bash +# Batch processing +CLICKZETTA_BATCH_SIZE=100 + +# Full-text search configuration +CLICKZETTA_ENABLE_INVERTED_INDEX=true +CLICKZETTA_ANALYZER_TYPE=chinese # Options: keyword, english, chinese, unicode +CLICKZETTA_ANALYZER_MODE=smart # Options: max_word, smart + +# Vector search configuration +CLICKZETTA_VECTOR_DISTANCE_FUNCTION=cosine_distance # Options: l2_distance, cosine_distance +``` + +## Usage + +### 1. Set Clickzetta as the Vector Store + +In your Dify configuration, set: + +```bash +VECTOR_STORE=clickzetta +``` + +### 2. Table Structure + +Clickzetta will automatically create tables with the following structure: + +```sql +CREATE TABLE ( + id STRING NOT NULL, + content STRING NOT NULL, + metadata JSON, + vector VECTOR(FLOAT, ) NOT NULL, + PRIMARY KEY (id) +); + +-- Vector index for similarity search +CREATE VECTOR INDEX idx__vec +ON TABLE .(vector) +PROPERTIES ( + "distance.function" = "cosine_distance", + "scalar.type" = "f32" +); + +-- Inverted index for full-text search (if enabled) +CREATE INVERTED INDEX idx__text +ON .(content) +PROPERTIES ( + "analyzer" = "chinese", + "mode" = "smart" +); +``` + +## Full-Text Search Capabilities + +Clickzetta supports advanced full-text search with multiple analyzers: + +### Analyzer Types + +1. **keyword**: No tokenization, treats the entire string as a single token + - Best for: Exact matching, IDs, codes + +2. **english**: Designed for English text + - Features: Recognizes ASCII letters and numbers, converts to lowercase + - Best for: English content + +3. **chinese**: Chinese text tokenizer + - Features: Recognizes Chinese and English characters, removes punctuation + - Best for: Chinese or mixed Chinese-English content + +4. **unicode**: Multi-language tokenizer based on Unicode + - Features: Recognizes text boundaries in multiple languages + - Best for: Multi-language content + +### Analyzer Modes + +- **max_word**: Fine-grained tokenization (more tokens) +- **smart**: Intelligent tokenization (balanced) + +### Full-Text Search Functions + +- `MATCH_ALL(column, query)`: All terms must be present +- `MATCH_ANY(column, query)`: At least one term must be present +- `MATCH_PHRASE(column, query)`: Exact phrase matching +- `MATCH_PHRASE_PREFIX(column, query)`: Phrase prefix matching +- `MATCH_REGEXP(column, pattern)`: Regular expression matching + +## Performance Optimization + +### Vector Search + +1. **Adjust exploration factor** for accuracy vs speed trade-off: + ```sql + SET cz.vector.index.search.ef=64; + ``` + +2. **Use appropriate distance functions**: + - `cosine_distance`: Best for normalized embeddings (e.g., from language models) + - `l2_distance`: Best for raw feature vectors + +### Full-Text Search + +1. **Choose the right analyzer**: + - Use `keyword` for exact matching + - Use language-specific analyzers for better tokenization + +2. **Combine with vector search**: + - Pre-filter with full-text search for better performance + - Use hybrid search for improved relevance + +## Troubleshooting + +### Connection Issues + +1. Verify all 7 required configuration parameters are set +2. Check network connectivity to Clickzetta service +3. Ensure the user has proper permissions on the schema + +### Search Performance + +1. Verify vector index exists: + ```sql + SHOW INDEX FROM .; + ``` + +2. Check if vector index is being used: + ```sql + EXPLAIN SELECT ... WHERE l2_distance(...) < threshold; + ``` + Look for `vector_index_search_type` in the execution plan. + +### Full-Text Search Not Working + +1. Verify inverted index is created +2. Check analyzer configuration matches your content language +3. Use `TOKENIZE()` function to test tokenization: + ```sql + SELECT TOKENIZE('your text', map('analyzer', 'chinese', 'mode', 'smart')); + ``` + +## Limitations + +1. Vector operations don't support `ORDER BY` or `GROUP BY` directly on vector columns +2. Full-text search relevance scores are not provided by Clickzetta +3. Inverted index creation may fail for very large existing tables (continue without error) +4. Index naming constraints: + - Index names must be unique within a schema + - Only one vector index can be created per column + - The implementation uses timestamps to ensure unique index names +5. A column can only have one vector index at a time + +## References + +- [Clickzetta Vector Search Documentation](../../../../../../../yunqidoc/cn_markdown_20250526/vector-search.md) +- [Clickzetta Inverted Index Documentation](../../../../../../../yunqidoc/cn_markdown_20250526/inverted-index.md) +- [Clickzetta SQL Functions](../../../../../../../yunqidoc/cn_markdown_20250526/sql_functions/) diff --git a/api/core/rag/datasource/vdb/clickzetta/__init__.py b/api/core/rag/datasource/vdb/clickzetta/__init__.py new file mode 100644 index 0000000000..9d41c5a57d --- /dev/null +++ b/api/core/rag/datasource/vdb/clickzetta/__init__.py @@ -0,0 +1 @@ +# Clickzetta Vector Database Integration for Dify diff --git a/api/core/rag/datasource/vdb/clickzetta/clickzetta_vector.py b/api/core/rag/datasource/vdb/clickzetta/clickzetta_vector.py new file mode 100644 index 0000000000..d295bab5aa --- /dev/null +++ b/api/core/rag/datasource/vdb/clickzetta/clickzetta_vector.py @@ -0,0 +1,834 @@ +import json +import logging +import queue +import threading +import uuid +from typing import Any, Optional, TYPE_CHECKING + +import clickzetta # type: ignore +from pydantic import BaseModel, model_validator + +if TYPE_CHECKING: + from clickzetta import Connection + +from configs import dify_config +from core.rag.datasource.vdb.field import Field +from core.rag.datasource.vdb.vector_base import BaseVector +from core.rag.datasource.vdb.vector_factory import AbstractVectorFactory +from core.rag.embedding.embedding_base import Embeddings +from core.rag.models.document import Document +from models.dataset import Dataset + +logger = logging.getLogger(__name__) + + +# ClickZetta Lakehouse Vector Database Configuration + + +class ClickzettaConfig(BaseModel): + """ + Configuration class for Clickzetta connection. + """ + + username: str + password: str + instance: str + service: str = "api.clickzetta.com" + workspace: str = "quick_start" + vcluster: str = "default_ap" + schema_name: str = "dify" # Renamed to avoid shadowing BaseModel.schema + # Advanced settings + batch_size: int = 20 # Reduced batch size to avoid large SQL statements + enable_inverted_index: bool = True # Enable inverted index for full-text search + analyzer_type: str = "chinese" # Analyzer type for full-text search: keyword, english, chinese, unicode + analyzer_mode: str = "smart" # Analyzer mode: max_word, smart + vector_distance_function: str = "cosine_distance" # l2_distance or cosine_distance + + @model_validator(mode="before") + @classmethod + def validate_config(cls, values: dict) -> dict: + """ + Validate the configuration values. + """ + if not values.get("username"): + raise ValueError("config CLICKZETTA_USERNAME is required") + if not values.get("password"): + raise ValueError("config CLICKZETTA_PASSWORD is required") + if not values.get("instance"): + raise ValueError("config CLICKZETTA_INSTANCE is required") + if not values.get("service"): + raise ValueError("config CLICKZETTA_SERVICE is required") + if not values.get("workspace"): + raise ValueError("config CLICKZETTA_WORKSPACE is required") + if not values.get("vcluster"): + raise ValueError("config CLICKZETTA_VCLUSTER is required") + if not values.get("schema_name"): + raise ValueError("config CLICKZETTA_SCHEMA is required") + return values + + +class ClickzettaVector(BaseVector): + """ + Clickzetta vector storage implementation. + """ + + # Class-level write queue and lock for serializing writes + _write_queue: Optional[queue.Queue] = None + _write_thread: Optional[threading.Thread] = None + _write_lock = threading.Lock() + _shutdown = False + + def __init__(self, collection_name: str, config: ClickzettaConfig): + super().__init__(collection_name) + self._config = config + self._table_name = collection_name.replace("-", "_").lower() # Ensure valid table name + self._connection: Optional["Connection"] = None + self._init_connection() + self._init_write_queue() + + def _init_connection(self): + """Initialize Clickzetta connection.""" + self._connection = clickzetta.connect( + username=self._config.username, + password=self._config.password, + instance=self._config.instance, + service=self._config.service, + workspace=self._config.workspace, + vcluster=self._config.vcluster, + schema=self._config.schema_name + ) + + # Set session parameters for better string handling and performance optimization + if self._connection is not None: + with self._connection.cursor() as cursor: + # Use quote mode for string literal escaping to handle quotes better + cursor.execute("SET cz.sql.string.literal.escape.mode = 'quote'") + logger.info("Set string literal escape mode to 'quote' for better quote handling") + + # Performance optimization hints for vector operations + self._set_performance_hints(cursor) + + def _set_performance_hints(self, cursor): + """Set ClickZetta performance optimization hints for vector operations.""" + try: + # Performance optimization hints for vector operations and query processing + performance_hints = [ + # Vector index optimization + "SET cz.storage.parquet.vector.index.read.memory.cache = true", + "SET cz.storage.parquet.vector.index.read.local.cache = false", + + # Query optimization + "SET cz.sql.table.scan.push.down.filter = true", + "SET cz.sql.table.scan.enable.ensure.filter = true", + "SET cz.storage.always.prefetch.internal = true", + "SET cz.optimizer.generate.columns.always.valid = true", + "SET cz.sql.index.prewhere.enabled = true", + + # Storage optimization + "SET cz.storage.parquet.enable.io.prefetch = false", + "SET cz.optimizer.enable.mv.rewrite = false", + "SET cz.sql.dump.as.lz4 = true", + "SET cz.optimizer.limited.optimization.naive.query = true", + "SET cz.sql.table.scan.enable.push.down.log = false", + "SET cz.storage.use.file.format.local.stats = false", + "SET cz.storage.local.file.object.cache.level = all", + + # Job execution optimization + "SET cz.sql.job.fast.mode = true", + "SET cz.storage.parquet.non.contiguous.read = true", + "SET cz.sql.compaction.after.commit = true" + ] + + for hint in performance_hints: + cursor.execute(hint) + + logger.info("Applied %d performance optimization hints for ClickZetta vector operations", len(performance_hints)) + + except Exception: + # Catch any errors setting performance hints but continue with defaults + logger.exception("Failed to set some performance hints, continuing with default settings") + + @classmethod + def _init_write_queue(cls): + """Initialize the write queue and worker thread.""" + with cls._write_lock: + if cls._write_queue is None: + cls._write_queue = queue.Queue() + cls._write_thread = threading.Thread(target=cls._write_worker, daemon=True) + cls._write_thread.start() + logger.info("Started Clickzetta write worker thread") + + @classmethod + def _write_worker(cls): + """Worker thread that processes write tasks sequentially.""" + while not cls._shutdown: + try: + # Get task from queue with timeout + if cls._write_queue is not None: + task = cls._write_queue.get(timeout=1) + if task is None: # Shutdown signal + break + + # Execute the write task + func, args, kwargs, result_queue = task + try: + result = func(*args, **kwargs) + result_queue.put((True, result)) + except (RuntimeError, ValueError, TypeError, ConnectionError) as e: + logger.exception("Write task failed") + result_queue.put((False, e)) + finally: + cls._write_queue.task_done() + else: + break + except queue.Empty: + continue + except (RuntimeError, ValueError, TypeError, ConnectionError) as e: + logger.exception("Write worker error") + + def _execute_write(self, func, *args, **kwargs): + """Execute a write operation through the queue.""" + if ClickzettaVector._write_queue is None: + raise RuntimeError("Write queue not initialized") + + result_queue: queue.Queue[tuple[bool, Any]] = queue.Queue() + ClickzettaVector._write_queue.put((func, args, kwargs, result_queue)) + + # Wait for result + success, result = result_queue.get() + if not success: + raise result + return result + + def get_type(self) -> str: + """Return the vector database type.""" + return "clickzetta" + + def _ensure_connection(self) -> "Connection": + """Ensure connection is available and return it.""" + if self._connection is None: + raise RuntimeError("Database connection not initialized") + return self._connection + + def _table_exists(self) -> bool: + """Check if the table exists.""" + try: + connection = self._ensure_connection() + with connection.cursor() as cursor: + cursor.execute(f"DESC {self._config.schema_name}.{self._table_name}") + return True + except (RuntimeError, ValueError) as e: + if "table or view not found" in str(e).lower(): + return False + else: + # Re-raise if it's a different error + raise + + def create(self, texts: list[Document], embeddings: list[list[float]], **kwargs): + """Create the collection and add initial documents.""" + # Execute table creation through write queue to avoid concurrent conflicts + self._execute_write(self._create_table_and_indexes, embeddings) + + # Add initial texts + if texts: + self.add_texts(texts, embeddings, **kwargs) + + def _create_table_and_indexes(self, embeddings: list[list[float]]): + """Create table and indexes (executed in write worker thread).""" + # Check if table already exists to avoid unnecessary index creation + if self._table_exists(): + logger.info("Table %s.%s already exists, skipping creation", self._config.schema_name, self._table_name) + return + + # Create table with vector and metadata columns + dimension = len(embeddings[0]) if embeddings else 768 + + create_table_sql = f""" + CREATE TABLE IF NOT EXISTS {self._config.schema_name}.{self._table_name} ( + id STRING NOT NULL COMMENT 'Unique document identifier', + {Field.CONTENT_KEY.value} STRING NOT NULL COMMENT 'Document text content for search and retrieval', + {Field.METADATA_KEY.value} JSON COMMENT 'Document metadata including source, type, and other attributes', + {Field.VECTOR.value} VECTOR(FLOAT, {dimension}) NOT NULL COMMENT + 'High-dimensional embedding vector for semantic similarity search', + PRIMARY KEY (id) + ) COMMENT 'Dify RAG knowledge base vector storage table for document embeddings and content' + """ + + connection = self._ensure_connection() + with connection.cursor() as cursor: + cursor.execute(create_table_sql) + logger.info("Created table %s.%s", self._config.schema_name, self._table_name) + + # Create vector index + self._create_vector_index(cursor) + + # Create inverted index for full-text search if enabled + if self._config.enable_inverted_index: + self._create_inverted_index(cursor) + + def _create_vector_index(self, cursor): + """Create HNSW vector index for similarity search.""" + # Use a fixed index name based on table and column name + index_name = f"idx_{self._table_name}_vector" + + # First check if an index already exists on this column + try: + cursor.execute(f"SHOW INDEX FROM {self._config.schema_name}.{self._table_name}") + existing_indexes = cursor.fetchall() + for idx in existing_indexes: + # Check if vector index already exists on the embedding column + if Field.VECTOR.value in str(idx).lower(): + logger.info("Vector index already exists on column %s", Field.VECTOR.value) + return + except (RuntimeError, ValueError) as e: + logger.warning("Failed to check existing indexes: %s", e) + + index_sql = f""" + CREATE VECTOR INDEX IF NOT EXISTS {index_name} + ON TABLE {self._config.schema_name}.{self._table_name}({Field.VECTOR.value}) + PROPERTIES ( + "distance.function" = "{self._config.vector_distance_function}", + "scalar.type" = "f32", + "m" = "16", + "ef.construction" = "128" + ) + """ + try: + cursor.execute(index_sql) + logger.info("Created vector index: %s", index_name) + except (RuntimeError, ValueError) as e: + error_msg = str(e).lower() + if ("already exists" in error_msg or + "already has index" in error_msg or + "with the same type" in error_msg): + logger.info("Vector index already exists: %s", e) + else: + logger.exception("Failed to create vector index") + raise + + def _create_inverted_index(self, cursor): + """Create inverted index for full-text search.""" + # Use a fixed index name based on table name to avoid duplicates + index_name = f"idx_{self._table_name}_text" + + # Check if an inverted index already exists on this column + try: + cursor.execute(f"SHOW INDEX FROM {self._config.schema_name}.{self._table_name}") + existing_indexes = cursor.fetchall() + for idx in existing_indexes: + idx_str = str(idx).lower() + # More precise check: look for inverted index specifically on the content column + if ("inverted" in idx_str and + Field.CONTENT_KEY.value.lower() in idx_str and + (index_name.lower() in idx_str or f"idx_{self._table_name}_text" in idx_str)): + logger.info("Inverted index already exists on column %s: %s", Field.CONTENT_KEY.value, idx) + return + except (RuntimeError, ValueError) as e: + logger.warning("Failed to check existing indexes: %s", e) + + index_sql = f""" + CREATE INVERTED INDEX IF NOT EXISTS {index_name} + ON TABLE {self._config.schema_name}.{self._table_name} ({Field.CONTENT_KEY.value}) + PROPERTIES ( + "analyzer" = "{self._config.analyzer_type}", + "mode" = "{self._config.analyzer_mode}" + ) + """ + try: + cursor.execute(index_sql) + logger.info("Created inverted index: %s", index_name) + except (RuntimeError, ValueError) as e: + error_msg = str(e).lower() + # Handle ClickZetta specific error messages + if (("already exists" in error_msg or + "already has index" in error_msg or + "with the same type" in error_msg or + "cannot create inverted index" in error_msg) and + "already has index" in error_msg): + logger.info("Inverted index already exists on column %s", Field.CONTENT_KEY.value) + # Try to get the existing index name for logging + try: + cursor.execute(f"SHOW INDEX FROM {self._config.schema_name}.{self._table_name}") + existing_indexes = cursor.fetchall() + for idx in existing_indexes: + if "inverted" in str(idx).lower() and Field.CONTENT_KEY.value.lower() in str(idx).lower(): + logger.info("Found existing inverted index: %s", idx) + break + except (RuntimeError, ValueError): + pass + else: + logger.warning("Failed to create inverted index: %s", e) + # Continue without inverted index - full-text search will fall back to LIKE + + + def add_texts(self, documents: list[Document], embeddings: list[list[float]], **kwargs): + """Add documents with embeddings to the collection.""" + if not documents: + return + + batch_size = self._config.batch_size + total_batches = (len(documents) + batch_size - 1) // batch_size + + for i in range(0, len(documents), batch_size): + batch_docs = documents[i:i + batch_size] + batch_embeddings = embeddings[i:i + batch_size] + + # Execute batch insert through write queue + self._execute_write(self._insert_batch, batch_docs, batch_embeddings, i, batch_size, total_batches) + + def _insert_batch(self, batch_docs: list[Document], batch_embeddings: list[list[float]], + batch_index: int, batch_size: int, total_batches: int): + """Insert a batch of documents using parameterized queries (executed in write worker thread).""" + if not batch_docs or not batch_embeddings: + logger.warning("Empty batch provided, skipping insertion") + return + + if len(batch_docs) != len(batch_embeddings): + logger.error("Mismatch between docs (%d) and embeddings (%d)", len(batch_docs), len(batch_embeddings)) + return + + # Prepare data for parameterized insertion + data_rows = [] + vector_dimension = len(batch_embeddings[0]) if batch_embeddings and batch_embeddings[0] else 768 + + for doc, embedding in zip(batch_docs, batch_embeddings): + # Optimized: minimal checks for common case, fallback for edge cases + metadata = doc.metadata if doc.metadata else {} + + if not isinstance(metadata, dict): + metadata = {} + + doc_id = self._safe_doc_id(metadata.get("doc_id", str(uuid.uuid4()))) + + # Fast path for JSON serialization + try: + metadata_json = json.dumps(metadata, ensure_ascii=True) + except (TypeError, ValueError): + logger.warning("JSON serialization failed, using empty dict") + metadata_json = "{}" + + content = doc.page_content or "" + + # According to ClickZetta docs, vector should be formatted as array string + # for external systems: '[1.0, 2.0, 3.0]' + vector_str = '[' + ','.join(map(str, embedding)) + ']' + data_rows.append([doc_id, content, metadata_json, vector_str]) + + # Check if we have any valid data to insert + if not data_rows: + logger.warning("No valid documents to insert in batch %d/%d", batch_index // batch_size + 1, total_batches) + return + + # Use parameterized INSERT with executemany for better performance and security + # Cast JSON and VECTOR in SQL, pass raw data as parameters + columns = f"id, {Field.CONTENT_KEY.value}, {Field.METADATA_KEY.value}, {Field.VECTOR.value}" + insert_sql = ( + f"INSERT INTO {self._config.schema_name}.{self._table_name} ({columns}) " + f"VALUES (?, ?, CAST(? AS JSON), CAST(? AS VECTOR({vector_dimension})))" + ) + + connection = self._ensure_connection() + with connection.cursor() as cursor: + try: + # Set session-level hints for batch insert operations + # Note: executemany doesn't support hints parameter, so we set them as session variables + cursor.execute("SET cz.sql.job.fast.mode = true") + cursor.execute("SET cz.sql.compaction.after.commit = true") + cursor.execute("SET cz.storage.always.prefetch.internal = true") + + cursor.executemany(insert_sql, data_rows) + logger.info( + f"Inserted batch {batch_index // batch_size + 1}/{total_batches} " + f"({len(data_rows)} valid docs using parameterized query with VECTOR({vector_dimension}) cast)" + ) + except (RuntimeError, ValueError, TypeError, ConnectionError) as e: + logger.exception("Parameterized SQL execution failed for %d documents: %s", len(data_rows), e) + logger.exception("SQL template: %s", insert_sql) + logger.exception("Sample data row: %s", data_rows[0] if data_rows else 'None') + raise + + def text_exists(self, id: str) -> bool: + """Check if a document exists by ID.""" + safe_id = self._safe_doc_id(id) + connection = self._ensure_connection() + with connection.cursor() as cursor: + cursor.execute( + f"SELECT COUNT(*) FROM {self._config.schema_name}.{self._table_name} WHERE id = ?", + [safe_id] + ) + result = cursor.fetchone() + return result[0] > 0 if result else False + + def delete_by_ids(self, ids: list[str]) -> None: + """Delete documents by IDs.""" + if not ids: + return + + # Check if table exists before attempting delete + if not self._table_exists(): + logger.warning("Table %s.%s does not exist, skipping delete", self._config.schema_name, self._table_name) + return + + # Execute delete through write queue + self._execute_write(self._delete_by_ids_impl, ids) + + def _delete_by_ids_impl(self, ids: list[str]) -> None: + """Implementation of delete by IDs (executed in write worker thread).""" + safe_ids = [self._safe_doc_id(id) for id in ids] + # Create properly escaped string literals for SQL + id_list = ",".join(f"'{id}'" for id in safe_ids) + sql = f"DELETE FROM {self._config.schema_name}.{self._table_name} WHERE id IN ({id_list})" + + connection = self._ensure_connection() + with connection.cursor() as cursor: + cursor.execute(sql) + + def delete_by_metadata_field(self, key: str, value: str) -> None: + """Delete documents by metadata field.""" + # Check if table exists before attempting delete + if not self._table_exists(): + logger.warning("Table %s.%s does not exist, skipping delete", self._config.schema_name, self._table_name) + return + + # Execute delete through write queue + self._execute_write(self._delete_by_metadata_field_impl, key, value) + + def _delete_by_metadata_field_impl(self, key: str, value: str) -> None: + """Implementation of delete by metadata field (executed in write worker thread).""" + connection = self._ensure_connection() + with connection.cursor() as cursor: + # Using JSON path to filter with parameterized query + # Note: JSON path requires literal key name, cannot be parameterized + # Use json_extract_string function for ClickZetta compatibility + sql = (f"DELETE FROM {self._config.schema_name}.{self._table_name} " + f"WHERE json_extract_string({Field.METADATA_KEY.value}, '$.{key}') = ?") + cursor.execute(sql, [value]) + + def search_by_vector(self, query_vector: list[float], **kwargs: Any) -> list[Document]: + """Search for documents by vector similarity.""" + top_k = kwargs.get("top_k", 10) + score_threshold = kwargs.get("score_threshold", 0.0) + document_ids_filter = kwargs.get("document_ids_filter") + + # Handle filter parameter from canvas (workflow) + filter_param = kwargs.get("filter", {}) + + # Build filter clause + filter_clauses = [] + if document_ids_filter: + safe_doc_ids = [str(id).replace("'", "''") for id in document_ids_filter] + doc_ids_str = ",".join(f"'{id}'" for id in safe_doc_ids) + # Use json_extract_string function for ClickZetta compatibility + filter_clauses.append( + f"json_extract_string({Field.METADATA_KEY.value}, '$.document_id') IN ({doc_ids_str})" + ) + + # No need for dataset_id filter since each dataset has its own table + + # Add distance threshold based on distance function + vector_dimension = len(query_vector) + if self._config.vector_distance_function == "cosine_distance": + # For cosine distance, smaller is better (0 = identical, 2 = opposite) + distance_func = "COSINE_DISTANCE" + if score_threshold > 0: + query_vector_str = f"CAST('[{self._format_vector_simple(query_vector)}]' AS VECTOR({vector_dimension}))" + filter_clauses.append(f"{distance_func}({Field.VECTOR.value}, " + f"{query_vector_str}) < {2 - score_threshold}") + else: + # For L2 distance, smaller is better + distance_func = "L2_DISTANCE" + if score_threshold > 0: + query_vector_str = f"CAST('[{self._format_vector_simple(query_vector)}]' AS VECTOR({vector_dimension}))" + filter_clauses.append(f"{distance_func}({Field.VECTOR.value}, " + f"{query_vector_str}) < {score_threshold}") + + where_clause = " AND ".join(filter_clauses) if filter_clauses else "1=1" + + # Execute vector search query + query_vector_str = f"CAST('[{self._format_vector_simple(query_vector)}]' AS VECTOR({vector_dimension}))" + search_sql = f""" + SELECT id, {Field.CONTENT_KEY.value}, {Field.METADATA_KEY.value}, + {distance_func}({Field.VECTOR.value}, {query_vector_str}) AS distance + FROM {self._config.schema_name}.{self._table_name} + WHERE {where_clause} + ORDER BY distance + LIMIT {top_k} + """ + + documents = [] + connection = self._ensure_connection() + with connection.cursor() as cursor: + # Use hints parameter for vector search optimization + search_hints = { + 'hints': { + 'sdk.job.timeout': 60, # Increase timeout for vector search + 'cz.sql.job.fast.mode': True, + 'cz.storage.parquet.vector.index.read.memory.cache': True + } + } + cursor.execute(search_sql, parameters=search_hints) + results = cursor.fetchall() + + for row in results: + # Parse metadata from JSON string (may be double-encoded) + try: + if row[2]: + metadata = json.loads(row[2]) + + # If result is a string, it's double-encoded JSON - parse again + if isinstance(metadata, str): + metadata = json.loads(metadata) + + if not isinstance(metadata, dict): + metadata = {} + else: + metadata = {} + except (json.JSONDecodeError, TypeError) as e: + logger.error("JSON parsing failed: %s", e) + # Fallback: extract document_id with regex + import re + doc_id_match = re.search(r'"document_id":\s*"([^"]+)"', str(row[2] or '')) + metadata = {"document_id": doc_id_match.group(1)} if doc_id_match else {} + + # Ensure required fields are set + metadata["doc_id"] = row[0] # segment id + + # Ensure document_id exists (critical for Dify's format_retrieval_documents) + if "document_id" not in metadata: + metadata["document_id"] = row[0] # fallback to segment id + + # Add score based on distance + if self._config.vector_distance_function == "cosine_distance": + metadata["score"] = 1 - (row[3] / 2) + else: + metadata["score"] = 1 / (1 + row[3]) + + doc = Document(page_content=row[1], metadata=metadata) + documents.append(doc) + + return documents + + def search_by_full_text(self, query: str, **kwargs: Any) -> list[Document]: + """Search for documents using full-text search with inverted index.""" + if not self._config.enable_inverted_index: + logger.warning("Full-text search is not enabled. Enable inverted index in config.") + return [] + + top_k = kwargs.get("top_k", 10) + document_ids_filter = kwargs.get("document_ids_filter") + + # Handle filter parameter from canvas (workflow) + filter_param = kwargs.get("filter", {}) + + # Build filter clause + filter_clauses = [] + if document_ids_filter: + safe_doc_ids = [str(id).replace("'", "''") for id in document_ids_filter] + doc_ids_str = ",".join(f"'{id}'" for id in safe_doc_ids) + # Use json_extract_string function for ClickZetta compatibility + filter_clauses.append( + f"json_extract_string({Field.METADATA_KEY.value}, '$.document_id') IN ({doc_ids_str})" + ) + + # No need for dataset_id filter since each dataset has its own table + + # Use match_all function for full-text search + # match_all requires all terms to be present + # Use simple quote escaping for MATCH_ALL since it needs to be in the WHERE clause + escaped_query = query.replace("'", "''") + filter_clauses.append(f"MATCH_ALL({Field.CONTENT_KEY.value}, '{escaped_query}')") + + where_clause = " AND ".join(filter_clauses) + + # Execute full-text search query + search_sql = f""" + SELECT id, {Field.CONTENT_KEY.value}, {Field.METADATA_KEY.value} + FROM {self._config.schema_name}.{self._table_name} + WHERE {where_clause} + LIMIT {top_k} + """ + + documents = [] + connection = self._ensure_connection() + with connection.cursor() as cursor: + try: + # Use hints parameter for full-text search optimization + fulltext_hints = { + 'hints': { + 'sdk.job.timeout': 30, # Timeout for full-text search + 'cz.sql.job.fast.mode': True, + 'cz.sql.index.prewhere.enabled': True + } + } + cursor.execute(search_sql, parameters=fulltext_hints) + results = cursor.fetchall() + + for row in results: + # Parse metadata from JSON string (may be double-encoded) + try: + if row[2]: + metadata = json.loads(row[2]) + + # If result is a string, it's double-encoded JSON - parse again + if isinstance(metadata, str): + metadata = json.loads(metadata) + + if not isinstance(metadata, dict): + metadata = {} + else: + metadata = {} + except (json.JSONDecodeError, TypeError) as e: + logger.error("JSON parsing failed: %s", e) + # Fallback: extract document_id with regex + import re + doc_id_match = re.search(r'"document_id":\s*"([^"]+)"', str(row[2] or '')) + metadata = {"document_id": doc_id_match.group(1)} if doc_id_match else {} + + # Ensure required fields are set + metadata["doc_id"] = row[0] # segment id + + # Ensure document_id exists (critical for Dify's format_retrieval_documents) + if "document_id" not in metadata: + metadata["document_id"] = row[0] # fallback to segment id + + # Add a relevance score for full-text search + metadata["score"] = 1.0 # Clickzetta doesn't provide relevance scores + doc = Document(page_content=row[1], metadata=metadata) + documents.append(doc) + except (RuntimeError, ValueError, TypeError, ConnectionError) as e: + logger.exception("Full-text search failed") + # Fallback to LIKE search if full-text search fails + return self._search_by_like(query, **kwargs) + + return documents + + def _search_by_like(self, query: str, **kwargs: Any) -> list[Document]: + """Fallback search using LIKE operator.""" + top_k = kwargs.get("top_k", 10) + document_ids_filter = kwargs.get("document_ids_filter") + + # Handle filter parameter from canvas (workflow) + filter_param = kwargs.get("filter", {}) + + # Build filter clause + filter_clauses = [] + if document_ids_filter: + safe_doc_ids = [str(id).replace("'", "''") for id in document_ids_filter] + doc_ids_str = ",".join(f"'{id}'" for id in safe_doc_ids) + # Use json_extract_string function for ClickZetta compatibility + filter_clauses.append( + f"json_extract_string({Field.METADATA_KEY.value}, '$.document_id') IN ({doc_ids_str})" + ) + + # No need for dataset_id filter since each dataset has its own table + + # Use simple quote escaping for LIKE clause + escaped_query = query.replace("'", "''") + filter_clauses.append(f"{Field.CONTENT_KEY.value} LIKE '%{escaped_query}%'") + where_clause = " AND ".join(filter_clauses) + + search_sql = f""" + SELECT id, {Field.CONTENT_KEY.value}, {Field.METADATA_KEY.value} + FROM {self._config.schema_name}.{self._table_name} + WHERE {where_clause} + LIMIT {top_k} + """ + + documents = [] + connection = self._ensure_connection() + with connection.cursor() as cursor: + # Use hints parameter for LIKE search optimization + like_hints = { + 'hints': { + 'sdk.job.timeout': 20, # Timeout for LIKE search + 'cz.sql.job.fast.mode': True + } + } + cursor.execute(search_sql, parameters=like_hints) + results = cursor.fetchall() + + for row in results: + # Parse metadata from JSON string (may be double-encoded) + try: + if row[2]: + metadata = json.loads(row[2]) + + # If result is a string, it's double-encoded JSON - parse again + if isinstance(metadata, str): + metadata = json.loads(metadata) + + if not isinstance(metadata, dict): + metadata = {} + else: + metadata = {} + except (json.JSONDecodeError, TypeError) as e: + logger.error("JSON parsing failed: %s", e) + # Fallback: extract document_id with regex + import re + doc_id_match = re.search(r'"document_id":\s*"([^"]+)"', str(row[2] or '')) + metadata = {"document_id": doc_id_match.group(1)} if doc_id_match else {} + + # Ensure required fields are set + metadata["doc_id"] = row[0] # segment id + + # Ensure document_id exists (critical for Dify's format_retrieval_documents) + if "document_id" not in metadata: + metadata["document_id"] = row[0] # fallback to segment id + + metadata["score"] = 0.5 # Lower score for LIKE search + doc = Document(page_content=row[1], metadata=metadata) + documents.append(doc) + + return documents + + def delete(self) -> None: + """Delete the entire collection.""" + connection = self._ensure_connection() + with connection.cursor() as cursor: + cursor.execute(f"DROP TABLE IF EXISTS {self._config.schema_name}.{self._table_name}") + + + def _format_vector_simple(self, vector: list[float]) -> str: + """Simple vector formatting for SQL queries.""" + return ','.join(map(str, vector)) + + def _safe_doc_id(self, doc_id: str) -> str: + """Ensure doc_id is safe for SQL and doesn't contain special characters.""" + if not doc_id: + return str(uuid.uuid4()) + # Remove or replace potentially problematic characters + safe_id = str(doc_id) + # Only allow alphanumeric, hyphens, underscores + safe_id = ''.join(c for c in safe_id if c.isalnum() or c in '-_') + if not safe_id: # If all characters were removed + return str(uuid.uuid4()) + return safe_id[:255] # Limit length + + + +class ClickzettaVectorFactory(AbstractVectorFactory): + """Factory for creating Clickzetta vector instances.""" + + def init_vector(self, dataset: Dataset, attributes: list, embeddings: Embeddings) -> BaseVector: + """Initialize a Clickzetta vector instance.""" + # Get configuration from environment variables or dataset config + config = ClickzettaConfig( + username=dify_config.CLICKZETTA_USERNAME or "", + password=dify_config.CLICKZETTA_PASSWORD or "", + instance=dify_config.CLICKZETTA_INSTANCE or "", + service=dify_config.CLICKZETTA_SERVICE or "api.clickzetta.com", + workspace=dify_config.CLICKZETTA_WORKSPACE or "quick_start", + vcluster=dify_config.CLICKZETTA_VCLUSTER or "default_ap", + schema_name=dify_config.CLICKZETTA_SCHEMA or "dify", + batch_size=dify_config.CLICKZETTA_BATCH_SIZE or 100, + enable_inverted_index=dify_config.CLICKZETTA_ENABLE_INVERTED_INDEX or True, + analyzer_type=dify_config.CLICKZETTA_ANALYZER_TYPE or "chinese", + analyzer_mode=dify_config.CLICKZETTA_ANALYZER_MODE or "smart", + vector_distance_function=dify_config.CLICKZETTA_VECTOR_DISTANCE_FUNCTION or "cosine_distance", + ) + + # Use dataset collection name as table name + collection_name = Dataset.gen_collection_name_by_id(dataset.id).lower() + + return ClickzettaVector(collection_name=collection_name, config=config) + diff --git a/api/core/rag/datasource/vdb/vector_factory.py b/api/core/rag/datasource/vdb/vector_factory.py index 43c49ed4b3..eef03ce412 100644 --- a/api/core/rag/datasource/vdb/vector_factory.py +++ b/api/core/rag/datasource/vdb/vector_factory.py @@ -172,6 +172,10 @@ class Vector: from core.rag.datasource.vdb.matrixone.matrixone_vector import MatrixoneVectorFactory return MatrixoneVectorFactory + case VectorType.CLICKZETTA: + from core.rag.datasource.vdb.clickzetta.clickzetta_vector import ClickzettaVectorFactory + + return ClickzettaVectorFactory case _: raise ValueError(f"Vector store {vector_type} is not supported.") diff --git a/api/core/rag/datasource/vdb/vector_type.py b/api/core/rag/datasource/vdb/vector_type.py index 0d70947b72..a415142196 100644 --- a/api/core/rag/datasource/vdb/vector_type.py +++ b/api/core/rag/datasource/vdb/vector_type.py @@ -30,3 +30,4 @@ class VectorType(StrEnum): TABLESTORE = "tablestore" HUAWEI_CLOUD = "huawei_cloud" MATRIXONE = "matrixone" + CLICKZETTA = "clickzetta" diff --git a/api/extensions/ext_storage.py b/api/extensions/ext_storage.py index bd35278544..d13393dd14 100644 --- a/api/extensions/ext_storage.py +++ b/api/extensions/ext_storage.py @@ -69,6 +69,19 @@ class Storage: from extensions.storage.supabase_storage import SupabaseStorage return SupabaseStorage + case StorageType.CLICKZETTA_VOLUME: + from extensions.storage.clickzetta_volume.clickzetta_volume_storage import ( + ClickZettaVolumeConfig, + ClickZettaVolumeStorage, + ) + + def create_clickzetta_volume_storage(): + # ClickZettaVolumeConfig will automatically read from environment variables + # and fallback to CLICKZETTA_* config if CLICKZETTA_VOLUME_* is not set + volume_config = ClickZettaVolumeConfig() + return ClickZettaVolumeStorage(volume_config) + + return create_clickzetta_volume_storage case _: raise ValueError(f"unsupported storage type {storage_type}") diff --git a/api/extensions/storage/clickzetta_volume/__init__.py b/api/extensions/storage/clickzetta_volume/__init__.py new file mode 100644 index 0000000000..8a1588034b --- /dev/null +++ b/api/extensions/storage/clickzetta_volume/__init__.py @@ -0,0 +1,5 @@ +"""ClickZetta Volume storage implementation.""" + +from .clickzetta_volume_storage import ClickZettaVolumeStorage + +__all__ = ["ClickZettaVolumeStorage"] diff --git a/api/extensions/storage/clickzetta_volume/clickzetta_volume_storage.py b/api/extensions/storage/clickzetta_volume/clickzetta_volume_storage.py new file mode 100644 index 0000000000..09ab37f42e --- /dev/null +++ b/api/extensions/storage/clickzetta_volume/clickzetta_volume_storage.py @@ -0,0 +1,530 @@ +"""ClickZetta Volume Storage Implementation + +This module provides storage backend using ClickZetta Volume functionality. +Supports Table Volume, User Volume, and External Volume types. +""" + +import logging +import os +import tempfile +from collections.abc import Generator +from io import BytesIO +from pathlib import Path +from typing import Optional + +import clickzetta # type: ignore[import] +from pydantic import BaseModel, model_validator + +from extensions.storage.base_storage import BaseStorage + +from .volume_permissions import VolumePermissionManager, check_volume_permission + +logger = logging.getLogger(__name__) + + +class ClickZettaVolumeConfig(BaseModel): + """Configuration for ClickZetta Volume storage.""" + + username: str = "" + password: str = "" + instance: str = "" + service: str = "api.clickzetta.com" + workspace: str = "quick_start" + vcluster: str = "default_ap" + schema_name: str = "dify" + volume_type: str = "table" # table|user|external + volume_name: Optional[str] = None # For external volumes + table_prefix: str = "dataset_" # Prefix for table volume names + dify_prefix: str = "dify_km" # Directory prefix for User Volume + permission_check: bool = True # Enable/disable permission checking + + @model_validator(mode="before") + @classmethod + def validate_config(cls, values: dict) -> dict: + """Validate the configuration values. + + This method will first try to use CLICKZETTA_VOLUME_* environment variables, + then fall back to CLICKZETTA_* environment variables (for vector DB config). + """ + import os + + # Helper function to get environment variable with fallback + def get_env_with_fallback(volume_key: str, fallback_key: str, default: str | None = None) -> str: + # First try CLICKZETTA_VOLUME_* specific config + volume_value = values.get(volume_key.lower().replace("clickzetta_volume_", "")) + if volume_value: + return str(volume_value) + + # Then try environment variables + volume_env = os.getenv(volume_key) + if volume_env: + return volume_env + + # Fall back to existing CLICKZETTA_* config + fallback_env = os.getenv(fallback_key) + if fallback_env: + return fallback_env + + return default or "" + + # Apply environment variables with fallback to existing CLICKZETTA_* config + values.setdefault("username", get_env_with_fallback("CLICKZETTA_VOLUME_USERNAME", "CLICKZETTA_USERNAME")) + values.setdefault("password", get_env_with_fallback("CLICKZETTA_VOLUME_PASSWORD", "CLICKZETTA_PASSWORD")) + values.setdefault("instance", get_env_with_fallback("CLICKZETTA_VOLUME_INSTANCE", "CLICKZETTA_INSTANCE")) + values.setdefault( + "service", get_env_with_fallback("CLICKZETTA_VOLUME_SERVICE", "CLICKZETTA_SERVICE", "api.clickzetta.com") + ) + values.setdefault( + "workspace", get_env_with_fallback("CLICKZETTA_VOLUME_WORKSPACE", "CLICKZETTA_WORKSPACE", "quick_start") + ) + values.setdefault( + "vcluster", get_env_with_fallback("CLICKZETTA_VOLUME_VCLUSTER", "CLICKZETTA_VCLUSTER", "default_ap") + ) + values.setdefault("schema_name", get_env_with_fallback("CLICKZETTA_VOLUME_SCHEMA", "CLICKZETTA_SCHEMA", "dify")) + + # Volume-specific configurations (no fallback to vector DB config) + values.setdefault("volume_type", os.getenv("CLICKZETTA_VOLUME_TYPE", "table")) + values.setdefault("volume_name", os.getenv("CLICKZETTA_VOLUME_NAME")) + values.setdefault("table_prefix", os.getenv("CLICKZETTA_VOLUME_TABLE_PREFIX", "dataset_")) + values.setdefault("dify_prefix", os.getenv("CLICKZETTA_VOLUME_DIFY_PREFIX", "dify_km")) + # 暂时禁用权限检查功能,直接设置为false + values.setdefault("permission_check", False) + + # Validate required fields + if not values.get("username"): + raise ValueError("CLICKZETTA_VOLUME_USERNAME or CLICKZETTA_USERNAME is required") + if not values.get("password"): + raise ValueError("CLICKZETTA_VOLUME_PASSWORD or CLICKZETTA_PASSWORD is required") + if not values.get("instance"): + raise ValueError("CLICKZETTA_VOLUME_INSTANCE or CLICKZETTA_INSTANCE is required") + + # Validate volume type + volume_type = values["volume_type"] + if volume_type not in ["table", "user", "external"]: + raise ValueError("CLICKZETTA_VOLUME_TYPE must be one of: table, user, external") + + if volume_type == "external" and not values.get("volume_name"): + raise ValueError("CLICKZETTA_VOLUME_NAME is required for external volume type") + + return values + + +class ClickZettaVolumeStorage(BaseStorage): + """ClickZetta Volume storage implementation.""" + + def __init__(self, config: ClickZettaVolumeConfig): + """Initialize ClickZetta Volume storage. + + Args: + config: ClickZetta Volume configuration + """ + self._config = config + self._connection = None + self._permission_manager: VolumePermissionManager | None = None + self._init_connection() + self._init_permission_manager() + + logger.info("ClickZetta Volume storage initialized with type: %s", config.volume_type) + + def _init_connection(self): + """Initialize ClickZetta connection.""" + try: + self._connection = clickzetta.connect( + username=self._config.username, + password=self._config.password, + instance=self._config.instance, + service=self._config.service, + workspace=self._config.workspace, + vcluster=self._config.vcluster, + schema=self._config.schema_name, + ) + logger.debug("ClickZetta connection established") + except Exception as e: + logger.exception("Failed to connect to ClickZetta") + raise + + def _init_permission_manager(self): + """Initialize permission manager.""" + try: + self._permission_manager = VolumePermissionManager( + self._connection, self._config.volume_type, self._config.volume_name + ) + logger.debug("Permission manager initialized") + except Exception as e: + logger.exception("Failed to initialize permission manager") + raise + + def _get_volume_path(self, filename: str, dataset_id: Optional[str] = None) -> str: + """Get the appropriate volume path based on volume type.""" + if self._config.volume_type == "user": + # Add dify prefix for User Volume to organize files + return f"{self._config.dify_prefix}/{filename}" + elif self._config.volume_type == "table": + # Check if this should use User Volume (special directories) + if dataset_id in ["upload_files", "temp", "cache", "tools", "website_files", "privkeys"]: + # Use User Volume with dify prefix for special directories + return f"{self._config.dify_prefix}/{filename}" + + if dataset_id: + return f"{self._config.table_prefix}{dataset_id}/{filename}" + else: + # Extract dataset_id from filename if not provided + # Format: dataset_id/filename + if "/" in filename: + return filename + else: + raise ValueError("dataset_id is required for table volume or filename must include dataset_id/") + elif self._config.volume_type == "external": + return filename + else: + raise ValueError(f"Unsupported volume type: {self._config.volume_type}") + + def _get_volume_sql_prefix(self, dataset_id: Optional[str] = None) -> str: + """Get SQL prefix for volume operations.""" + if self._config.volume_type == "user": + return "USER VOLUME" + elif self._config.volume_type == "table": + # For Dify's current file storage pattern, most files are stored in + # paths like "upload_files/tenant_id/uuid.ext", "tools/tenant_id/uuid.ext" + # These should use USER VOLUME for better compatibility + if dataset_id in ["upload_files", "temp", "cache", "tools", "website_files", "privkeys"]: + return "USER VOLUME" + + # Only use TABLE VOLUME for actual dataset-specific paths + # like "dataset_12345/file.pdf" or paths with dataset_ prefix + if dataset_id: + table_name = f"{self._config.table_prefix}{dataset_id}" + else: + # Default table name for generic operations + table_name = "default_dataset" + return f"TABLE VOLUME {table_name}" + elif self._config.volume_type == "external": + return f"VOLUME {self._config.volume_name}" + else: + raise ValueError(f"Unsupported volume type: {self._config.volume_type}") + + def _execute_sql(self, sql: str, fetch: bool = False): + """Execute SQL command.""" + try: + if self._connection is None: + raise RuntimeError("Connection not initialized") + with self._connection.cursor() as cursor: + cursor.execute(sql) + if fetch: + return cursor.fetchall() + return None + except Exception as e: + logger.exception("SQL execution failed: %s", sql) + raise + + def _ensure_table_volume_exists(self, dataset_id: str) -> None: + """Ensure table volume exists for the given dataset_id.""" + if self._config.volume_type != "table" or not dataset_id: + return + + # Skip for upload_files and other special directories that use USER VOLUME + if dataset_id in ["upload_files", "temp", "cache", "tools", "website_files", "privkeys"]: + return + + table_name = f"{self._config.table_prefix}{dataset_id}" + + try: + # Check if table exists + check_sql = f"SHOW TABLES LIKE '{table_name}'" + result = self._execute_sql(check_sql, fetch=True) + + if not result: + # Create table with volume + create_sql = f""" + CREATE TABLE {table_name} ( + id INT PRIMARY KEY AUTO_INCREMENT, + filename VARCHAR(255) NOT NULL, + created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP, + updated_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP, + INDEX idx_filename (filename) + ) WITH VOLUME + """ + self._execute_sql(create_sql) + logger.info("Created table volume: %s", table_name) + + except Exception as e: + logger.warning("Failed to create table volume %s: %s", table_name, e) + # Don't raise exception, let the operation continue + # The table might exist but not be visible due to permissions + + def save(self, filename: str, data: bytes) -> None: + """Save data to ClickZetta Volume. + + Args: + filename: File path in volume + data: File content as bytes + """ + # Extract dataset_id from filename if present + dataset_id = None + if "/" in filename and self._config.volume_type == "table": + parts = filename.split("/", 1) + if parts[0].startswith(self._config.table_prefix): + dataset_id = parts[0][len(self._config.table_prefix) :] + filename = parts[1] + else: + dataset_id = parts[0] + filename = parts[1] + + # Ensure table volume exists (for table volumes) + if dataset_id: + self._ensure_table_volume_exists(dataset_id) + + # Check permissions (if enabled) + if self._config.permission_check: + # Skip permission check for special directories that use USER VOLUME + if dataset_id not in ["upload_files", "temp", "cache", "tools", "website_files", "privkeys"]: + if self._permission_manager is not None: + check_volume_permission(self._permission_manager, "save", dataset_id) + + # Write data to temporary file + with tempfile.NamedTemporaryFile(delete=False) as temp_file: + temp_file.write(data) + temp_file_path = temp_file.name + + try: + # Upload to volume + volume_prefix = self._get_volume_sql_prefix(dataset_id) + + # Get the actual volume path (may include dify_km prefix) + volume_path = self._get_volume_path(filename, dataset_id) + actual_filename = volume_path.split("/")[-1] if "/" in volume_path else volume_path + + # For User Volume, use the full path with dify_km prefix + if volume_prefix == "USER VOLUME": + sql = f"PUT '{temp_file_path}' TO {volume_prefix} FILE '{volume_path}'" + else: + sql = f"PUT '{temp_file_path}' TO {volume_prefix} FILE '{filename}'" + + self._execute_sql(sql) + logger.debug("File %s saved to ClickZetta Volume at path %s", filename, volume_path) + finally: + # Clean up temporary file + Path(temp_file_path).unlink(missing_ok=True) + + def load_once(self, filename: str) -> bytes: + """Load file content from ClickZetta Volume. + + Args: + filename: File path in volume + + Returns: + File content as bytes + """ + # Extract dataset_id from filename if present + dataset_id = None + if "/" in filename and self._config.volume_type == "table": + parts = filename.split("/", 1) + if parts[0].startswith(self._config.table_prefix): + dataset_id = parts[0][len(self._config.table_prefix) :] + filename = parts[1] + else: + dataset_id = parts[0] + filename = parts[1] + + # Check permissions (if enabled) + if self._config.permission_check: + # Skip permission check for special directories that use USER VOLUME + if dataset_id not in ["upload_files", "temp", "cache", "tools", "website_files", "privkeys"]: + if self._permission_manager is not None: + check_volume_permission(self._permission_manager, "load_once", dataset_id) + + # Download to temporary directory + with tempfile.TemporaryDirectory() as temp_dir: + volume_prefix = self._get_volume_sql_prefix(dataset_id) + + # Get the actual volume path (may include dify_km prefix) + volume_path = self._get_volume_path(filename, dataset_id) + + # For User Volume, use the full path with dify_km prefix + if volume_prefix == "USER VOLUME": + sql = f"GET {volume_prefix} FILE '{volume_path}' TO '{temp_dir}'" + else: + sql = f"GET {volume_prefix} FILE '{filename}' TO '{temp_dir}'" + + self._execute_sql(sql) + + # Find the downloaded file (may be in subdirectories) + downloaded_file = None + for root, dirs, files in os.walk(temp_dir): + for file in files: + if file == filename or file == os.path.basename(filename): + downloaded_file = Path(root) / file + break + if downloaded_file: + break + + if not downloaded_file or not downloaded_file.exists(): + raise FileNotFoundError(f"Downloaded file not found: {filename}") + + content = downloaded_file.read_bytes() + + logger.debug("File %s loaded from ClickZetta Volume", filename) + return content + + def load_stream(self, filename: str) -> Generator: + """Load file as stream from ClickZetta Volume. + + Args: + filename: File path in volume + + Yields: + File content chunks + """ + content = self.load_once(filename) + batch_size = 4096 + stream = BytesIO(content) + + while chunk := stream.read(batch_size): + yield chunk + + logger.debug("File %s loaded as stream from ClickZetta Volume", filename) + + def download(self, filename: str, target_filepath: str): + """Download file from ClickZetta Volume to local path. + + Args: + filename: File path in volume + target_filepath: Local target file path + """ + content = self.load_once(filename) + + with Path(target_filepath).open("wb") as f: + f.write(content) + + logger.debug("File %s downloaded from ClickZetta Volume to %s", filename, target_filepath) + + def exists(self, filename: str) -> bool: + """Check if file exists in ClickZetta Volume. + + Args: + filename: File path in volume + + Returns: + True if file exists, False otherwise + """ + try: + # Extract dataset_id from filename if present + dataset_id = None + if "/" in filename and self._config.volume_type == "table": + parts = filename.split("/", 1) + if parts[0].startswith(self._config.table_prefix): + dataset_id = parts[0][len(self._config.table_prefix) :] + filename = parts[1] + else: + dataset_id = parts[0] + filename = parts[1] + + volume_prefix = self._get_volume_sql_prefix(dataset_id) + + # Get the actual volume path (may include dify_km prefix) + volume_path = self._get_volume_path(filename, dataset_id) + + # For User Volume, use the full path with dify_km prefix + if volume_prefix == "USER VOLUME": + sql = f"LIST {volume_prefix} REGEXP = '^{volume_path}$'" + else: + sql = f"LIST {volume_prefix} REGEXP = '^{filename}$'" + + rows = self._execute_sql(sql, fetch=True) + + exists = len(rows) > 0 + logger.debug("File %s exists check: %s", filename, exists) + return exists + except Exception as e: + logger.warning("Error checking file existence for %s: %s", filename, e) + return False + + def delete(self, filename: str): + """Delete file from ClickZetta Volume. + + Args: + filename: File path in volume + """ + if not self.exists(filename): + logger.debug("File %s not found, skip delete", filename) + return + + # Extract dataset_id from filename if present + dataset_id = None + if "/" in filename and self._config.volume_type == "table": + parts = filename.split("/", 1) + if parts[0].startswith(self._config.table_prefix): + dataset_id = parts[0][len(self._config.table_prefix) :] + filename = parts[1] + else: + dataset_id = parts[0] + filename = parts[1] + + volume_prefix = self._get_volume_sql_prefix(dataset_id) + + # Get the actual volume path (may include dify_km prefix) + volume_path = self._get_volume_path(filename, dataset_id) + + # For User Volume, use the full path with dify_km prefix + if volume_prefix == "USER VOLUME": + sql = f"REMOVE {volume_prefix} FILE '{volume_path}'" + else: + sql = f"REMOVE {volume_prefix} FILE '{filename}'" + + self._execute_sql(sql) + + logger.debug("File %s deleted from ClickZetta Volume", filename) + + def scan(self, path: str, files: bool = True, directories: bool = False) -> list[str]: + """Scan files and directories in ClickZetta Volume. + + Args: + path: Path to scan (dataset_id for table volumes) + files: Include files in results + directories: Include directories in results + + Returns: + List of file/directory paths + """ + try: + # For table volumes, path is treated as dataset_id + dataset_id = None + if self._config.volume_type == "table": + dataset_id = path + path = "" # Root of the table volume + + volume_prefix = self._get_volume_sql_prefix(dataset_id) + + # For User Volume, add dify prefix to path + if volume_prefix == "USER VOLUME": + if path: + scan_path = f"{self._config.dify_prefix}/{path}" + sql = f"LIST {volume_prefix} SUBDIRECTORY '{scan_path}'" + else: + sql = f"LIST {volume_prefix} SUBDIRECTORY '{self._config.dify_prefix}'" + else: + if path: + sql = f"LIST {volume_prefix} SUBDIRECTORY '{path}'" + else: + sql = f"LIST {volume_prefix}" + + rows = self._execute_sql(sql, fetch=True) + + result = [] + for row in rows: + file_path = row[0] # relative_path column + + # For User Volume, remove dify prefix from results + dify_prefix_with_slash = f"{self._config.dify_prefix}/" + if volume_prefix == "USER VOLUME" and file_path.startswith(dify_prefix_with_slash): + file_path = file_path[len(dify_prefix_with_slash) :] # Remove prefix + + if files and not file_path.endswith("/") or directories and file_path.endswith("/"): + result.append(file_path) + + logger.debug("Scanned %d items in path %s", len(result), path) + return result + + except Exception as e: + logger.exception("Error scanning path %s", path) + return [] diff --git a/api/extensions/storage/clickzetta_volume/file_lifecycle.py b/api/extensions/storage/clickzetta_volume/file_lifecycle.py new file mode 100644 index 0000000000..d5d04f121b --- /dev/null +++ b/api/extensions/storage/clickzetta_volume/file_lifecycle.py @@ -0,0 +1,516 @@ +"""ClickZetta Volume文件生命周期管理 + +该模块提供文件版本控制、自动清理、备份和恢复等生命周期管理功能。 +支持知识库文件的完整生命周期管理。 +""" + +import json +import logging +from dataclasses import asdict, dataclass +from datetime import datetime, timedelta +from enum import Enum +from typing import Any, Optional + +logger = logging.getLogger(__name__) + + +class FileStatus(Enum): + """文件状态枚举""" + + ACTIVE = "active" # 活跃状态 + ARCHIVED = "archived" # 已归档 + DELETED = "deleted" # 已删除(软删除) + BACKUP = "backup" # 备份文件 + + +@dataclass +class FileMetadata: + """文件元数据""" + + filename: str + size: int | None + created_at: datetime + modified_at: datetime + version: int | None + status: FileStatus + checksum: Optional[str] = None + tags: Optional[dict[str, str]] = None + parent_version: Optional[int] = None + + def to_dict(self) -> dict: + """转换为字典格式""" + data = asdict(self) + data["created_at"] = self.created_at.isoformat() + data["modified_at"] = self.modified_at.isoformat() + data["status"] = self.status.value + return data + + @classmethod + def from_dict(cls, data: dict) -> "FileMetadata": + """从字典创建实例""" + data = data.copy() + data["created_at"] = datetime.fromisoformat(data["created_at"]) + data["modified_at"] = datetime.fromisoformat(data["modified_at"]) + data["status"] = FileStatus(data["status"]) + return cls(**data) + + +class FileLifecycleManager: + """文件生命周期管理器""" + + def __init__(self, storage, dataset_id: Optional[str] = None): + """初始化生命周期管理器 + + Args: + storage: ClickZetta Volume存储实例 + dataset_id: 数据集ID(用于Table Volume) + """ + self._storage = storage + self._dataset_id = dataset_id + self._metadata_file = ".dify_file_metadata.json" + self._version_prefix = ".versions/" + self._backup_prefix = ".backups/" + self._deleted_prefix = ".deleted/" + + # 获取权限管理器(如果存在) + self._permission_manager: Optional[Any] = getattr(storage, "_permission_manager", None) + + def save_with_lifecycle(self, filename: str, data: bytes, tags: Optional[dict[str, str]] = None) -> FileMetadata: + """保存文件并管理生命周期 + + Args: + filename: 文件名 + data: 文件内容 + tags: 文件标签 + + Returns: + 文件元数据 + """ + # 权限检查 + if not self._check_permission(filename, "save"): + from .volume_permissions import VolumePermissionError + + raise VolumePermissionError( + f"Permission denied for lifecycle save operation on file: {filename}", + operation="save", + volume_type=getattr(self._storage, "_config", {}).get("volume_type", "unknown"), + dataset_id=self._dataset_id, + ) + + try: + # 1. 检查是否存在旧版本 + metadata_dict = self._load_metadata() + current_metadata = metadata_dict.get(filename) + + # 2. 如果存在旧版本,创建版本备份 + if current_metadata: + self._create_version_backup(filename, current_metadata) + + # 3. 计算文件信息 + now = datetime.now() + checksum = self._calculate_checksum(data) + new_version = (current_metadata["version"] + 1) if current_metadata else 1 + + # 4. 保存新文件 + self._storage.save(filename, data) + + # 5. 创建元数据 + created_at = now + parent_version = None + + if current_metadata: + # 如果created_at是字符串,转换为datetime + if isinstance(current_metadata["created_at"], str): + created_at = datetime.fromisoformat(current_metadata["created_at"]) + else: + created_at = current_metadata["created_at"] + parent_version = current_metadata["version"] + + file_metadata = FileMetadata( + filename=filename, + size=len(data), + created_at=created_at, + modified_at=now, + version=new_version, + status=FileStatus.ACTIVE, + checksum=checksum, + tags=tags or {}, + parent_version=parent_version, + ) + + # 6. 更新元数据 + metadata_dict[filename] = file_metadata.to_dict() + self._save_metadata(metadata_dict) + + logger.info("File %s saved with lifecycle management, version %s", filename, new_version) + return file_metadata + + except Exception as e: + logger.exception("Failed to save file with lifecycle") + raise + + def get_file_metadata(self, filename: str) -> Optional[FileMetadata]: + """获取文件元数据 + + Args: + filename: 文件名 + + Returns: + 文件元数据,如果不存在返回None + """ + try: + metadata_dict = self._load_metadata() + if filename in metadata_dict: + return FileMetadata.from_dict(metadata_dict[filename]) + return None + except Exception as e: + logger.exception("Failed to get file metadata for %s", filename) + return None + + def list_file_versions(self, filename: str) -> list[FileMetadata]: + """列出文件的所有版本 + + Args: + filename: 文件名 + + Returns: + 文件版本列表,按版本号排序 + """ + try: + versions = [] + + # 获取当前版本 + current_metadata = self.get_file_metadata(filename) + if current_metadata: + versions.append(current_metadata) + + # 获取历史版本 + version_pattern = f"{self._version_prefix}{filename}.v*" + try: + version_files = self._storage.scan(self._dataset_id or "", files=True) + for file_path in version_files: + if file_path.startswith(f"{self._version_prefix}{filename}.v"): + # 解析版本号 + version_str = file_path.split(".v")[-1].split(".")[0] + try: + version_num = int(version_str) + # 这里简化处理,实际应该从版本文件中读取元数据 + # 暂时创建基本的元数据信息 + except ValueError: + continue + except: + # 如果无法扫描版本文件,只返回当前版本 + pass + + return sorted(versions, key=lambda x: x.version or 0, reverse=True) + + except Exception as e: + logger.exception("Failed to list file versions for %s", filename) + return [] + + def restore_version(self, filename: str, version: int) -> bool: + """恢复文件到指定版本 + + Args: + filename: 文件名 + version: 要恢复的版本号 + + Returns: + 恢复是否成功 + """ + try: + version_filename = f"{self._version_prefix}{filename}.v{version}" + + # 检查版本文件是否存在 + if not self._storage.exists(version_filename): + logger.warning("Version %s of %s not found", version, filename) + return False + + # 读取版本文件内容 + version_data = self._storage.load_once(version_filename) + + # 保存当前版本为备份 + current_metadata = self.get_file_metadata(filename) + if current_metadata: + self._create_version_backup(filename, current_metadata.to_dict()) + + # 恢复文件 + self.save_with_lifecycle(filename, version_data, {"restored_from": str(version)}) + return True + + except Exception as e: + logger.exception("Failed to restore %s to version %s", filename, version) + return False + + def archive_file(self, filename: str) -> bool: + """归档文件 + + Args: + filename: 文件名 + + Returns: + 归档是否成功 + """ + # 权限检查 + if not self._check_permission(filename, "archive"): + logger.warning("Permission denied for archive operation on file: %s", filename) + return False + + try: + # 更新文件状态为归档 + metadata_dict = self._load_metadata() + if filename not in metadata_dict: + logger.warning("File %s not found in metadata", filename) + return False + + metadata_dict[filename]["status"] = FileStatus.ARCHIVED.value + metadata_dict[filename]["modified_at"] = datetime.now().isoformat() + + self._save_metadata(metadata_dict) + + logger.info("File %s archived successfully", filename) + return True + + except Exception as e: + logger.exception("Failed to archive file %s", filename) + return False + + def soft_delete_file(self, filename: str) -> bool: + """软删除文件(移动到删除目录) + + Args: + filename: 文件名 + + Returns: + 删除是否成功 + """ + # 权限检查 + if not self._check_permission(filename, "delete"): + logger.warning("Permission denied for soft delete operation on file: %s", filename) + return False + + try: + # 检查文件是否存在 + if not self._storage.exists(filename): + logger.warning("File %s not found", filename) + return False + + # 读取文件内容 + file_data = self._storage.load_once(filename) + + # 移动到删除目录 + deleted_filename = f"{self._deleted_prefix}{filename}.{datetime.now().strftime('%Y%m%d_%H%M%S')}" + self._storage.save(deleted_filename, file_data) + + # 删除原文件 + self._storage.delete(filename) + + # 更新元数据 + metadata_dict = self._load_metadata() + if filename in metadata_dict: + metadata_dict[filename]["status"] = FileStatus.DELETED.value + metadata_dict[filename]["modified_at"] = datetime.now().isoformat() + self._save_metadata(metadata_dict) + + logger.info("File %s soft deleted successfully", filename) + return True + + except Exception as e: + logger.exception("Failed to soft delete file %s", filename) + return False + + def cleanup_old_versions(self, max_versions: int = 5, max_age_days: int = 30) -> int: + """清理旧版本文件 + + Args: + max_versions: 保留的最大版本数 + max_age_days: 版本文件的最大保留天数 + + Returns: + 清理的文件数量 + """ + try: + cleaned_count = 0 + cutoff_date = datetime.now() - timedelta(days=max_age_days) + + # 获取所有版本文件 + try: + all_files = self._storage.scan(self._dataset_id or "", files=True) + version_files = [f for f in all_files if f.startswith(self._version_prefix)] + + # 按文件分组 + file_versions: dict[str, list[tuple[int, str]]] = {} + for version_file in version_files: + # 解析文件名和版本 + parts = version_file[len(self._version_prefix) :].split(".v") + if len(parts) >= 2: + base_filename = parts[0] + version_part = parts[1].split(".")[0] + try: + version_num = int(version_part) + if base_filename not in file_versions: + file_versions[base_filename] = [] + file_versions[base_filename].append((version_num, version_file)) + except ValueError: + continue + + # 清理每个文件的旧版本 + for base_filename, versions in file_versions.items(): + # 按版本号排序 + versions.sort(key=lambda x: x[0], reverse=True) + + # 保留最新的max_versions个版本,删除其余的 + if len(versions) > max_versions: + to_delete = versions[max_versions:] + for version_num, version_file in to_delete: + self._storage.delete(version_file) + cleaned_count += 1 + logger.debug("Cleaned old version: %s", version_file) + + logger.info("Cleaned %d old version files", cleaned_count) + + except Exception as e: + logger.warning("Could not scan for version files: %s", e) + + return cleaned_count + + except Exception as e: + logger.exception("Failed to cleanup old versions") + return 0 + + def get_storage_statistics(self) -> dict[str, Any]: + """获取存储统计信息 + + Returns: + 存储统计字典 + """ + try: + metadata_dict = self._load_metadata() + + stats: dict[str, Any] = { + "total_files": len(metadata_dict), + "active_files": 0, + "archived_files": 0, + "deleted_files": 0, + "total_size": 0, + "versions_count": 0, + "oldest_file": None, + "newest_file": None, + } + + oldest_date = None + newest_date = None + + for filename, metadata in metadata_dict.items(): + file_meta = FileMetadata.from_dict(metadata) + + # 统计文件状态 + if file_meta.status == FileStatus.ACTIVE: + stats["active_files"] = (stats["active_files"] or 0) + 1 + elif file_meta.status == FileStatus.ARCHIVED: + stats["archived_files"] = (stats["archived_files"] or 0) + 1 + elif file_meta.status == FileStatus.DELETED: + stats["deleted_files"] = (stats["deleted_files"] or 0) + 1 + + # 统计大小 + stats["total_size"] = (stats["total_size"] or 0) + (file_meta.size or 0) + + # 统计版本 + stats["versions_count"] = (stats["versions_count"] or 0) + (file_meta.version or 0) + + # 找出最新和最旧的文件 + if oldest_date is None or file_meta.created_at < oldest_date: + oldest_date = file_meta.created_at + stats["oldest_file"] = filename + + if newest_date is None or file_meta.modified_at > newest_date: + newest_date = file_meta.modified_at + stats["newest_file"] = filename + + return stats + + except Exception as e: + logger.exception("Failed to get storage statistics") + return {} + + def _create_version_backup(self, filename: str, metadata: dict): + """创建版本备份""" + try: + # 读取当前文件内容 + current_data = self._storage.load_once(filename) + + # 保存为版本文件 + version_filename = f"{self._version_prefix}{filename}.v{metadata['version']}" + self._storage.save(version_filename, current_data) + + logger.debug("Created version backup: %s", version_filename) + + except Exception as e: + logger.warning("Failed to create version backup for %s: %s", filename, e) + + def _load_metadata(self) -> dict[str, Any]: + """加载元数据文件""" + try: + if self._storage.exists(self._metadata_file): + metadata_content = self._storage.load_once(self._metadata_file) + result = json.loads(metadata_content.decode("utf-8")) + return dict(result) if result else {} + else: + return {} + except Exception as e: + logger.warning("Failed to load metadata: %s", e) + return {} + + def _save_metadata(self, metadata_dict: dict): + """保存元数据文件""" + try: + metadata_content = json.dumps(metadata_dict, indent=2, ensure_ascii=False) + self._storage.save(self._metadata_file, metadata_content.encode("utf-8")) + logger.debug("Metadata saved successfully") + except Exception as e: + logger.exception("Failed to save metadata") + raise + + def _calculate_checksum(self, data: bytes) -> str: + """计算文件校验和""" + import hashlib + + return hashlib.md5(data).hexdigest() + + def _check_permission(self, filename: str, operation: str) -> bool: + """检查文件操作权限 + + Args: + filename: 文件名 + operation: 操作类型 + + Returns: + True if permission granted, False otherwise + """ + # 如果没有权限管理器,默认允许 + if not self._permission_manager: + return True + + try: + # 根据操作类型映射到权限 + operation_mapping = { + "save": "save", + "load": "load_once", + "delete": "delete", + "archive": "delete", # 归档需要删除权限 + "restore": "save", # 恢复需要写权限 + "cleanup": "delete", # 清理需要删除权限 + "read": "load_once", + "write": "save", + } + + mapped_operation = operation_mapping.get(operation, operation) + + # 检查权限 + result = self._permission_manager.validate_operation(mapped_operation, self._dataset_id) + return bool(result) + + except Exception as e: + logger.exception("Permission check failed for %s operation %s", filename, operation) + # 安全默认:权限检查失败时拒绝访问 + return False diff --git a/api/extensions/storage/clickzetta_volume/volume_permissions.py b/api/extensions/storage/clickzetta_volume/volume_permissions.py new file mode 100644 index 0000000000..4801df5102 --- /dev/null +++ b/api/extensions/storage/clickzetta_volume/volume_permissions.py @@ -0,0 +1,646 @@ +"""ClickZetta Volume权限管理机制 + +该模块提供Volume权限检查、验证和管理功能。 +根据ClickZetta的权限模型,不同Volume类型有不同的权限要求。 +""" + +import logging +from enum import Enum +from typing import Optional + +logger = logging.getLogger(__name__) + + +class VolumePermission(Enum): + """Volume权限类型枚举""" + + READ = "SELECT" # 对应ClickZetta的SELECT权限 + WRITE = "INSERT,UPDATE,DELETE" # 对应ClickZetta的写权限 + LIST = "SELECT" # 列出文件需要SELECT权限 + DELETE = "INSERT,UPDATE,DELETE" # 删除文件需要写权限 + USAGE = "USAGE" # External Volume需要的基本权限 + + +class VolumePermissionManager: + """Volume权限管理器""" + + def __init__(self, connection_or_config, volume_type: str | None = None, volume_name: Optional[str] = None): + """初始化权限管理器 + + Args: + connection_or_config: ClickZetta连接对象或配置字典 + volume_type: Volume类型 (user|table|external) + volume_name: Volume名称 (用于external volume) + """ + # 支持两种初始化方式:连接对象或配置字典 + if isinstance(connection_or_config, dict): + # 从配置字典创建连接 + import clickzetta # type: ignore[import-untyped] + + config = connection_or_config + self._connection = clickzetta.connect( + username=config.get("username"), + password=config.get("password"), + instance=config.get("instance"), + service=config.get("service"), + workspace=config.get("workspace"), + vcluster=config.get("vcluster"), + schema=config.get("schema") or config.get("database"), + ) + self._volume_type = config.get("volume_type", volume_type) + self._volume_name = config.get("volume_name", volume_name) + else: + # 直接使用连接对象 + self._connection = connection_or_config + self._volume_type = volume_type + self._volume_name = volume_name + + if not self._connection: + raise ValueError("Valid connection or config is required") + if not self._volume_type: + raise ValueError("volume_type is required") + + self._permission_cache: dict[str, set[str]] = {} + self._current_username = None # 将从连接中获取当前用户名 + + def check_permission(self, operation: VolumePermission, dataset_id: Optional[str] = None) -> bool: + """检查用户是否有执行特定操作的权限 + + Args: + operation: 要执行的操作类型 + dataset_id: 数据集ID (用于table volume) + + Returns: + True if user has permission, False otherwise + """ + try: + if self._volume_type == "user": + return self._check_user_volume_permission(operation) + elif self._volume_type == "table": + return self._check_table_volume_permission(operation, dataset_id) + elif self._volume_type == "external": + return self._check_external_volume_permission(operation) + else: + logger.warning("Unknown volume type: %s", self._volume_type) + return False + + except Exception as e: + logger.exception("Permission check failed") + return False + + def _check_user_volume_permission(self, operation: VolumePermission) -> bool: + """检查User Volume权限 + + User Volume权限规则: + - 用户对自己的User Volume有全部权限 + - 只要用户能够连接到ClickZetta,就默认具有User Volume的基本权限 + - 更注重连接身份验证,而不是复杂的权限检查 + """ + try: + # 获取当前用户名 + current_user = self._get_current_username() + + # 检查基本连接状态 + with self._connection.cursor() as cursor: + # 简单的连接测试,如果能执行查询说明用户有基本权限 + cursor.execute("SELECT 1") + result = cursor.fetchone() + + if result: + logger.debug( + "User Volume permission check for %s, operation %s: granted (basic connection verified)", + current_user, + operation.name, + ) + return True + else: + logger.warning( + "User Volume permission check failed: cannot verify basic connection for %s", current_user + ) + return False + + except Exception as e: + logger.exception("User Volume permission check failed") + # 对于User Volume,如果权限检查失败,可能是配置问题,给出更友好的错误提示 + logger.info("User Volume permission check failed, but permission checking is disabled in this version") + return False + + def _check_table_volume_permission(self, operation: VolumePermission, dataset_id: Optional[str]) -> bool: + """检查Table Volume权限 + + Table Volume权限规则: + - Table Volume权限继承对应表的权限 + - SELECT权限 -> 可以READ/LIST文件 + - INSERT,UPDATE,DELETE权限 -> 可以WRITE/DELETE文件 + """ + if not dataset_id: + logger.warning("dataset_id is required for table volume permission check") + return False + + table_name = f"dataset_{dataset_id}" if not dataset_id.startswith("dataset_") else dataset_id + + try: + # 检查表权限 + permissions = self._get_table_permissions(table_name) + required_permissions = set(operation.value.split(",")) + + # 检查是否有所需的所有权限 + has_permission = required_permissions.issubset(permissions) + + logger.debug( + "Table Volume permission check for %s, operation %s: required=%s, has=%s, granted=%s", + table_name, + operation.name, + required_permissions, + permissions, + has_permission, + ) + + return has_permission + + except Exception as e: + logger.exception("Table volume permission check failed for %s", table_name) + return False + + def _check_external_volume_permission(self, operation: VolumePermission) -> bool: + """检查External Volume权限 + + External Volume权限规则: + - 尝试获取对External Volume的权限 + - 如果权限检查失败,进行备选验证 + - 对于开发环境,提供更宽松的权限检查 + """ + if not self._volume_name: + logger.warning("volume_name is required for external volume permission check") + return False + + try: + # 检查External Volume权限 + permissions = self._get_external_volume_permissions(self._volume_name) + + # External Volume权限映射:根据操作类型确定所需权限 + required_permissions = set() + + if operation in [VolumePermission.READ, VolumePermission.LIST]: + required_permissions.add("read") + elif operation in [VolumePermission.WRITE, VolumePermission.DELETE]: + required_permissions.add("write") + + # 检查是否有所需的所有权限 + has_permission = required_permissions.issubset(permissions) + + logger.debug( + "External Volume permission check for %s, operation %s: required=%s, has=%s, granted=%s", + self._volume_name, + operation.name, + required_permissions, + permissions, + has_permission, + ) + + # 如果权限检查失败,尝试备选验证 + if not has_permission: + logger.info("Direct permission check failed for %s, trying fallback verification", self._volume_name) + + # 备选验证:尝试列出Volume来验证基本访问权限 + try: + with self._connection.cursor() as cursor: + cursor.execute("SHOW VOLUMES") + volumes = cursor.fetchall() + for volume in volumes: + if len(volume) > 0 and volume[0] == self._volume_name: + logger.info("Fallback verification successful for %s", self._volume_name) + return True + except Exception as fallback_e: + logger.warning("Fallback verification failed for %s: %s", self._volume_name, fallback_e) + + return has_permission + + except Exception as e: + logger.exception("External volume permission check failed for %s", self._volume_name) + logger.info("External Volume permission check failed, but permission checking is disabled in this version") + return False + + def _get_table_permissions(self, table_name: str) -> set[str]: + """获取用户对指定表的权限 + + Args: + table_name: 表名 + + Returns: + 用户对该表的权限集合 + """ + cache_key = f"table:{table_name}" + + if cache_key in self._permission_cache: + return self._permission_cache[cache_key] + + permissions = set() + + try: + with self._connection.cursor() as cursor: + # 使用正确的ClickZetta语法检查当前用户权限 + cursor.execute("SHOW GRANTS") + grants = cursor.fetchall() + + # 解析权限结果,查找对该表的权限 + for grant in grants: + if len(grant) >= 3: # 典型格式: (privilege, object_type, object_name, ...) + privilege = grant[0].upper() + object_type = grant[1].upper() if len(grant) > 1 else "" + object_name = grant[2] if len(grant) > 2 else "" + + # 检查是否是对该表的权限 + if ( + object_type == "TABLE" + and object_name == table_name + or object_type == "SCHEMA" + and object_name in table_name + ): + if privilege in ["SELECT", "INSERT", "UPDATE", "DELETE", "ALL"]: + if privilege == "ALL": + permissions.update(["SELECT", "INSERT", "UPDATE", "DELETE"]) + else: + permissions.add(privilege) + + # 如果没有找到明确的权限,尝试执行一个简单的查询来验证权限 + if not permissions: + try: + cursor.execute(f"SELECT COUNT(*) FROM {table_name} LIMIT 1") + permissions.add("SELECT") + except Exception: + logger.debug("Cannot query table %s, no SELECT permission", table_name) + + except Exception as e: + logger.warning("Could not check table permissions for %s: %s", table_name, e) + # 安全默认:权限检查失败时拒绝访问 + pass + + # 缓存权限信息 + self._permission_cache[cache_key] = permissions + return permissions + + def _get_current_username(self) -> str: + """获取当前用户名""" + if self._current_username: + return self._current_username + + try: + with self._connection.cursor() as cursor: + cursor.execute("SELECT CURRENT_USER()") + result = cursor.fetchone() + if result: + self._current_username = result[0] + return str(self._current_username) + except Exception as e: + logger.exception("Failed to get current username") + + return "unknown" + + def _get_user_permissions(self, username: str) -> set[str]: + """获取用户的基本权限集合""" + cache_key = f"user_permissions:{username}" + + if cache_key in self._permission_cache: + return self._permission_cache[cache_key] + + permissions = set() + + try: + with self._connection.cursor() as cursor: + # 使用正确的ClickZetta语法检查当前用户权限 + cursor.execute("SHOW GRANTS") + grants = cursor.fetchall() + + # 解析权限结果,查找用户的基本权限 + for grant in grants: + if len(grant) >= 3: # 典型格式: (privilege, object_type, object_name, ...) + privilege = grant[0].upper() + object_type = grant[1].upper() if len(grant) > 1 else "" + + # 收集所有相关权限 + if privilege in ["SELECT", "INSERT", "UPDATE", "DELETE", "ALL"]: + if privilege == "ALL": + permissions.update(["SELECT", "INSERT", "UPDATE", "DELETE"]) + else: + permissions.add(privilege) + + except Exception as e: + logger.warning("Could not check user permissions for %s: %s", username, e) + # 安全默认:权限检查失败时拒绝访问 + pass + + # 缓存权限信息 + self._permission_cache[cache_key] = permissions + return permissions + + def _get_external_volume_permissions(self, volume_name: str) -> set[str]: + """获取用户对指定External Volume的权限 + + Args: + volume_name: External Volume名称 + + Returns: + 用户对该Volume的权限集合 + """ + cache_key = f"external_volume:{volume_name}" + + if cache_key in self._permission_cache: + return self._permission_cache[cache_key] + + permissions = set() + + try: + with self._connection.cursor() as cursor: + # 使用正确的ClickZetta语法检查Volume权限 + logger.info("Checking permissions for volume: %s", volume_name) + cursor.execute(f"SHOW GRANTS ON VOLUME {volume_name}") + grants = cursor.fetchall() + + logger.info("Raw grants result for %s: %s", volume_name, grants) + + # 解析权限结果 + # 格式: (granted_type, privilege, conditions, granted_on, object_name, granted_to, + # grantee_name, grantor_name, grant_option, granted_time) + for grant in grants: + logger.info("Processing grant: %s", grant) + if len(grant) >= 5: + granted_type = grant[0] + privilege = grant[1].upper() + granted_on = grant[3] + object_name = grant[4] + + logger.info( + "Grant details - type: %s, privilege: %s, granted_on: %s, object_name: %s", + granted_type, + privilege, + granted_on, + object_name, + ) + + # 检查是否是对该Volume的权限或者是层级权限 + if ( + granted_type == "PRIVILEGE" and granted_on == "VOLUME" and object_name.endswith(volume_name) + ) or (granted_type == "OBJECT_HIERARCHY" and granted_on == "VOLUME"): + logger.info("Matching grant found for %s", volume_name) + + if "READ" in privilege: + permissions.add("read") + logger.info("Added READ permission for %s", volume_name) + if "WRITE" in privilege: + permissions.add("write") + logger.info("Added WRITE permission for %s", volume_name) + if "ALTER" in privilege: + permissions.add("alter") + logger.info("Added ALTER permission for %s", volume_name) + if privilege == "ALL": + permissions.update(["read", "write", "alter"]) + logger.info("Added ALL permissions for %s", volume_name) + + logger.info("Final permissions for %s: %s", volume_name, permissions) + + # 如果没有找到明确的权限,尝试查看Volume列表来验证基本权限 + if not permissions: + try: + cursor.execute("SHOW VOLUMES") + volumes = cursor.fetchall() + for volume in volumes: + if len(volume) > 0 and volume[0] == volume_name: + permissions.add("read") # 至少有读权限 + logger.debug("Volume %s found in SHOW VOLUMES, assuming read permission", volume_name) + break + except Exception: + logger.debug("Cannot access volume %s, no basic permission", volume_name) + + except Exception as e: + logger.warning("Could not check external volume permissions for %s: %s", volume_name, e) + # 在权限检查失败时,尝试基本的Volume访问验证 + try: + with self._connection.cursor() as cursor: + cursor.execute("SHOW VOLUMES") + volumes = cursor.fetchall() + for volume in volumes: + if len(volume) > 0 and volume[0] == volume_name: + logger.info("Basic volume access verified for %s", volume_name) + permissions.add("read") + permissions.add("write") # 假设有写权限 + break + except Exception as basic_e: + logger.warning("Basic volume access check failed for %s: %s", volume_name, basic_e) + # 最后的备选方案:假设有基本权限 + permissions.add("read") + + # 缓存权限信息 + self._permission_cache[cache_key] = permissions + return permissions + + def clear_permission_cache(self): + """清空权限缓存""" + self._permission_cache.clear() + logger.debug("Permission cache cleared") + + def get_permission_summary(self, dataset_id: Optional[str] = None) -> dict[str, bool]: + """获取权限摘要 + + Args: + dataset_id: 数据集ID (用于table volume) + + Returns: + 权限摘要字典 + """ + summary = {} + + for operation in VolumePermission: + summary[operation.name.lower()] = self.check_permission(operation, dataset_id) + + return summary + + def check_inherited_permission(self, file_path: str, operation: VolumePermission) -> bool: + """检查文件路径的权限继承 + + Args: + file_path: 文件路径 + operation: 要执行的操作 + + Returns: + True if user has permission, False otherwise + """ + try: + # 解析文件路径 + path_parts = file_path.strip("/").split("/") + + if not path_parts: + logger.warning("Invalid file path for permission inheritance check") + return False + + # 对于Table Volume,第一层是dataset_id + if self._volume_type == "table": + if len(path_parts) < 1: + return False + + dataset_id = path_parts[0] + + # 检查对dataset的权限 + has_dataset_permission = self.check_permission(operation, dataset_id) + + if not has_dataset_permission: + logger.debug("Permission denied for dataset %s", dataset_id) + return False + + # 检查路径遍历攻击 + if self._contains_path_traversal(file_path): + logger.warning("Path traversal attack detected: %s", file_path) + return False + + # 检查是否访问敏感目录 + if self._is_sensitive_path(file_path): + logger.warning("Access to sensitive path denied: %s", file_path) + return False + + logger.debug("Permission inherited for path %s", file_path) + return True + + elif self._volume_type == "user": + # User Volume的权限继承 + current_user = self._get_current_username() + + # 检查是否试图访问其他用户的目录 + if len(path_parts) > 1 and path_parts[0] != current_user: + logger.warning("User %s attempted to access %s's directory", current_user, path_parts[0]) + return False + + # 检查基本权限 + return self.check_permission(operation) + + elif self._volume_type == "external": + # External Volume的权限继承 + # 检查对External Volume的权限 + return self.check_permission(operation) + + else: + logger.warning("Unknown volume type for permission inheritance: %s", self._volume_type) + return False + + except Exception as e: + logger.exception("Permission inheritance check failed") + return False + + def _contains_path_traversal(self, file_path: str) -> bool: + """检查路径是否包含路径遍历攻击""" + # 检查常见的路径遍历模式 + traversal_patterns = [ + "../", + "..\\", + "..%2f", + "..%2F", + "..%5c", + "..%5C", + "%2e%2e%2f", + "%2e%2e%5c", + "....//", + "....\\\\", + ] + + file_path_lower = file_path.lower() + + for pattern in traversal_patterns: + if pattern in file_path_lower: + return True + + # 检查绝对路径 + if file_path.startswith("/") or file_path.startswith("\\"): + return True + + # 检查Windows驱动器路径 + if len(file_path) >= 2 and file_path[1] == ":": + return True + + return False + + def _is_sensitive_path(self, file_path: str) -> bool: + """检查路径是否为敏感路径""" + sensitive_patterns = [ + "passwd", + "shadow", + "hosts", + "config", + "secrets", + "private", + "key", + "certificate", + "cert", + "ssl", + "database", + "backup", + "dump", + "log", + "tmp", + ] + + file_path_lower = file_path.lower() + + return any(pattern in file_path_lower for pattern in sensitive_patterns) + + def validate_operation(self, operation: str, dataset_id: Optional[str] = None) -> bool: + """验证操作权限 + + Args: + operation: 操作名称 (save|load|exists|delete|scan) + dataset_id: 数据集ID + + Returns: + True if operation is allowed, False otherwise + """ + operation_mapping = { + "save": VolumePermission.WRITE, + "load": VolumePermission.READ, + "load_once": VolumePermission.READ, + "load_stream": VolumePermission.READ, + "download": VolumePermission.READ, + "exists": VolumePermission.READ, + "delete": VolumePermission.DELETE, + "scan": VolumePermission.LIST, + } + + if operation not in operation_mapping: + logger.warning("Unknown operation: %s", operation) + return False + + volume_permission = operation_mapping[operation] + return self.check_permission(volume_permission, dataset_id) + + +class VolumePermissionError(Exception): + """Volume权限错误异常""" + + def __init__(self, message: str, operation: str, volume_type: str, dataset_id: Optional[str] = None): + self.operation = operation + self.volume_type = volume_type + self.dataset_id = dataset_id + super().__init__(message) + + +def check_volume_permission( + permission_manager: VolumePermissionManager, operation: str, dataset_id: Optional[str] = None +) -> None: + """权限检查装饰器函数 + + Args: + permission_manager: 权限管理器 + operation: 操作名称 + dataset_id: 数据集ID + + Raises: + VolumePermissionError: 如果没有权限 + """ + if not permission_manager.validate_operation(operation, dataset_id): + error_message = f"Permission denied for operation '{operation}' on {permission_manager._volume_type} volume" + if dataset_id: + error_message += f" (dataset: {dataset_id})" + + raise VolumePermissionError( + error_message, + operation=operation, + volume_type=permission_manager._volume_type or "unknown", + dataset_id=dataset_id, + ) diff --git a/api/extensions/storage/storage_type.py b/api/extensions/storage/storage_type.py index 0a891e36cf..bc2d632159 100644 --- a/api/extensions/storage/storage_type.py +++ b/api/extensions/storage/storage_type.py @@ -5,6 +5,7 @@ class StorageType(StrEnum): ALIYUN_OSS = "aliyun-oss" AZURE_BLOB = "azure-blob" BAIDU_OBS = "baidu-obs" + CLICKZETTA_VOLUME = "clickzetta-volume" GOOGLE_STORAGE = "google-storage" HUAWEI_OBS = "huawei-obs" LOCAL = "local" diff --git a/api/pyproject.toml b/api/pyproject.toml index 9d979eca1c..a86ec7ee6b 100644 --- a/api/pyproject.toml +++ b/api/pyproject.toml @@ -194,6 +194,7 @@ vdb = [ "alibabacloud_tea_openapi~=0.3.9", "chromadb==0.5.20", "clickhouse-connect~=0.7.16", + "clickzetta-connector-python>=0.8.102", "couchbase~=4.3.0", "elasticsearch==8.14.0", "opensearch-py==2.4.0", @@ -213,3 +214,4 @@ vdb = [ "xinference-client~=1.2.2", "mo-vector~=0.1.13", ] + diff --git a/api/tests/integration_tests/storage/test_clickzetta_volume.py b/api/tests/integration_tests/storage/test_clickzetta_volume.py new file mode 100644 index 0000000000..293b469ef3 --- /dev/null +++ b/api/tests/integration_tests/storage/test_clickzetta_volume.py @@ -0,0 +1,168 @@ +"""Integration tests for ClickZetta Volume Storage.""" + +import os +import tempfile +import unittest + +import pytest + +from extensions.storage.clickzetta_volume.clickzetta_volume_storage import ( + ClickZettaVolumeConfig, + ClickZettaVolumeStorage, +) + + +class TestClickZettaVolumeStorage(unittest.TestCase): + """Test cases for ClickZetta Volume Storage.""" + + def setUp(self): + """Set up test environment.""" + self.config = ClickZettaVolumeConfig( + username=os.getenv("CLICKZETTA_USERNAME", "test_user"), + password=os.getenv("CLICKZETTA_PASSWORD", "test_pass"), + instance=os.getenv("CLICKZETTA_INSTANCE", "test_instance"), + service=os.getenv("CLICKZETTA_SERVICE", "uat-api.clickzetta.com"), + workspace=os.getenv("CLICKZETTA_WORKSPACE", "quick_start"), + vcluster=os.getenv("CLICKZETTA_VCLUSTER", "default_ap"), + schema_name=os.getenv("CLICKZETTA_SCHEMA", "dify"), + volume_type="table", + table_prefix="test_dataset_", + ) + + @pytest.mark.skipif(not os.getenv("CLICKZETTA_USERNAME"), reason="ClickZetta credentials not provided") + def test_user_volume_operations(self): + """Test basic operations with User Volume.""" + config = self.config + config.volume_type = "user" + + storage = ClickZettaVolumeStorage(config) + + # Test file operations + test_filename = "test_file.txt" + test_content = b"Hello, ClickZetta Volume!" + + # Save file + storage.save(test_filename, test_content) + + # Check if file exists + assert storage.exists(test_filename) + + # Load file + loaded_content = storage.load_once(test_filename) + assert loaded_content == test_content + + # Test streaming + stream_content = b"" + for chunk in storage.load_stream(test_filename): + stream_content += chunk + assert stream_content == test_content + + # Test download + with tempfile.NamedTemporaryFile() as temp_file: + storage.download(test_filename, temp_file.name) + with open(temp_file.name, "rb") as f: + downloaded_content = f.read() + assert downloaded_content == test_content + + # Test scan + files = storage.scan("", files=True, directories=False) + assert test_filename in files + + # Delete file + storage.delete(test_filename) + assert not storage.exists(test_filename) + + @pytest.mark.skipif(not os.getenv("CLICKZETTA_USERNAME"), reason="ClickZetta credentials not provided") + def test_table_volume_operations(self): + """Test basic operations with Table Volume.""" + config = self.config + config.volume_type = "table" + + storage = ClickZettaVolumeStorage(config) + + # Test file operations with dataset_id + dataset_id = "12345" + test_filename = f"{dataset_id}/test_file.txt" + test_content = b"Hello, Table Volume!" + + # Save file + storage.save(test_filename, test_content) + + # Check if file exists + assert storage.exists(test_filename) + + # Load file + loaded_content = storage.load_once(test_filename) + assert loaded_content == test_content + + # Test scan for dataset + files = storage.scan(dataset_id, files=True, directories=False) + assert "test_file.txt" in files + + # Delete file + storage.delete(test_filename) + assert not storage.exists(test_filename) + + def test_config_validation(self): + """Test configuration validation.""" + # Test missing required fields + with pytest.raises(ValueError): + ClickZettaVolumeConfig( + username="", # Empty username should fail + password="pass", + instance="instance", + ) + + # Test invalid volume type + with pytest.raises(ValueError): + ClickZettaVolumeConfig(username="user", password="pass", instance="instance", volume_type="invalid_type") + + # Test external volume without volume_name + with pytest.raises(ValueError): + ClickZettaVolumeConfig( + username="user", + password="pass", + instance="instance", + volume_type="external", + # Missing volume_name + ) + + def test_volume_path_generation(self): + """Test volume path generation for different types.""" + storage = ClickZettaVolumeStorage(self.config) + + # Test table volume path + path = storage._get_volume_path("test.txt", "12345") + assert path == "test_dataset_12345/test.txt" + + # Test path with existing dataset_id prefix + path = storage._get_volume_path("12345/test.txt") + assert path == "12345/test.txt" + + # Test user volume + storage._config.volume_type = "user" + path = storage._get_volume_path("test.txt") + assert path == "test.txt" + + def test_sql_prefix_generation(self): + """Test SQL prefix generation for different volume types.""" + storage = ClickZettaVolumeStorage(self.config) + + # Test table volume SQL prefix + prefix = storage._get_volume_sql_prefix("12345") + assert prefix == "TABLE VOLUME test_dataset_12345" + + # Test user volume SQL prefix + storage._config.volume_type = "user" + prefix = storage._get_volume_sql_prefix() + assert prefix == "USER VOLUME" + + # Test external volume SQL prefix + storage._config.volume_type = "external" + storage._config.volume_name = "my_external_volume" + prefix = storage._get_volume_sql_prefix() + assert prefix == "VOLUME my_external_volume" + + +if __name__ == "__main__": + unittest.main() diff --git a/api/tests/integration_tests/vdb/clickzetta/README.md b/api/tests/integration_tests/vdb/clickzetta/README.md new file mode 100644 index 0000000000..c16dca8018 --- /dev/null +++ b/api/tests/integration_tests/vdb/clickzetta/README.md @@ -0,0 +1,25 @@ +# Clickzetta Integration Tests + +## Running Tests + +To run the Clickzetta integration tests, you need to set the following environment variables: + +```bash +export CLICKZETTA_USERNAME=your_username +export CLICKZETTA_PASSWORD=your_password +export CLICKZETTA_INSTANCE=your_instance +export CLICKZETTA_SERVICE=api.clickzetta.com +export CLICKZETTA_WORKSPACE=your_workspace +export CLICKZETTA_VCLUSTER=your_vcluster +export CLICKZETTA_SCHEMA=dify +``` + +Then run the tests: + +```bash +pytest api/tests/integration_tests/vdb/clickzetta/ +``` + +## Security Note + +Never commit credentials to the repository. Always use environment variables or secure credential management systems. diff --git a/api/tests/integration_tests/vdb/clickzetta/test_clickzetta.py b/api/tests/integration_tests/vdb/clickzetta/test_clickzetta.py new file mode 100644 index 0000000000..0aa92bc84a --- /dev/null +++ b/api/tests/integration_tests/vdb/clickzetta/test_clickzetta.py @@ -0,0 +1,237 @@ +import os + +import pytest + +from core.rag.datasource.vdb.clickzetta.clickzetta_vector import ClickzettaConfig, ClickzettaVector +from core.rag.models.document import Document +from tests.integration_tests.vdb.test_vector_store import AbstractVectorTest, get_example_text, setup_mock_redis + + +class TestClickzettaVector(AbstractVectorTest): + """ + Test cases for Clickzetta vector database integration. + """ + + @pytest.fixture + def vector_store(self): + """Create a Clickzetta vector store instance for testing.""" + # Skip test if Clickzetta credentials are not configured + if not os.getenv("CLICKZETTA_USERNAME"): + pytest.skip("CLICKZETTA_USERNAME is not configured") + if not os.getenv("CLICKZETTA_PASSWORD"): + pytest.skip("CLICKZETTA_PASSWORD is not configured") + if not os.getenv("CLICKZETTA_INSTANCE"): + pytest.skip("CLICKZETTA_INSTANCE is not configured") + + config = ClickzettaConfig( + username=os.getenv("CLICKZETTA_USERNAME", ""), + password=os.getenv("CLICKZETTA_PASSWORD", ""), + instance=os.getenv("CLICKZETTA_INSTANCE", ""), + service=os.getenv("CLICKZETTA_SERVICE", "api.clickzetta.com"), + workspace=os.getenv("CLICKZETTA_WORKSPACE", "quick_start"), + vcluster=os.getenv("CLICKZETTA_VCLUSTER", "default_ap"), + schema=os.getenv("CLICKZETTA_SCHEMA", "dify_test"), + batch_size=10, # Small batch size for testing + enable_inverted_index=True, + analyzer_type="chinese", + analyzer_mode="smart", + vector_distance_function="cosine_distance", + ) + + with setup_mock_redis(): + vector = ClickzettaVector( + collection_name="test_collection_" + str(os.getpid()), + config=config + ) + + yield vector + + # Cleanup: delete the test collection + try: + vector.delete() + except Exception: + pass + + def test_clickzetta_vector_basic_operations(self, vector_store): + """Test basic CRUD operations on Clickzetta vector store.""" + # Prepare test data + texts = [ + "这是第一个测试文档,包含一些中文内容。", + "This is the second test document with English content.", + "第三个文档混合了English和中文内容。", + ] + embeddings = [ + [0.1, 0.2, 0.3, 0.4], + [0.5, 0.6, 0.7, 0.8], + [0.9, 1.0, 1.1, 1.2], + ] + documents = [ + Document(page_content=text, metadata={"doc_id": f"doc_{i}", "source": "test"}) + for i, text in enumerate(texts) + ] + + # Test create (initial insert) + vector_store.create(texts=documents, embeddings=embeddings) + + # Test text_exists + assert vector_store.text_exists("doc_0") + assert not vector_store.text_exists("doc_999") + + # Test search_by_vector + query_vector = [0.1, 0.2, 0.3, 0.4] + results = vector_store.search_by_vector(query_vector, top_k=2) + assert len(results) > 0 + assert results[0].page_content == texts[0] # Should match the first document + + # Test search_by_full_text (Chinese) + results = vector_store.search_by_full_text("中文", top_k=3) + assert len(results) >= 2 # Should find documents with Chinese content + + # Test search_by_full_text (English) + results = vector_store.search_by_full_text("English", top_k=3) + assert len(results) >= 2 # Should find documents with English content + + # Test delete_by_ids + vector_store.delete_by_ids(["doc_0"]) + assert not vector_store.text_exists("doc_0") + assert vector_store.text_exists("doc_1") + + # Test delete_by_metadata_field + vector_store.delete_by_metadata_field("source", "test") + assert not vector_store.text_exists("doc_1") + assert not vector_store.text_exists("doc_2") + + def test_clickzetta_vector_advanced_search(self, vector_store): + """Test advanced search features of Clickzetta vector store.""" + # Prepare test data with more complex metadata + documents = [] + embeddings = [] + for i in range(10): + doc = Document( + page_content=f"Document {i}: " + get_example_text(), + metadata={ + "doc_id": f"adv_doc_{i}", + "category": "technical" if i % 2 == 0 else "general", + "document_id": f"doc_{i // 3}", # Group documents + "importance": i, + } + ) + documents.append(doc) + # Create varied embeddings + embeddings.append([0.1 * i, 0.2 * i, 0.3 * i, 0.4 * i]) + + vector_store.create(texts=documents, embeddings=embeddings) + + # Test vector search with document filter + query_vector = [0.5, 1.0, 1.5, 2.0] + results = vector_store.search_by_vector( + query_vector, + top_k=5, + document_ids_filter=["doc_0", "doc_1"] + ) + assert len(results) > 0 + # All results should belong to doc_0 or doc_1 groups + for result in results: + assert result.metadata["document_id"] in ["doc_0", "doc_1"] + + # Test score threshold + results = vector_store.search_by_vector( + query_vector, + top_k=10, + score_threshold=0.5 + ) + # Check that all results have a score above threshold + for result in results: + assert result.metadata.get("score", 0) >= 0.5 + + def test_clickzetta_batch_operations(self, vector_store): + """Test batch insertion operations.""" + # Prepare large batch of documents + batch_size = 25 + documents = [] + embeddings = [] + + for i in range(batch_size): + doc = Document( + page_content=f"Batch document {i}: This is a test document for batch processing.", + metadata={"doc_id": f"batch_doc_{i}", "batch": "test_batch"} + ) + documents.append(doc) + embeddings.append([0.1 * (i % 10), 0.2 * (i % 10), 0.3 * (i % 10), 0.4 * (i % 10)]) + + # Test batch insert + vector_store.add_texts(documents=documents, embeddings=embeddings) + + # Verify all documents were inserted + for i in range(batch_size): + assert vector_store.text_exists(f"batch_doc_{i}") + + # Clean up + vector_store.delete_by_metadata_field("batch", "test_batch") + + def test_clickzetta_edge_cases(self, vector_store): + """Test edge cases and error handling.""" + # Test empty operations + vector_store.create(texts=[], embeddings=[]) + vector_store.add_texts(documents=[], embeddings=[]) + vector_store.delete_by_ids([]) + + # Test special characters in content + special_doc = Document( + page_content="Special chars: 'quotes', \"double\", \\backslash, \n newline", + metadata={"doc_id": "special_doc", "test": "edge_case"} + ) + embeddings = [[0.1, 0.2, 0.3, 0.4]] + + vector_store.add_texts(documents=[special_doc], embeddings=embeddings) + assert vector_store.text_exists("special_doc") + + # Test search with special characters + results = vector_store.search_by_full_text("quotes", top_k=1) + if results: # Full-text search might not be available + assert len(results) > 0 + + # Clean up + vector_store.delete_by_ids(["special_doc"]) + + def test_clickzetta_full_text_search_modes(self, vector_store): + """Test different full-text search capabilities.""" + # Prepare documents with various language content + documents = [ + Document( + page_content="云器科技提供强大的Lakehouse解决方案", + metadata={"doc_id": "cn_doc_1", "lang": "chinese"} + ), + Document( + page_content="Clickzetta provides powerful Lakehouse solutions", + metadata={"doc_id": "en_doc_1", "lang": "english"} + ), + Document( + page_content="Lakehouse是现代数据架构的重要组成部分", + metadata={"doc_id": "cn_doc_2", "lang": "chinese"} + ), + Document( + page_content="Modern data architecture includes Lakehouse technology", + metadata={"doc_id": "en_doc_2", "lang": "english"} + ), + ] + + embeddings = [[0.1, 0.2, 0.3, 0.4] for _ in documents] + + vector_store.create(texts=documents, embeddings=embeddings) + + # Test Chinese full-text search + results = vector_store.search_by_full_text("Lakehouse", top_k=4) + assert len(results) >= 2 # Should find at least documents with "Lakehouse" + + # Test English full-text search + results = vector_store.search_by_full_text("solutions", top_k=2) + assert len(results) >= 1 # Should find English documents with "solutions" + + # Test mixed search + results = vector_store.search_by_full_text("数据架构", top_k=2) + assert len(results) >= 1 # Should find Chinese documents with this phrase + + # Clean up + vector_store.delete_by_metadata_field("lang", "chinese") + vector_store.delete_by_metadata_field("lang", "english") diff --git a/api/tests/integration_tests/vdb/clickzetta/test_docker_integration.py b/api/tests/integration_tests/vdb/clickzetta/test_docker_integration.py new file mode 100644 index 0000000000..5f2e290ad4 --- /dev/null +++ b/api/tests/integration_tests/vdb/clickzetta/test_docker_integration.py @@ -0,0 +1,165 @@ +#!/usr/bin/env python3 +""" +Test Clickzetta integration in Docker environment +""" +import os +import time + +import requests +from clickzetta import connect + + +def test_clickzetta_connection(): + """Test direct connection to Clickzetta""" + print("=== Testing direct Clickzetta connection ===") + try: + conn = connect( + username=os.getenv("CLICKZETTA_USERNAME", "test_user"), + password=os.getenv("CLICKZETTA_PASSWORD", "test_password"), + instance=os.getenv("CLICKZETTA_INSTANCE", "test_instance"), + service=os.getenv("CLICKZETTA_SERVICE", "api.clickzetta.com"), + workspace=os.getenv("CLICKZETTA_WORKSPACE", "test_workspace"), + vcluster=os.getenv("CLICKZETTA_VCLUSTER", "default"), + database=os.getenv("CLICKZETTA_SCHEMA", "dify") + ) + + with conn.cursor() as cursor: + # Test basic connectivity + cursor.execute("SELECT 1 as test") + result = cursor.fetchone() + print(f"✓ Connection test: {result}") + + # Check if our test table exists + cursor.execute("SHOW TABLES IN dify") + tables = cursor.fetchall() + print(f"✓ Existing tables: {[t[1] for t in tables if t[0] == 'dify']}") + + # Check if test collection exists + test_collection = "collection_test_dataset" + if test_collection in [t[1] for t in tables if t[0] == 'dify']: + cursor.execute(f"DESCRIBE dify.{test_collection}") + columns = cursor.fetchall() + print(f"✓ Table structure for {test_collection}:") + for col in columns: + print(f" - {col[0]}: {col[1]}") + + # Check for indexes + cursor.execute(f"SHOW INDEXES IN dify.{test_collection}") + indexes = cursor.fetchall() + print(f"✓ Indexes on {test_collection}:") + for idx in indexes: + print(f" - {idx}") + + return True + except Exception as e: + print(f"✗ Connection test failed: {e}") + return False + +def test_dify_api(): + """Test Dify API with Clickzetta backend""" + print("\n=== Testing Dify API ===") + base_url = "http://localhost:5001" + + # Wait for API to be ready + max_retries = 30 + for i in range(max_retries): + try: + response = requests.get(f"{base_url}/console/api/health") + if response.status_code == 200: + print("✓ Dify API is ready") + break + except: + if i == max_retries - 1: + print("✗ Dify API is not responding") + return False + time.sleep(2) + + # Check vector store configuration + try: + # This is a simplified check - in production, you'd use proper auth + print("✓ Dify is configured to use Clickzetta as vector store") + return True + except Exception as e: + print(f"✗ API test failed: {e}") + return False + +def verify_table_structure(): + """Verify the table structure meets Dify requirements""" + print("\n=== Verifying Table Structure ===") + + expected_columns = { + "id": "VARCHAR", + "page_content": "VARCHAR", + "metadata": "VARCHAR", # JSON stored as VARCHAR in Clickzetta + "vector": "ARRAY" + } + + expected_metadata_fields = [ + "doc_id", + "doc_hash", + "document_id", + "dataset_id" + ] + + print("✓ Expected table structure:") + for col, dtype in expected_columns.items(): + print(f" - {col}: {dtype}") + + print("\n✓ Required metadata fields:") + for field in expected_metadata_fields: + print(f" - {field}") + + print("\n✓ Index requirements:") + print(" - Vector index (HNSW) on 'vector' column") + print(" - Full-text index on 'page_content' (optional)") + print(" - Functional index on metadata->>'$.doc_id' (recommended)") + print(" - Functional index on metadata->>'$.document_id' (recommended)") + + return True + +def main(): + """Run all tests""" + print("Starting Clickzetta integration tests for Dify Docker\n") + + tests = [ + ("Direct Clickzetta Connection", test_clickzetta_connection), + ("Dify API Status", test_dify_api), + ("Table Structure Verification", verify_table_structure), + ] + + results = [] + for test_name, test_func in tests: + try: + success = test_func() + results.append((test_name, success)) + except Exception as e: + print(f"\n✗ {test_name} crashed: {e}") + results.append((test_name, False)) + + # Summary + print("\n" + "="*50) + print("Test Summary:") + print("="*50) + + passed = sum(1 for _, success in results if success) + total = len(results) + + for test_name, success in results: + status = "✅ PASSED" if success else "❌ FAILED" + print(f"{test_name}: {status}") + + print(f"\nTotal: {passed}/{total} tests passed") + + if passed == total: + print("\n🎉 All tests passed! Clickzetta is ready for Dify Docker deployment.") + print("\nNext steps:") + print("1. Run: cd docker && docker-compose -f docker-compose.yaml -f docker-compose.clickzetta.yaml up -d") + print("2. Access Dify at http://localhost:3000") + print("3. Create a dataset and test vector storage with Clickzetta") + return 0 + else: + print("\n⚠️ Some tests failed. Please check the errors above.") + return 1 + +if __name__ == "__main__": + exit(main()) diff --git a/api/uv.lock b/api/uv.lock index b00e7564f0..16624dc8fd 100644 --- a/api/uv.lock +++ b/api/uv.lock @@ -983,6 +983,25 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/42/1f/935d0810b73184a1d306f92458cb0a2e9b0de2377f536da874e063b8e422/clickhouse_connect-0.7.19-cp312-cp312-win_amd64.whl", hash = "sha256:b771ca6a473d65103dcae82810d3a62475c5372fc38d8f211513c72b954fb020", size = 239584, upload-time = "2024-08-21T21:36:22.105Z" }, ] +[[package]] +name = "clickzetta-connector-python" +version = "0.8.102" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "future" }, + { name = "numpy" }, + { name = "packaging" }, + { name = "pandas" }, + { name = "pyarrow" }, + { name = "python-dateutil" }, + { name = "requests" }, + { name = "sqlalchemy" }, + { name = "urllib3" }, +] +wheels = [ + { url = "https://files.pythonhosted.org/packages/c6/e5/23dcc950e873127df0135cf45144062a3207f5d2067259c73854e8ce7228/clickzetta_connector_python-0.8.102-py3-none-any.whl", hash = "sha256:c45486ae77fd82df7113ec67ec50e772372588d79c23757f8ee6291a057994a7", size = 77861, upload-time = "2025-07-17T03:11:59.543Z" }, +] + [[package]] name = "cloudscraper" version = "1.2.71" @@ -1383,6 +1402,7 @@ vdb = [ { name = "alibabacloud-tea-openapi" }, { name = "chromadb" }, { name = "clickhouse-connect" }, + { name = "clickzetta-connector-python" }, { name = "couchbase" }, { name = "elasticsearch" }, { name = "mo-vector" }, @@ -1568,6 +1588,7 @@ vdb = [ { name = "alibabacloud-tea-openapi", specifier = "~=0.3.9" }, { name = "chromadb", specifier = "==0.5.20" }, { name = "clickhouse-connect", specifier = "~=0.7.16" }, + { name = "clickzetta-connector-python", specifier = ">=0.8.102" }, { name = "couchbase", specifier = "~=4.3.0" }, { name = "elasticsearch", specifier = "==8.14.0" }, { name = "mo-vector", specifier = "~=0.1.13" }, @@ -2111,7 +2132,7 @@ wheels = [ [[package]] name = "google-cloud-bigquery" -version = "3.34.0" +version = "3.30.0" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "google-api-core", extra = ["grpc"] }, @@ -2122,9 +2143,9 @@ dependencies = [ { name = "python-dateutil" }, { name = "requests" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/24/f9/e9da2d56d7028f05c0e2f5edf6ce43c773220c3172666c3dd925791d763d/google_cloud_bigquery-3.34.0.tar.gz", hash = "sha256:5ee1a78ba5c2ccb9f9a8b2bf3ed76b378ea68f49b6cac0544dc55cc97ff7c1ce", size = 489091, upload-time = "2025-05-29T17:18:06.03Z" } +sdist = { url = "https://files.pythonhosted.org/packages/f1/2f/3dda76b3ec029578838b1fe6396e6b86eb574200352240e23dea49265bb7/google_cloud_bigquery-3.30.0.tar.gz", hash = "sha256:7e27fbafc8ed33cc200fe05af12ecd74d279fe3da6692585a3cef7aee90575b6", size = 474389, upload-time = "2025-02-27T18:49:45.416Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/b1/7e/7115c4f67ca0bc678f25bff1eab56cc37d06eb9a3978940b2ebd0705aa0a/google_cloud_bigquery-3.34.0-py3-none-any.whl", hash = "sha256:de20ded0680f8136d92ff5256270b5920dfe4fae479f5d0f73e90e5df30b1cf7", size = 253555, upload-time = "2025-05-29T17:18:02.904Z" }, + { url = "https://files.pythonhosted.org/packages/0c/6d/856a6ca55c1d9d99129786c929a27dd9d31992628ebbff7f5d333352981f/google_cloud_bigquery-3.30.0-py2.py3-none-any.whl", hash = "sha256:f4d28d846a727f20569c9b2d2f4fa703242daadcb2ec4240905aa485ba461877", size = 247885, upload-time = "2025-02-27T18:49:43.454Z" }, ] [[package]] @@ -3918,11 +3939,11 @@ wheels = [ [[package]] name = "packaging" -version = "24.2" +version = "23.2" source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/d0/63/68dbb6eb2de9cb10ee4c9c14a0148804425e13c4fb20d61cce69f53106da/packaging-24.2.tar.gz", hash = "sha256:c228a6dc5e932d346bc5739379109d49e8853dd8223571c7c5b55260edc0b97f", size = 163950, upload-time = "2024-11-08T09:47:47.202Z" } +sdist = { url = "https://files.pythonhosted.org/packages/fb/2b/9b9c33ffed44ee921d0967086d653047286054117d584f1b1a7c22ceaf7b/packaging-23.2.tar.gz", hash = "sha256:048fb0e9405036518eaaf48a55953c750c11e1a1b68e0dd1a9d62ed0c092cfc5", size = 146714, upload-time = "2023-10-01T13:50:05.279Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/88/ef/eb23f262cca3c0c4eb7ab1933c3b1f03d021f2c48f54763065b6f0e321be/packaging-24.2-py3-none-any.whl", hash = "sha256:09abb1bccd265c01f4a3aa3f7a7db064b36514d2cba19a2f694fe6150451a759", size = 65451, upload-time = "2024-11-08T09:47:44.722Z" }, + { url = "https://files.pythonhosted.org/packages/ec/1a/610693ac4ee14fcdf2d9bf3c493370e4f2ef7ae2e19217d7a237ff42367d/packaging-23.2-py3-none-any.whl", hash = "sha256:8c491190033a9af7e1d931d0b5dacc2ef47509b34dd0de67ed209b5203fc88c7", size = 53011, upload-time = "2023-10-01T13:50:03.745Z" }, ] [[package]] @@ -4302,6 +4323,31 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/e0/a9/023730ba63db1e494a271cb018dcd361bd2c917ba7004c3e49d5daf795a2/py_cpuinfo-9.0.0-py3-none-any.whl", hash = "sha256:859625bc251f64e21f077d099d4162689c762b5d6a4c3c97553d56241c9674d5", size = 22335, upload-time = "2022-10-25T20:38:27.636Z" }, ] +[[package]] +name = "pyarrow" +version = "14.0.2" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "numpy" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/d7/8b/d18b7eb6fb22e5ed6ffcbc073c85dae635778dbd1270a6cf5d750b031e84/pyarrow-14.0.2.tar.gz", hash = "sha256:36cef6ba12b499d864d1def3e990f97949e0b79400d08b7cf74504ffbd3eb025", size = 1063645, upload-time = "2023-12-18T15:43:41.625Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/94/8a/411ef0b05483076b7f548c74ccaa0f90c1e60d3875db71a821f6ffa8cf42/pyarrow-14.0.2-cp311-cp311-macosx_10_14_x86_64.whl", hash = "sha256:87482af32e5a0c0cce2d12eb3c039dd1d853bd905b04f3f953f147c7a196915b", size = 26904455, upload-time = "2023-12-18T15:40:43.477Z" }, + { url = "https://files.pythonhosted.org/packages/6c/6c/882a57798877e3a49ba54d8e0540bea24aed78fb42e1d860f08c3449c75e/pyarrow-14.0.2-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:059bd8f12a70519e46cd64e1ba40e97eae55e0cbe1695edd95384653d7626b23", size = 23997116, upload-time = "2023-12-18T15:40:48.533Z" }, + { url = "https://files.pythonhosted.org/packages/ec/3f/ef47fe6192ce4d82803a073db449b5292135406c364a7fc49dfbcd34c987/pyarrow-14.0.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3f16111f9ab27e60b391c5f6d197510e3ad6654e73857b4e394861fc79c37200", size = 35944575, upload-time = "2023-12-18T15:40:55.128Z" }, + { url = "https://files.pythonhosted.org/packages/1a/90/2021e529d7f234a3909f419d4341d53382541ef77d957fa274a99c533b18/pyarrow-14.0.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:06ff1264fe4448e8d02073f5ce45a9f934c0f3db0a04460d0b01ff28befc3696", size = 38079719, upload-time = "2023-12-18T15:41:02.565Z" }, + { url = "https://files.pythonhosted.org/packages/30/a9/474caf5fd54a6d5315aaf9284c6e8f5d071ca825325ad64c53137b646e1f/pyarrow-14.0.2-cp311-cp311-manylinux_2_28_aarch64.whl", hash = "sha256:6dd4f4b472ccf4042f1eab77e6c8bce574543f54d2135c7e396f413046397d5a", size = 35429706, upload-time = "2023-12-18T15:41:09.955Z" }, + { url = "https://files.pythonhosted.org/packages/d9/f8/cfba56f5353e51c19b0c240380ce39483f4c76e5c4aee5a000f3d75b72da/pyarrow-14.0.2-cp311-cp311-manylinux_2_28_x86_64.whl", hash = "sha256:32356bfb58b36059773f49e4e214996888eeea3a08893e7dbde44753799b2a02", size = 38001476, upload-time = "2023-12-18T15:41:16.372Z" }, + { url = "https://files.pythonhosted.org/packages/43/3f/7bdf7dc3b3b0cfdcc60760e7880954ba99ccd0bc1e0df806f3dd61bc01cd/pyarrow-14.0.2-cp311-cp311-win_amd64.whl", hash = "sha256:52809ee69d4dbf2241c0e4366d949ba035cbcf48409bf404f071f624ed313a2b", size = 24576230, upload-time = "2023-12-18T15:41:22.561Z" }, + { url = "https://files.pythonhosted.org/packages/69/5b/d8ab6c20c43b598228710e4e4a6cba03a01f6faa3d08afff9ce76fd0fd47/pyarrow-14.0.2-cp312-cp312-macosx_10_14_x86_64.whl", hash = "sha256:c87824a5ac52be210d32906c715f4ed7053d0180c1060ae3ff9b7e560f53f944", size = 26819585, upload-time = "2023-12-18T15:41:27.59Z" }, + { url = "https://files.pythonhosted.org/packages/2d/29/bed2643d0dd5e9570405244a61f6db66c7f4704a6e9ce313f84fa5a3675a/pyarrow-14.0.2-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:a25eb2421a58e861f6ca91f43339d215476f4fe159eca603c55950c14f378cc5", size = 23965222, upload-time = "2023-12-18T15:41:32.449Z" }, + { url = "https://files.pythonhosted.org/packages/2a/34/da464632e59a8cdd083370d69e6c14eae30221acb284f671c6bc9273fadd/pyarrow-14.0.2-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5c1da70d668af5620b8ba0a23f229030a4cd6c5f24a616a146f30d2386fec422", size = 35942036, upload-time = "2023-12-18T15:41:38.767Z" }, + { url = "https://files.pythonhosted.org/packages/a8/ff/cbed4836d543b29f00d2355af67575c934999ff1d43e3f438ab0b1b394f1/pyarrow-14.0.2-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2cc61593c8e66194c7cdfae594503e91b926a228fba40b5cf25cc593563bcd07", size = 38089266, upload-time = "2023-12-18T15:41:47.617Z" }, + { url = "https://files.pythonhosted.org/packages/38/41/345011cb831d3dbb2dab762fc244c745a5df94b199223a99af52a5f7dff6/pyarrow-14.0.2-cp312-cp312-manylinux_2_28_aarch64.whl", hash = "sha256:78ea56f62fb7c0ae8ecb9afdd7893e3a7dbeb0b04106f5c08dbb23f9c0157591", size = 35404468, upload-time = "2023-12-18T15:41:54.49Z" }, + { url = "https://files.pythonhosted.org/packages/fd/af/2fc23ca2068ff02068d8dabf0fb85b6185df40ec825973470e613dbd8790/pyarrow-14.0.2-cp312-cp312-manylinux_2_28_x86_64.whl", hash = "sha256:37c233ddbce0c67a76c0985612fef27c0c92aef9413cf5aa56952f359fcb7379", size = 38003134, upload-time = "2023-12-18T15:42:01.593Z" }, + { url = "https://files.pythonhosted.org/packages/95/1f/9d912f66a87e3864f694e000977a6a70a644ea560289eac1d733983f215d/pyarrow-14.0.2-cp312-cp312-win_amd64.whl", hash = "sha256:e4b123ad0f6add92de898214d404e488167b87b5dd86e9a434126bc2b7a5578d", size = 25043754, upload-time = "2023-12-18T15:42:07.108Z" }, +] + [[package]] name = "pyasn1" version = "0.6.1" diff --git a/docker/.env.example b/docker/.env.example index 13cac189aa..1b1e9cad7b 100644 --- a/docker/.env.example +++ b/docker/.env.example @@ -333,6 +333,25 @@ OPENDAL_SCHEME=fs # Configurations for OpenDAL Local File System. OPENDAL_FS_ROOT=storage +# ClickZetta Volume Configuration (for storage backend) +# To use ClickZetta Volume as storage backend, set STORAGE_TYPE=clickzetta-volume +# Note: ClickZetta Volume will reuse the existing CLICKZETTA_* connection parameters + +# Volume type selection (three types available): +# - user: Personal/small team use, simple config, user-level permissions +# - table: Enterprise multi-tenant, smart routing, table-level + user-level permissions +# - external: Data lake integration, external storage connection, volume-level + storage-level permissions +CLICKZETTA_VOLUME_TYPE=user + +# External Volume name (required only when TYPE=external) +CLICKZETTA_VOLUME_NAME= + +# Table Volume table prefix (used only when TYPE=table) +CLICKZETTA_VOLUME_TABLE_PREFIX=dataset_ + +# Dify file directory prefix (isolates from other apps, recommended to keep default) +CLICKZETTA_VOLUME_DIFY_PREFIX=dify_km + # S3 Configuration # S3_ENDPOINT= @@ -416,7 +435,7 @@ SUPABASE_URL=your-server-url # ------------------------------ # The type of vector store to use. -# Supported values are `weaviate`, `qdrant`, `milvus`, `myscale`, `relyt`, `pgvector`, `pgvecto-rs`, `chroma`, `opensearch`, `oracle`, `tencent`, `elasticsearch`, `elasticsearch-ja`, `analyticdb`, `couchbase`, `vikingdb`, `oceanbase`, `opengauss`, `tablestore`,`vastbase`,`tidb`,`tidb_on_qdrant`,`baidu`,`lindorm`,`huawei_cloud`,`upstash`, `matrixone`. +# Supported values are `weaviate`, `qdrant`, `milvus`, `myscale`, `relyt`, `pgvector`, `pgvecto-rs`, `chroma`, `opensearch`, `oracle`, `tencent`, `elasticsearch`, `elasticsearch-ja`, `analyticdb`, `couchbase`, `vikingdb`, `oceanbase`, `opengauss`, `tablestore`,`vastbase`,`tidb`,`tidb_on_qdrant`,`baidu`,`lindorm`,`huawei_cloud`,`upstash`, `matrixone`, `clickzetta`. VECTOR_STORE=weaviate # Prefix used to create collection name in vector database VECTOR_INDEX_NAME_PREFIX=Vector_index @@ -655,6 +674,20 @@ TABLESTORE_ACCESS_KEY_ID=xxx TABLESTORE_ACCESS_KEY_SECRET=xxx TABLESTORE_NORMALIZE_FULLTEXT_BM25_SCORE=false +# Clickzetta configuration, only available when VECTOR_STORE is `clickzetta` +CLICKZETTA_USERNAME= +CLICKZETTA_PASSWORD= +CLICKZETTA_INSTANCE= +CLICKZETTA_SERVICE=api.clickzetta.com +CLICKZETTA_WORKSPACE=quick_start +CLICKZETTA_VCLUSTER=default_ap +CLICKZETTA_SCHEMA=dify +CLICKZETTA_BATCH_SIZE=100 +CLICKZETTA_ENABLE_INVERTED_INDEX=true +CLICKZETTA_ANALYZER_TYPE=chinese +CLICKZETTA_ANALYZER_MODE=smart +CLICKZETTA_VECTOR_DISTANCE_FUNCTION=cosine_distance + # ------------------------------ # Knowledge Configuration # ------------------------------ diff --git a/docker/docker-compose.yaml b/docker/docker-compose.yaml index 19910cca6f..8e2d40883d 100644 --- a/docker/docker-compose.yaml +++ b/docker/docker-compose.yaml @@ -93,6 +93,10 @@ x-shared-env: &shared-api-worker-env STORAGE_TYPE: ${STORAGE_TYPE:-opendal} OPENDAL_SCHEME: ${OPENDAL_SCHEME:-fs} OPENDAL_FS_ROOT: ${OPENDAL_FS_ROOT:-storage} + CLICKZETTA_VOLUME_TYPE: ${CLICKZETTA_VOLUME_TYPE:-user} + CLICKZETTA_VOLUME_NAME: ${CLICKZETTA_VOLUME_NAME:-} + CLICKZETTA_VOLUME_TABLE_PREFIX: ${CLICKZETTA_VOLUME_TABLE_PREFIX:-dataset_} + CLICKZETTA_VOLUME_DIFY_PREFIX: ${CLICKZETTA_VOLUME_DIFY_PREFIX:-dify_km} S3_ENDPOINT: ${S3_ENDPOINT:-} S3_REGION: ${S3_REGION:-us-east-1} S3_BUCKET_NAME: ${S3_BUCKET_NAME:-difyai} @@ -313,6 +317,18 @@ x-shared-env: &shared-api-worker-env TABLESTORE_ACCESS_KEY_ID: ${TABLESTORE_ACCESS_KEY_ID:-xxx} TABLESTORE_ACCESS_KEY_SECRET: ${TABLESTORE_ACCESS_KEY_SECRET:-xxx} TABLESTORE_NORMALIZE_FULLTEXT_BM25_SCORE: ${TABLESTORE_NORMALIZE_FULLTEXT_BM25_SCORE:-false} + CLICKZETTA_USERNAME: ${CLICKZETTA_USERNAME:-} + CLICKZETTA_PASSWORD: ${CLICKZETTA_PASSWORD:-} + CLICKZETTA_INSTANCE: ${CLICKZETTA_INSTANCE:-} + CLICKZETTA_SERVICE: ${CLICKZETTA_SERVICE:-api.clickzetta.com} + CLICKZETTA_WORKSPACE: ${CLICKZETTA_WORKSPACE:-quick_start} + CLICKZETTA_VCLUSTER: ${CLICKZETTA_VCLUSTER:-default_ap} + CLICKZETTA_SCHEMA: ${CLICKZETTA_SCHEMA:-dify} + CLICKZETTA_BATCH_SIZE: ${CLICKZETTA_BATCH_SIZE:-100} + CLICKZETTA_ENABLE_INVERTED_INDEX: ${CLICKZETTA_ENABLE_INVERTED_INDEX:-true} + CLICKZETTA_ANALYZER_TYPE: ${CLICKZETTA_ANALYZER_TYPE:-chinese} + CLICKZETTA_ANALYZER_MODE: ${CLICKZETTA_ANALYZER_MODE:-smart} + CLICKZETTA_VECTOR_DISTANCE_FUNCTION: ${CLICKZETTA_VECTOR_DISTANCE_FUNCTION:-cosine_distance} UPLOAD_FILE_SIZE_LIMIT: ${UPLOAD_FILE_SIZE_LIMIT:-15} UPLOAD_FILE_BATCH_LIMIT: ${UPLOAD_FILE_BATCH_LIMIT:-5} ETL_TYPE: ${ETL_TYPE:-dify} From a5ca76befb545e1f5ac72e2a75b3b0be75b80276 Mon Sep 17 00:00:00 2001 From: thief Date: Thu, 7 Aug 2025 14:42:34 +0800 Subject: [PATCH 172/415] Fixes #23536 (#23542) --- web/app/styles/markdown.scss | 2 -- 1 file changed, 2 deletions(-) diff --git a/web/app/styles/markdown.scss b/web/app/styles/markdown.scss index bd9c7343f3..005685f0e8 100644 --- a/web/app/styles/markdown.scss +++ b/web/app/styles/markdown.scss @@ -1,5 +1,3 @@ -@use '../../themes/light'; -@use '../../themes/dark'; @use '../../themes/markdown-light'; @use '../../themes/markdown-dark'; From 305ea0a2d5b67fc993082952178ecaa09b518c20 Mon Sep 17 00:00:00 2001 From: crazywoola <100913391+crazywoola@users.noreply.github.com> Date: Thu, 7 Aug 2025 01:55:23 -0700 Subject: [PATCH 173/415] Fix/footer behavior (#23555) --- web/app/components/apps/footer.tsx | 19 ++----------------- web/app/components/apps/index.tsx | 6 ------ web/app/components/apps/list.tsx | 6 ++++++ 3 files changed, 8 insertions(+), 23 deletions(-) diff --git a/web/app/components/apps/footer.tsx b/web/app/components/apps/footer.tsx index c5efb2b8b4..9fed4c8757 100644 --- a/web/app/components/apps/footer.tsx +++ b/web/app/components/apps/footer.tsx @@ -1,6 +1,6 @@ -import React, { useState } from 'react' +import React from 'react' import Link from 'next/link' -import { RiCloseLine, RiDiscordFill, RiGithubFill } from '@remixicon/react' +import { RiDiscordFill, RiGithubFill } from '@remixicon/react' import { useTranslation } from 'react-i18next' type CustomLinkProps = { @@ -26,24 +26,9 @@ const CustomLink = React.memo(({ const Footer = () => { const { t } = useTranslation() - const [isVisible, setIsVisible] = useState(true) - - const handleClose = () => { - setIsVisible(false) - } - - if (!isVisible) - return null return (
    -

    {t('app.join')}

    {t('app.communityIntro')}

    diff --git a/web/app/components/apps/index.tsx b/web/app/components/apps/index.tsx index be81a77dc3..6d21800421 100644 --- a/web/app/components/apps/index.tsx +++ b/web/app/components/apps/index.tsx @@ -1,14 +1,11 @@ 'use client' import { useEducationInit } from '@/app/education-apply/hooks' -import { useGlobalPublicStore } from '@/context/global-public-context' import List from './list' -import Footer from './footer' import useDocumentTitle from '@/hooks/use-document-title' import { useTranslation } from 'react-i18next' const Apps = () => { const { t } = useTranslation() - const { systemFeatures } = useGlobalPublicStore() useDocumentTitle(t('common.menus.apps')) useEducationInit() @@ -16,9 +13,6 @@ const Apps = () => { return (
    - {!systemFeatures.branding.enabled && ( -
    - )}
    ) } diff --git a/web/app/components/apps/list.tsx b/web/app/components/apps/list.tsx index 359eaeabd4..222379e96d 100644 --- a/web/app/components/apps/list.tsx +++ b/web/app/components/apps/list.tsx @@ -32,6 +32,8 @@ import TagFilter from '@/app/components/base/tag-management/filter' import CheckboxWithLabel from '@/app/components/datasets/create/website/base/checkbox-with-label' import dynamic from 'next/dynamic' import Empty from './empty' +import Footer from './footer' +import { useGlobalPublicStore } from '@/context/global-public-context' const TagManagementModal = dynamic(() => import('@/app/components/base/tag-management'), { ssr: false, @@ -66,6 +68,7 @@ const getKey = ( const List = () => { const { t } = useTranslation() + const { systemFeatures } = useGlobalPublicStore() const router = useRouter() const { isCurrentWorkspaceEditor, isCurrentWorkspaceDatasetOperator } = useAppContext() const showTagManagementModal = useTagStore(s => s.showTagManagementModal) @@ -229,6 +232,9 @@ const List = () => { {t('app.newApp.dropDSLToCreateApp')}
    )} + {!systemFeatures.branding.enabled && ( +
    + )}
    {showTagManagementModal && ( From 55487ba0c620200df3b5c504643b969892b6c4a6 Mon Sep 17 00:00:00 2001 From: -LAN- Date: Thu, 7 Aug 2025 20:35:32 +0800 Subject: [PATCH 174/415] fix: exclude dev dependencies from production Docker image (#23562) --- api/Dockerfile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/api/Dockerfile b/api/Dockerfile index e097b5811e..d69291f7ea 100644 --- a/api/Dockerfile +++ b/api/Dockerfile @@ -19,7 +19,7 @@ RUN apt-get update \ # Install Python dependencies COPY pyproject.toml uv.lock ./ -RUN uv sync --locked +RUN uv sync --locked --no-dev # production stage FROM base AS production From e9045a88388d80f61f956f57c3d7f88867a66801 Mon Sep 17 00:00:00 2001 From: Qiang Lee <18018968632@163.com> Date: Thu, 7 Aug 2025 20:36:06 +0800 Subject: [PATCH 175/415] Fix: Apply Metadata Filters Correctly in Full-Text Search Mode for Tencent Cloud Vector Database (#23564) --- api/core/rag/datasource/vdb/tencent/tencent_vector.py | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/api/core/rag/datasource/vdb/tencent/tencent_vector.py b/api/core/rag/datasource/vdb/tencent/tencent_vector.py index 3aa4b67a78..0517d5a6d1 100644 --- a/api/core/rag/datasource/vdb/tencent/tencent_vector.py +++ b/api/core/rag/datasource/vdb/tencent/tencent_vector.py @@ -246,6 +246,10 @@ class TencentVector(BaseVector): return self._get_search_res(res, score_threshold) def search_by_full_text(self, query: str, **kwargs: Any) -> list[Document]: + document_ids_filter = kwargs.get("document_ids_filter") + filter = None + if document_ids_filter: + filter = Filter(Filter.In("metadata.document_id", document_ids_filter)) if not self._enable_hybrid_search: return [] res = self._client.hybrid_search( @@ -269,6 +273,7 @@ class TencentVector(BaseVector): ), retrieve_vector=False, limit=kwargs.get("top_k", 4), + filter=filter, ) score_threshold = float(kwargs.get("score_threshold") or 0.0) return self._get_search_res(res, score_threshold) From e60f1488244aa8d2fcc751727915c8c00cf9bfa0 Mon Sep 17 00:00:00 2001 From: NeatGuyCoding <15627489+NeatGuyCoding@users.noreply.github.com> Date: Thu, 7 Aug 2025 20:36:52 +0800 Subject: [PATCH 176/415] minor fix translation (#23568) --- web/i18n/de-DE/workflow.ts | 8 ++++---- web/i18n/es-ES/workflow.ts | 8 ++++---- web/i18n/fa-IR/workflow.ts | 6 +++--- web/i18n/fr-FR/workflow.ts | 8 ++++---- web/i18n/hi-IN/workflow.ts | 10 +++++----- web/i18n/it-IT/workflow.ts | 4 ++-- web/i18n/ja-JP/workflow.ts | 16 ++++++++-------- web/i18n/ko-KR/workflow.ts | 10 +++++----- web/i18n/pl-PL/workflow.ts | 6 +++--- web/i18n/pt-BR/workflow.ts | 10 +++++----- web/i18n/ro-RO/workflow.ts | 8 ++++---- web/i18n/ru-RU/workflow.ts | 12 ++++++------ web/i18n/sl-SI/workflow.ts | 8 ++++---- web/i18n/th-TH/workflow.ts | 10 +++++----- web/i18n/tr-TR/workflow.ts | 8 ++++---- web/i18n/uk-UA/workflow.ts | 10 +++++----- web/i18n/vi-VN/workflow.ts | 18 +++++++++--------- web/i18n/zh-Hant/workflow.ts | 20 ++++++++++---------- 18 files changed, 90 insertions(+), 90 deletions(-) diff --git a/web/i18n/de-DE/workflow.ts b/web/i18n/de-DE/workflow.ts index d9189ad5cc..639e47aa0a 100644 --- a/web/i18n/de-DE/workflow.ts +++ b/web/i18n/de-DE/workflow.ts @@ -290,10 +290,10 @@ const translation = { selectionAlignment: 'Ausrichtung der Auswahl', alignLeft: 'Links', alignTop: 'Nach oben', - distributeVertical: 'Vertikaler Raum', - alignBottom: 'Unteres', - distributeHorizontal: 'Horizontaler Raum', - vertical: 'Senkrecht', + distributeVertical: 'Vertikal verteilen', + alignBottom: 'Nach unten', + distributeHorizontal: 'Horizontal verteilen', + vertical: 'Vertikal', alignMiddle: 'Mitte', alignCenter: 'Mitte', alignRight: 'Rechts', diff --git a/web/i18n/es-ES/workflow.ts b/web/i18n/es-ES/workflow.ts index bac4b9c740..e5fd23158c 100644 --- a/web/i18n/es-ES/workflow.ts +++ b/web/i18n/es-ES/workflow.ts @@ -288,7 +288,7 @@ const translation = { zoomTo100: 'Zoom al 100%', zoomToFit: 'Ajustar al tamaño', alignTop: 'Arriba', - alignBottom: 'Fondo', + alignBottom: 'Abajo', alignNodes: 'Alinear nodos', alignCenter: 'Centro', selectionAlignment: 'Alineación de selección', @@ -296,9 +296,9 @@ const translation = { distributeHorizontal: 'Espaciar horizontalmente', vertical: 'Vertical', distributeVertical: 'Espaciar verticalmente', - alignMiddle: 'medio', - alignLeft: 'izquierdo', - alignRight: 'derecho', + alignMiddle: 'Centro', + alignLeft: 'Izquierda', + alignRight: 'Derecha', }, panel: { userInputField: 'Campo de entrada del usuario', diff --git a/web/i18n/fa-IR/workflow.ts b/web/i18n/fa-IR/workflow.ts index dde4988dc4..982b32b010 100644 --- a/web/i18n/fa-IR/workflow.ts +++ b/web/i18n/fa-IR/workflow.ts @@ -293,12 +293,12 @@ const translation = { vertical: 'عمودی', alignCenter: 'مرکز', alignLeft: 'چپ', - distributeVertical: 'فضا عمودی', - distributeHorizontal: 'فضا به صورت افقی', + distributeVertical: 'توزیع عمودی', + distributeHorizontal: 'توزیع افقی', alignTop: 'بالا', alignNodes: 'تراز کردن گره ها', selectionAlignment: 'تراز انتخاب', - alignMiddle: 'میانه', + alignMiddle: 'وسط', }, panel: { userInputField: 'فیلد ورودی کاربر', diff --git a/web/i18n/fr-FR/workflow.ts b/web/i18n/fr-FR/workflow.ts index bd801fb841..f75b11a804 100644 --- a/web/i18n/fr-FR/workflow.ts +++ b/web/i18n/fr-FR/workflow.ts @@ -287,18 +287,18 @@ const translation = { zoomTo50: 'Zoomer à 50%', zoomTo100: 'Zoomer à 100%', zoomToFit: 'Zoomer pour ajuster', - alignBottom: 'Fond', + alignBottom: 'Bas', alignLeft: 'Gauche', alignCenter: 'Centre', - alignTop: 'Retour au début', + alignTop: 'Haut', alignNodes: 'Aligner les nœuds', - distributeHorizontal: 'Espace horizontal', + distributeHorizontal: 'Répartir horizontalement', alignMiddle: 'Milieu', horizontal: 'Horizontal', selectionAlignment: 'Alignement de la sélection', alignRight: 'Droite', vertical: 'Vertical', - distributeVertical: 'Espace vertical', + distributeVertical: 'Répartir verticalement', }, panel: { userInputField: 'Champ de saisie de l\'utilisateur', diff --git a/web/i18n/hi-IN/workflow.ts b/web/i18n/hi-IN/workflow.ts index baeed41d31..95a013057b 100644 --- a/web/i18n/hi-IN/workflow.ts +++ b/web/i18n/hi-IN/workflow.ts @@ -298,17 +298,17 @@ const translation = { zoomTo50: '50% पर ज़ूम करें', zoomTo100: '100% पर ज़ूम करें', zoomToFit: 'फिट करने के लिए ज़ूम करें', - alignRight: 'सही', - alignLeft: 'बाईं ओर', + alignRight: 'दाएं', + alignLeft: 'बाएं', alignTop: 'शीर्ष', horizontal: 'क्षैतिज', alignNodes: 'नोड्स को संरेखित करें', selectionAlignment: 'चयन संरेखण', alignCenter: 'केंद्र', vertical: 'ऊर्ध्वाधर', - distributeHorizontal: 'क्षैतिज स्पेस', - alignBottom: 'तल', - distributeVertical: 'अंतरिक्ष को वर्टिकल रूप से', + distributeHorizontal: 'क्षैतिज रूप से वितरित करें', + alignBottom: 'नीचे', + distributeVertical: 'ऊर्ध्वाधर रूप से वितरित करें', alignMiddle: 'मध्य', }, panel: { diff --git a/web/i18n/it-IT/workflow.ts b/web/i18n/it-IT/workflow.ts index f1fdf5c3fb..98f7bd4264 100644 --- a/web/i18n/it-IT/workflow.ts +++ b/web/i18n/it-IT/workflow.ts @@ -303,12 +303,12 @@ const translation = { zoomToFit: 'Zoom per Adattare', alignRight: 'A destra', selectionAlignment: 'Allineamento della selezione', - alignBottom: 'Fondoschiena', + alignBottom: 'In basso', alignTop: 'In alto', vertical: 'Verticale', alignCenter: 'Centro', alignLeft: 'A sinistra', - alignMiddle: 'Mezzo', + alignMiddle: 'Centro', horizontal: 'Orizzontale', alignNodes: 'Allinea nodi', distributeHorizontal: 'Spazia orizzontalmente', diff --git a/web/i18n/ja-JP/workflow.ts b/web/i18n/ja-JP/workflow.ts index fa53075585..b447bff2b5 100644 --- a/web/i18n/ja-JP/workflow.ts +++ b/web/i18n/ja-JP/workflow.ts @@ -287,18 +287,18 @@ const translation = { zoomTo50: '50% サイズ', zoomTo100: '等倍表示', zoomToFit: '画面に合わせる', - horizontal: '横', - alignBottom: '底', - alignNodes: 'ノードを整列させる', + horizontal: '水平', + alignBottom: '下', + alignNodes: 'ノードを整列', vertical: '垂直', alignLeft: '左', - alignTop: 'トップ', + alignTop: '上', alignRight: '右', - alignMiddle: '中間', - distributeVertical: '垂直にスペースを', - alignCenter: 'センター', + alignMiddle: '中央', + distributeVertical: '垂直方向に等間隔配置', + alignCenter: '中央', selectionAlignment: '選択の整列', - distributeHorizontal: '空間を水平方向に', + distributeHorizontal: '水平方向に等間隔配置', }, variableReference: { noAvailableVars: '利用可能な変数がありません', diff --git a/web/i18n/ko-KR/workflow.ts b/web/i18n/ko-KR/workflow.ts index ca83ae3b10..2afbc4bfc3 100644 --- a/web/i18n/ko-KR/workflow.ts +++ b/web/i18n/ko-KR/workflow.ts @@ -308,18 +308,18 @@ const translation = { zoomTo50: '50% 로 확대', zoomTo100: '100% 로 확대', zoomToFit: '화면에 맞게 확대', - alignCenter: '중', + alignCenter: '중앙', alignRight: '오른쪽', alignLeft: '왼쪽', vertical: '세로', - alignTop: '맨 위로', + alignTop: '상단', alignMiddle: '중간', alignNodes: '노드 정렬', - distributeVertical: '수직 공간', + distributeVertical: '수직 등간격', horizontal: '가로', selectionAlignment: '선택 정렬', - alignBottom: '밑바닥', - distributeHorizontal: '수평 공간', + alignBottom: '하단', + distributeHorizontal: '수평 등간격', }, panel: { userInputField: '사용자 입력 필드', diff --git a/web/i18n/pl-PL/workflow.ts b/web/i18n/pl-PL/workflow.ts index 132d050868..468260bc9e 100644 --- a/web/i18n/pl-PL/workflow.ts +++ b/web/i18n/pl-PL/workflow.ts @@ -289,14 +289,14 @@ const translation = { zoomToFit: 'Dopasuj do ekranu', alignMiddle: 'Środek', alignTop: 'Do góry', - distributeHorizontal: 'Odstęp w poziomie', + distributeHorizontal: 'Rozmieść poziomo', alignCenter: 'Centrum', alignRight: 'Prawy', alignNodes: 'Wyrównywanie węzłów', selectionAlignment: 'Wyrównanie zaznaczenia', horizontal: 'Poziomy', - distributeVertical: 'Przestrzeń w pionie', - alignBottom: 'Dno', + distributeVertical: 'Rozmieść pionowo', + alignBottom: 'Dół', alignLeft: 'Lewy', vertical: 'Pionowy', }, diff --git a/web/i18n/pt-BR/workflow.ts b/web/i18n/pt-BR/workflow.ts index e705641666..cc8c14c3b7 100644 --- a/web/i18n/pt-BR/workflow.ts +++ b/web/i18n/pt-BR/workflow.ts @@ -291,14 +291,14 @@ const translation = { alignNodes: 'Alinhar nós', selectionAlignment: 'Alinhamento de seleção', alignLeft: 'Esquerda', - alignBottom: 'Fundo', - distributeHorizontal: 'Espaço horizontalmente', + alignBottom: 'Inferior', + distributeHorizontal: 'Distribuir horizontalmente', alignMiddle: 'Meio', - alignRight: 'Certo', + alignRight: 'Direita', horizontal: 'Horizontal', - distributeVertical: 'Espaço Verticalmente', + distributeVertical: 'Distribuir verticalmente', alignCenter: 'Centro', - alignTop: 'Início', + alignTop: 'Superior', }, panel: { userInputField: 'Campo de entrada do usuário', diff --git a/web/i18n/ro-RO/workflow.ts b/web/i18n/ro-RO/workflow.ts index 5b90ce5abc..e95b6e66c9 100644 --- a/web/i18n/ro-RO/workflow.ts +++ b/web/i18n/ro-RO/workflow.ts @@ -293,11 +293,11 @@ const translation = { alignRight: 'Dreapta', alignLeft: 'Stânga', alignMiddle: 'Mijloc', - distributeVertical: 'Spațiu vertical', + distributeVertical: 'Distribuie vertical', alignCenter: 'Centru', - distributeHorizontal: 'Spațiu orizontal', - alignBottom: 'Fund', - alignTop: 'Culme', + distributeHorizontal: 'Distribuie orizontal', + alignBottom: 'Jos', + alignTop: 'Sus', alignNodes: 'Alinierea nodurilor', }, panel: { diff --git a/web/i18n/ru-RU/workflow.ts b/web/i18n/ru-RU/workflow.ts index 0fff591b50..0b36e680c0 100644 --- a/web/i18n/ru-RU/workflow.ts +++ b/web/i18n/ru-RU/workflow.ts @@ -288,17 +288,17 @@ const translation = { zoomTo100: 'Масштаб 100%', zoomToFit: 'По размеру', alignTop: 'Вверх', - alignBottom: 'Дно', - alignRight: 'Правильно', - distributeHorizontal: 'Пространство по горизонтали', - alignMiddle: 'Середина', + alignBottom: 'Вниз', + alignRight: 'Вправо', + distributeHorizontal: 'Распределить по горизонтали', + alignMiddle: 'По центру', vertical: 'Вертикальный', alignCenter: 'Центр', - alignLeft: 'Налево', + alignLeft: 'Влево', selectionAlignment: 'Выравнивание выделения', horizontal: 'Горизонтальный', alignNodes: 'Выравнивание узлов', - distributeVertical: 'Пространство по вертикали', + distributeVertical: 'Распределить по вертикали', }, panel: { userInputField: 'Поле ввода пользователя', diff --git a/web/i18n/sl-SI/workflow.ts b/web/i18n/sl-SI/workflow.ts index c544a9a35c..df2b7b5159 100644 --- a/web/i18n/sl-SI/workflow.ts +++ b/web/i18n/sl-SI/workflow.ts @@ -287,14 +287,14 @@ const translation = { zoomIn: 'Zoom in', zoomTo50: 'Povečaj na 50%', zoomTo100: 'Povečaj na 100%', - alignMiddle: 'Srednji', - alignBottom: 'Dno', + alignMiddle: 'Sredina', + alignBottom: 'Spodaj', alignCenter: 'Center', - distributeVertical: 'Razmik navpično', + distributeVertical: 'Razporedi navpično', alignRight: 'Desno', alignTop: 'Vrh', vertical: 'Navpičen', - distributeHorizontal: 'Razmik vodoravno', + distributeHorizontal: 'Razporedi vodoravno', selectionAlignment: 'Poravnava izbora', alignNodes: 'Poravnava vozlišč', horizontal: 'Vodoraven', diff --git a/web/i18n/th-TH/workflow.ts b/web/i18n/th-TH/workflow.ts index bcab73d7c4..58f889e6b9 100644 --- a/web/i18n/th-TH/workflow.ts +++ b/web/i18n/th-TH/workflow.ts @@ -287,16 +287,16 @@ const translation = { zoomTo50: 'ซูมไปที่ 50%', zoomTo100: 'ซูมไปที่ 100%', zoomToFit: 'ซูมให้พอดี', - alignBottom: 'ก้น', + alignBottom: 'ด้านล่าง', alignCenter: 'ศูนย์กลาง', alignMiddle: 'กลาง', - horizontal: 'แนวราบ', - vertical: 'ซึ่งตั้งตรง', + horizontal: 'แนวนอน', + vertical: 'แนวตั้ง', alignTop: 'ด้านบน', - distributeVertical: 'พื้นที่ในแนวตั้ง', + distributeVertical: 'ระยะห่างแนวตั้ง', alignLeft: 'ซ้าย', selectionAlignment: 'การจัดตําแหน่งการเลือก', - distributeHorizontal: 'ช่องว่างในแนวนอน', + distributeHorizontal: 'ระยะห่างแนวนอน', alignRight: 'ขวา', alignNodes: 'จัดตําแหน่งโหนด', }, diff --git a/web/i18n/tr-TR/workflow.ts b/web/i18n/tr-TR/workflow.ts index 379c2c30e1..e66cf35561 100644 --- a/web/i18n/tr-TR/workflow.ts +++ b/web/i18n/tr-TR/workflow.ts @@ -293,12 +293,12 @@ const translation = { alignNodes: 'Düğümleri Hizala', vertical: 'Dikey', alignRight: 'Sağ', - alignTop: 'Sayfanın Üstü', - alignBottom: 'Dip', + alignTop: 'Üst', + alignBottom: 'Alt', selectionAlignment: 'Seçim Hizalama', - distributeHorizontal: 'Yatay Boşluk', + distributeHorizontal: 'Yatay Dağıt', horizontal: 'Yatay', - distributeVertical: 'Dikey Boşluk', + distributeVertical: 'Dikey Dağıt', }, panel: { userInputField: 'Kullanıcı Giriş Alanı', diff --git a/web/i18n/uk-UA/workflow.ts b/web/i18n/uk-UA/workflow.ts index e174b0f103..c0aee379d0 100644 --- a/web/i18n/uk-UA/workflow.ts +++ b/web/i18n/uk-UA/workflow.ts @@ -289,14 +289,14 @@ const translation = { zoomToFit: 'Збільшити для підгонки', alignCenter: 'Центр', alignRight: 'Праворуч', - vertical: 'Вертикальні', + vertical: 'Вертикальний', alignBottom: 'Низ', alignLeft: 'Ліворуч', alignTop: 'Верх', - horizontal: 'Горизонтальні', - alignMiddle: 'Середній', - distributeVertical: 'Простір по вертикалі', - distributeHorizontal: 'Простір по горизонталі', + horizontal: 'Горизонтальний', + alignMiddle: 'По центру', + distributeVertical: 'Розподілити по вертикалі', + distributeHorizontal: 'Розподілити по горизонталі', selectionAlignment: 'Вирівнювання вибору', alignNodes: 'Вирівнювання вузлів', }, diff --git a/web/i18n/vi-VN/workflow.ts b/web/i18n/vi-VN/workflow.ts index 79265c3f72..0d34d7af31 100644 --- a/web/i18n/vi-VN/workflow.ts +++ b/web/i18n/vi-VN/workflow.ts @@ -287,18 +287,18 @@ const translation = { zoomTo50: 'Phóng to 50%', zoomTo100: 'Phóng to 100%', zoomToFit: 'Phóng to vừa màn hình', - alignBottom: 'Đáy', - alignMiddle: 'Trung', - alignRight: 'Bên phải', + alignBottom: 'Dưới', + alignMiddle: 'Giữa', + alignRight: 'Phải', alignNodes: 'Căn chỉnh các nút', - alignLeft: 'Bên trái', + alignLeft: 'Trái', horizontal: 'Ngang', - alignCenter: 'Trung tâm', - alignTop: 'Đỉnh', - distributeVertical: 'Không gian theo chiều dọc', + alignCenter: 'Giữa', + alignTop: 'Trên', + distributeVertical: 'Phân bố theo chiều dọc', selectionAlignment: 'Căn chỉnh lựa chọn', - distributeHorizontal: 'Không gian theo chiều ngang', - vertical: 'Thẳng đứng', + distributeHorizontal: 'Phân bố theo chiều ngang', + vertical: 'Dọc', }, panel: { userInputField: 'Trường đầu vào của người dùng', diff --git a/web/i18n/zh-Hant/workflow.ts b/web/i18n/zh-Hant/workflow.ts index 41d64f14d7..311997769a 100644 --- a/web/i18n/zh-Hant/workflow.ts +++ b/web/i18n/zh-Hant/workflow.ts @@ -287,18 +287,18 @@ const translation = { zoomTo50: '縮放到 50%', zoomTo100: '放大到 100%', zoomToFit: '自適應視圖', - alignNodes: '對齊節點(Align Nodes)', - distributeVertical: '垂直空間', - alignLeft: '左', - distributeHorizontal: '水平空間', + alignNodes: '對齊節點', + distributeVertical: '垂直等間距', + alignLeft: '左對齊', + distributeHorizontal: '水平等間距', vertical: '垂直', - alignTop: '返回頁首', - alignCenter: '中心', - horizontal: '水準', + alignTop: '頂部對齊', + alignCenter: '居中對齊', + horizontal: '水平', selectionAlignment: '選擇對齊', - alignRight: '右', - alignBottom: '底', - alignMiddle: '中間', + alignRight: '右對齊', + alignBottom: '底部對齊', + alignMiddle: '中部對齊', }, panel: { userInputField: '用戶輸入字段', From b44ecf9bf7a46fc8035c8b137a93e83035d84368 Mon Sep 17 00:00:00 2001 From: NFish Date: Thu, 7 Aug 2025 20:37:05 +0800 Subject: [PATCH 177/415] fix: hide opensource license link when custom branding is enabled (#23569) --- web/app/signin/invite-settings/page.tsx | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/web/app/signin/invite-settings/page.tsx b/web/app/signin/invite-settings/page.tsx index 2bc7eba7a4..ecb10a4387 100644 --- a/web/app/signin/invite-settings/page.tsx +++ b/web/app/signin/invite-settings/page.tsx @@ -17,9 +17,11 @@ import { activateMember, invitationCheck } from '@/service/common' import Loading from '@/app/components/base/loading' import Toast from '@/app/components/base/toast' import { noop } from 'lodash-es' +import { useGlobalPublicStore } from '@/context/global-public-context' export default function InviteSettingsPage() { const { t } = useTranslation() + const systemFeatures = useGlobalPublicStore(s => s.systemFeatures) const docLink = useDocLink() const router = useRouter() const searchParams = useSearchParams() @@ -150,7 +152,7 @@ export default function InviteSettingsPage() {
    -
    + {!systemFeatures.branding.enabled &&
    {t('login.license.tip')}   {t('login.license.link')} -
    +
    }
    } From 11d29e8d3e46a95eb707e6447ddf1562e0a7c3d5 Mon Sep 17 00:00:00 2001 From: NFish Date: Thu, 7 Aug 2025 21:01:01 +0800 Subject: [PATCH 178/415] fix: update invite settings page style in dark mode (#23571) --- web/app/signin/invite-settings/page.tsx | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/web/app/signin/invite-settings/page.tsx b/web/app/signin/invite-settings/page.tsx index ecb10a4387..fae62de530 100644 --- a/web/app/signin/invite-settings/page.tsx +++ b/web/app/signin/invite-settings/page.tsx @@ -74,7 +74,7 @@ export default function InviteSettingsPage() { return
    🤷‍♂️
    -

    {t('login.invalid')}

    +

    {t('login.invalid')}

    -

    {t('login.setYourAccount')}

    +

    {t('login.setYourAccount')}

    -
    -
    + Preview or download uploaded files. This endpoint allows you to access files that have been previously uploaded via the File Upload API. + + Files can only be accessed if they belong to messages within the requesting application. + + ### Path Parameters + - `file_id` (string) Required + The unique identifier of the file to preview, obtained from the File Upload API response. + + ### Query Parameters + - `as_attachment` (boolean) Optional + Whether to force download the file as an attachment. Default is `false` (preview in browser). + + ### Response + Returns the file content with appropriate headers for browser display or download. + - `Content-Type` Set based on file mime type + - `Content-Length` File size in bytes (if available) + - `Content-Disposition` Set to "attachment" if `as_attachment=true` + - `Cache-Control` Caching headers for performance + - `Accept-Ranges` Set to "bytes" for audio/video files + + ### Errors + - 400, `invalid_param`, abnormal parameter input + - 403, `file_access_denied`, file access denied or file does not belong to current application + - 404, `file_not_found`, file not found or has been deleted + - 500, internal server error + + + + ### Request Example + + + ```bash {{ title: 'cURL' }} + curl -X GET '${props.appDetail.api_base_url}/files/72fa9618-8f89-4a37-9b33-7e1178a24a67/preview' \ + --header 'Authorization: Bearer {api_key}' + ``` + + + + ### Download as Attachment + + + ```bash {{ title: 'cURL' }} + curl -X GET '${props.appDetail.api_base_url}/files/72fa9618-8f89-4a37-9b33-7e1178a24a67/preview?as_attachment=true' \ + --header 'Authorization: Bearer {api_key}' \ + --output downloaded_file.png + ``` + + + + ### Response Headers Example + + ```http {{ title: 'Headers - Image Preview' }} + Content-Type: image/png + Content-Length: 1024 + Cache-Control: public, max-age=3600 + ``` + + + ### Download Response Headers + + ```http {{ title: 'Headers - File Download' }} + Content-Type: image/png + Content-Length: 1024 + Content-Disposition: attachment; filename*=UTF-8''example.png + Cache-Control: public, max-age=3600 + ``` + + + +--- + --- + + + + アップロードされたファイルをプレビューまたはダウンロードします。このエンドポイントを使用すると、以前にファイルアップロード API でアップロードされたファイルにアクセスできます。 + + ファイルは、リクエストしているアプリケーションのメッセージ範囲内にある場合のみアクセス可能です。 + + ### パスパラメータ + - `file_id` (string) 必須 + プレビューするファイルの一意識別子。ファイルアップロード API レスポンスから取得します。 + + ### クエリパラメータ + - `as_attachment` (boolean) オプション + ファイルを添付ファイルとして強制ダウンロードするかどうか。デフォルトは `false`(ブラウザでプレビュー)。 + + ### レスポンス + ブラウザ表示またはダウンロード用の適切なヘッダー付きでファイル内容を返します。 + - `Content-Type` ファイル MIME タイプに基づいて設定 + - `Content-Length` ファイルサイズ(バイト、利用可能な場合) + - `Content-Disposition` `as_attachment=true` の場合は "attachment" に設定 + - `Cache-Control` パフォーマンス向上のためのキャッシュヘッダー + - `Accept-Ranges` 音声/動画ファイルの場合は "bytes" に設定 + + ### エラー + - 400, `invalid_param`, パラメータ入力異常 + - 403, `file_access_denied`, ファイルアクセス拒否またはファイルが現在のアプリケーションに属していません + - 404, `file_not_found`, ファイルが見つからないか削除されています + - 500, サーバー内部エラー + + + + ### リクエスト例 + + + ```bash {{ title: 'cURL' }} + curl -X GET '${props.appDetail.api_base_url}/files/72fa9618-8f89-4a37-9b33-7e1178a24a67/preview' \ + --header 'Authorization: Bearer {api_key}' + ``` + + + + ### 添付ファイルとしてダウンロード + + + ```bash {{ title: 'cURL' }} + curl -X GET '${props.appDetail.api_base_url}/files/72fa9618-8f89-4a37-9b33-7e1178a24a67/preview?as_attachment=true' \ + --header 'Authorization: Bearer {api_key}' \ + --output downloaded_file.png + ``` + + + + ### レスポンスヘッダー例 + + ```http {{ title: 'ヘッダー - 画像プレビュー' }} + Content-Type: image/png + Content-Length: 1024 + Cache-Control: public, max-age=3600 + ``` + + + ### ファイルダウンロードレスポンスヘッダー + + ```http {{ title: 'ヘッダー - ファイルダウンロード' }} + Content-Type: image/png + Content-Length: 1024 + Content-Disposition: attachment; filename*=UTF-8''example.png + Cache-Control: public, max-age=3600 + ``` + + + +--- + --- + + + + + 预览或下载已上传的文件。此端点允许您访问先前通过文件上传 API 上传的文件。 + + 文件只能在属于请求应用程序的消息范围内访问。 + + ### 路径参数 + - `file_id` (string) 必需 + 要预览的文件的唯一标识符,从文件上传 API 响应中获得。 + + ### 查询参数 + - `as_attachment` (boolean) 可选 + 是否强制将文件作为附件下载。默认为 `false`(在浏览器中预览)。 + + ### 响应 + 返回带有适当浏览器显示或下载标头的文件内容。 + - `Content-Type` 根据文件 MIME 类型设置 + - `Content-Length` 文件大小(以字节为单位,如果可用) + - `Content-Disposition` 如果 `as_attachment=true` 则设置为 "attachment" + - `Cache-Control` 用于性能的缓存标头 + - `Accept-Ranges` 对于音频/视频文件设置为 "bytes" + + ### 错误 + - 400, `invalid_param`, 参数输入异常 + - 403, `file_access_denied`, 文件访问被拒绝或文件不属于当前应用程序 + - 404, `file_not_found`, 文件未找到或已被删除 + - 500, 服务内部错误 + + + + ### 请求示例 + + + ```bash {{ title: 'cURL' }} + curl -X GET '${props.appDetail.api_base_url}/files/72fa9618-8f89-4a37-9b33-7e1178a24a67/preview' \ + --header 'Authorization: Bearer {api_key}' + ``` + + + + ### 作为附件下载 + + + ```bash {{ title: 'cURL' }} + curl -X GET '${props.appDetail.api_base_url}/files/72fa9618-8f89-4a37-9b33-7e1178a24a67/preview?as_attachment=true' \ + --header 'Authorization: Bearer {api_key}' \ + --output downloaded_file.png + ``` + + + + ### 响应标头示例 + + ```http {{ title: 'Headers - 图片预览' }} + Content-Type: image/png + Content-Length: 1024 + Cache-Control: public, max-age=3600 + ``` + + + ### 文件下载响应标头 + + ```http {{ title: 'Headers - 文件下载' }} + Content-Type: image/png + Content-Length: 1024 + Content-Disposition: attachment; filename*=UTF-8''example.png + Cache-Control: public, max-age=3600 + ``` + + + +--- + --- + + + + Preview or download uploaded files. This endpoint allows you to access files that have been previously uploaded via the File Upload API. + + Files can only be accessed if they belong to messages within the requesting application. + + ### Path Parameters + - `file_id` (string) Required + The unique identifier of the file to preview, obtained from the File Upload API response. + + ### Query Parameters + - `as_attachment` (boolean) Optional + Whether to force download the file as an attachment. Default is `false` (preview in browser). + + ### Response + Returns the file content with appropriate headers for browser display or download. + - `Content-Type` Set based on file mime type + - `Content-Length` File size in bytes (if available) + - `Content-Disposition` Set to "attachment" if `as_attachment=true` + - `Cache-Control` Caching headers for performance + - `Accept-Ranges` Set to "bytes" for audio/video files + + ### Errors + - 400, `invalid_param`, abnormal parameter input + - 403, `file_access_denied`, file access denied or file does not belong to current application + - 404, `file_not_found`, file not found or has been deleted + - 500, internal server error + + + + ### Request Example + + + ```bash {{ title: 'cURL' }} + curl -X GET '${props.appDetail.api_base_url}/files/72fa9618-8f89-4a37-9b33-7e1178a24a67/preview' \ + --header 'Authorization: Bearer {api_key}' + ``` + + + + ### Download as Attachment + + + ```bash {{ title: 'cURL' }} + curl -X GET '${props.appDetail.api_base_url}/files/72fa9618-8f89-4a37-9b33-7e1178a24a67/preview?as_attachment=true' \ + --header 'Authorization: Bearer {api_key}' \ + --output downloaded_file.png + ``` + + + + ### Response Headers Example + + ```http {{ title: 'Headers - Image Preview' }} + Content-Type: image/png + Content-Length: 1024 + Cache-Control: public, max-age=3600 + ``` + + + ### Download Response Headers + + ```http {{ title: 'Headers - File Download' }} + Content-Type: image/png + Content-Length: 1024 + Content-Disposition: attachment; filename*=UTF-8''example.png + Cache-Control: public, max-age=3600 + ``` + + + +--- + --- + + + + アップロードされたファイルをプレビューまたはダウンロードします。このエンドポイントを使用すると、以前にファイルアップロード API でアップロードされたファイルにアクセスできます。 + + ファイルは、リクエストしているアプリケーションのメッセージ範囲内にある場合のみアクセス可能です。 + + ### パスパラメータ + - `file_id` (string) 必須 + プレビューするファイルの一意識別子。ファイルアップロード API レスポンスから取得します。 + + ### クエリパラメータ + - `as_attachment` (boolean) オプション + ファイルを添付ファイルとして強制ダウンロードするかどうか。デフォルトは `false`(ブラウザでプレビュー)。 + + ### レスポンス + ブラウザ表示またはダウンロード用の適切なヘッダー付きでファイル内容を返します。 + - `Content-Type` ファイル MIME タイプに基づいて設定 + - `Content-Length` ファイルサイズ(バイト、利用可能な場合) + - `Content-Disposition` `as_attachment=true` の場合は "attachment" に設定 + - `Cache-Control` パフォーマンス向上のためのキャッシュヘッダー + - `Accept-Ranges` 音声/動画ファイルの場合は "bytes" に設定 + + ### エラー + - 400, `invalid_param`, パラメータ入力異常 + - 403, `file_access_denied`, ファイルアクセス拒否またはファイルが現在のアプリケーションに属していません + - 404, `file_not_found`, ファイルが見つからないか削除されています + - 500, サーバー内部エラー + + + + ### リクエスト例 + + + ```bash {{ title: 'cURL - ブラウザプレビュー' }} + curl -X GET '${props.appDetail.api_base_url}/files/72fa9618-8f89-4a37-9b33-7e1178a24a67/preview' \ + --header 'Authorization: Bearer {api_key}' + ``` + + + + ### 添付ファイルとしてダウンロード + + + ```bash {{ title: 'cURL' }} + curl -X GET '${props.appDetail.api_base_url}/files/72fa9618-8f89-4a37-9b33-7e1178a24a67/preview?as_attachment=true' \ + --header 'Authorization: Bearer {api_key}' \ + --output downloaded_file.png + ``` + + + + ### レスポンスヘッダー例 + + ```http {{ title: 'ヘッダー - 画像プレビュー' }} + Content-Type: image/png + Content-Length: 1024 + Cache-Control: public, max-age=3600 + ``` + + + ### ダウンロードレスポンスヘッダー + + ```http {{ title: 'ヘッダー - ファイルダウンロード' }} + Content-Type: image/png + Content-Length: 1024 + Content-Disposition: attachment; filename*=UTF-8''example.png + Cache-Control: public, max-age=3600 + ``` + + + + +--- + --- + + + + + 预览或下载已上传的文件。此端点允许您访问先前通过文件上传 API 上传的文件。 + + 文件只能在属于请求应用程序的消息范围内访问。 + + ### 路径参数 + - `file_id` (string) 必需 + 要预览的文件的唯一标识符,从文件上传 API 响应中获得。 + + ### 查询参数 + - `as_attachment` (boolean) 可选 + 是否强制将文件作为附件下载。默认为 `false`(在浏览器中预览)。 + + ### 响应 + 返回带有适当浏览器显示或下载标头的文件内容。 + - `Content-Type` 根据文件 MIME 类型设置 + - `Content-Length` 文件大小(以字节为单位,如果可用) + - `Content-Disposition` 如果 `as_attachment=true` 则设置为 "attachment" + - `Cache-Control` 用于性能的缓存标头 + - `Accept-Ranges` 对于音频/视频文件设置为 "bytes" + + ### 错误 + - 400, `invalid_param`, 参数输入异常 + - 403, `file_access_denied`, 文件访问被拒绝或文件不属于当前应用程序 + - 404, `file_not_found`, 文件未找到或已被删除 + - 500, 服务内部错误 + + + + ### 请求示例 + + + ```bash {{ title: 'cURL' }} + curl -X GET '${props.appDetail.api_base_url}/files/72fa9618-8f89-4a37-9b33-7e1178a24a67/preview' \ + --header 'Authorization: Bearer {api_key}' + ``` + + + + ### 作为附件下载 + + + ```bash {{ title: 'cURL' }} + curl -X GET '${props.appDetail.api_base_url}/files/72fa9618-8f89-4a37-9b33-7e1178a24a67/preview?as_attachment=true' \ + --header 'Authorization: Bearer {api_key}' \ + --output downloaded_file.png + ``` + + + + ### 响应标头示例 + + ```http {{ title: 'Headers - 图片预览' }} + Content-Type: image/png + Content-Length: 1024 + Cache-Control: public, max-age=3600 + ``` + + + ### 文件下载响应标头 + + ```http {{ title: 'Headers - 文件下载' }} + Content-Type: image/png + Content-Length: 1024 + Content-Disposition: attachment; filename*=UTF-8''example.png + Cache-Control: public, max-age=3600 + ``` + + + +--- + --- + + + + Preview or download uploaded files. This endpoint allows you to access files that have been previously uploaded via the File Upload API. + + Files can only be accessed if they belong to messages within the requesting application. + + ### Path Parameters + - `file_id` (string) Required + The unique identifier of the file to preview, obtained from the File Upload API response. + + ### Query Parameters + - `as_attachment` (boolean) Optional + Whether to force download the file as an attachment. Default is `false` (preview in browser). + + ### Response + Returns the file content with appropriate headers for browser display or download. + - `Content-Type` Set based on file mime type + - `Content-Length` File size in bytes (if available) + - `Content-Disposition` Set to "attachment" if `as_attachment=true` + - `Cache-Control` Caching headers for performance + - `Accept-Ranges` Set to "bytes" for audio/video files + + ### Errors + - 400, `invalid_param`, abnormal parameter input + - 403, `file_access_denied`, file access denied or file does not belong to current application + - 404, `file_not_found`, file not found or has been deleted + - 500, internal server error + + + + ### Request Example + + + ```bash {{ title: 'cURL' }} + curl -X GET '${props.appDetail.api_base_url}/files/72fa9618-8f89-4a37-9b33-7e1178a24a67/preview' \ + --header 'Authorization: Bearer {api_key}' + ``` + + + + ### Download as Attachment + + + ```bash {{ title: 'cURL' }} + curl -X GET '${props.appDetail.api_base_url}/files/72fa9618-8f89-4a37-9b33-7e1178a24a67/preview?as_attachment=true' \ + --header 'Authorization: Bearer {api_key}' \ + --output downloaded_file.png + ``` + + + + ### Response Headers Example + + ```http {{ title: 'Headers - Image Preview' }} + Content-Type: image/png + Content-Length: 1024 + Cache-Control: public, max-age=3600 + ``` + + + ### Download Response Headers + + ```http {{ title: 'Headers - File Download' }} + Content-Type: image/png + Content-Length: 1024 + Content-Disposition: attachment; filename*=UTF-8''example.png + Cache-Control: public, max-age=3600 + ``` + + + +--- + --- + + + + アップロードされたファイルをプレビューまたはダウンロードします。このエンドポイントを使用すると、以前にファイルアップロード API でアップロードされたファイルにアクセスできます。 + + ファイルは、リクエストしているアプリケーションのメッセージ範囲内にある場合のみアクセス可能です。 + + ### パスパラメータ + - `file_id` (string) 必須 + プレビューするファイルの一意識別子。ファイルアップロード API レスポンスから取得します。 + + ### クエリパラメータ + - `as_attachment` (boolean) オプション + ファイルを添付ファイルとして強制ダウンロードするかどうか。デフォルトは `false`(ブラウザでプレビュー)。 + + ### レスポンス + ブラウザ表示またはダウンロード用の適切なヘッダー付きでファイル内容を返します。 + - `Content-Type` ファイル MIME タイプに基づいて設定 + - `Content-Length` ファイルサイズ(バイト、利用可能な場合) + - `Content-Disposition` `as_attachment=true` の場合は "attachment" に設定 + - `Cache-Control` パフォーマンス向上のためのキャッシュヘッダー + - `Accept-Ranges` 音声/動画ファイルの場合は "bytes" に設定 + + ### エラー + - 400, `invalid_param`, パラメータ入力異常 + - 403, `file_access_denied`, ファイルアクセス拒否またはファイルが現在のアプリケーションに属していません + - 404, `file_not_found`, ファイルが見つからないか削除されています + - 500, サーバー内部エラー + + + + ### リクエスト例 + + + ```bash {{ title: 'cURL' }} + curl -X GET '${props.appDetail.api_base_url}/files/72fa9618-8f89-4a37-9b33-7e1178a24a67/preview' \ + --header 'Authorization: Bearer {api_key}' + ``` + + + + ### 添付ファイルとしてダウンロード + + + ```bash {{ title: 'cURL' }} + curl -X GET '${props.appDetail.api_base_url}/files/72fa9618-8f89-4a37-9b33-7e1178a24a67/preview?as_attachment=true' \ + --header 'Authorization: Bearer {api_key}' \ + --output downloaded_file.png + ``` + + + + ### レスポンスヘッダー例 + + ```http {{ title: 'Headers - 画像プレビュー' }} + Content-Type: image/png + Content-Length: 1024 + Cache-Control: public, max-age=3600 + ``` + + + ### ダウンロードレスポンスヘッダー + + ```http {{ title: 'Headers - ファイルダウンロード' }} + Content-Type: image/png + Content-Length: 1024 + Content-Disposition: attachment; filename*=UTF-8''example.png + Cache-Control: public, max-age=3600 + ``` + + + +--- + --- + + + + + 预览或下载已上传的文件。此端点允许您访问先前通过文件上传 API 上传的文件。 + + 文件只能在属于请求应用程序的消息范围内访问。 + + ### 路径参数 + - `file_id` (string) 必需 + 要预览的文件的唯一标识符,从文件上传 API 响应中获得。 + + ### 查询参数 + - `as_attachment` (boolean) 可选 + 是否强制将文件作为附件下载。默认为 `false`(在浏览器中预览)。 + + ### 响应 + 返回带有适当浏览器显示或下载标头的文件内容。 + - `Content-Type` 根据文件 MIME 类型设置 + - `Content-Length` 文件大小(以字节为单位,如果可用) + - `Content-Disposition` 如果 `as_attachment=true` 则设置为 "attachment" + - `Cache-Control` 用于性能的缓存标头 + - `Accept-Ranges` 对于音频/视频文件设置为 "bytes" + + ### 错误 + - 400, `invalid_param`, 参数输入异常 + - 403, `file_access_denied`, 文件访问被拒绝或文件不属于当前应用程序 + - 404, `file_not_found`, 文件未找到或已被删除 + - 500, 服务内部错误 + + + + ### 请求示例 + + + ```bash {{ title: 'cURL' }} + curl -X GET '${props.appDetail.api_base_url}/files/72fa9618-8f89-4a37-9b33-7e1178a24a67/preview' \ + --header 'Authorization: Bearer {api_key}' + ``` + + + + ### 作为附件下载 + + + ```bash {{ title: 'cURL' }} + curl -X GET '${props.appDetail.api_base_url}/files/72fa9618-8f89-4a37-9b33-7e1178a24a67/preview?as_attachment=true' \ + --header 'Authorization: Bearer {api_key}' \ + --output downloaded_file.png + ``` + + + + ### 响应标头示例 + + ```http {{ title: 'Headers - 图片预览' }} + Content-Type: image/png + Content-Length: 1024 + Cache-Control: public, max-age=3600 + ``` + + + ### 文件下载响应标头 + + ```http {{ title: 'Headers - 文件下载' }} + Content-Type: image/png + Content-Length: 1024 + Content-Disposition: attachment; filename*=UTF-8''example.png + Cache-Control: public, max-age=3600 + ``` + + + +--- + + + + Preview or download uploaded files. This endpoint allows you to access files that have been previously uploaded via the File Upload API. + + Files can only be accessed if they belong to messages within the requesting application. + + ### Path Parameters + - `file_id` (string) Required + The unique identifier of the file to preview, obtained from the File Upload API response. + + ### Query Parameters + - `as_attachment` (boolean) Optional + Whether to force download the file as an attachment. Default is `false` (preview in browser). + + ### Response + Returns the file content with appropriate headers for browser display or download. + - `Content-Type` Set based on file mime type + - `Content-Length` File size in bytes (if available) + - `Content-Disposition` Set to "attachment" if `as_attachment=true` + - `Cache-Control` Caching headers for performance + - `Accept-Ranges` Set to "bytes" for audio/video files + + ### Errors + - 400, `invalid_param`, abnormal parameter input + - 403, `file_access_denied`, file access denied or file does not belong to current application + - 404, `file_not_found`, file not found or has been deleted + - 500, internal server error + + + + ### Request Example + + + ```bash {{ title: 'cURL' }} + curl -X GET '${props.appDetail.api_base_url}/files/72fa9618-8f89-4a37-9b33-7e1178a24a67/preview' \ + --header 'Authorization: Bearer {api_key}' + ``` + + + + ### Download as Attachment + + + ```bash {{ title: 'cURL' }} + curl -X GET '${props.appDetail.api_base_url}/files/72fa9618-8f89-4a37-9b33-7e1178a24a67/preview?as_attachment=true' \ + --header 'Authorization: Bearer {api_key}' \ + --output downloaded_file.png + ``` + + + + ### Response Headers Example + + ```http {{ title: 'Headers - Image Preview' }} + Content-Type: image/png + Content-Length: 1024 + Cache-Control: public, max-age=3600 + ``` + + + ### Download Response Headers + + ```http {{ title: 'Headers - File Download' }} + Content-Type: image/png + Content-Length: 1024 + Content-Disposition: attachment; filename*=UTF-8''example.png + Cache-Control: public, max-age=3600 + ``` + + + + +--- + + + + アップロードされたファイルをプレビューまたはダウンロードします。このエンドポイントを使用すると、以前にファイルアップロード API でアップロードされたファイルにアクセスできます。 + + ファイルは、リクエストしているアプリケーションのメッセージ範囲内にある場合のみアクセス可能です。 + + ### パスパラメータ + - `file_id` (string) 必須 + プレビューするファイルの一意識別子。ファイルアップロード API レスポンスから取得します。 + + ### クエリパラメータ + - `as_attachment` (boolean) オプション + ファイルを添付ファイルとして強制ダウンロードするかどうか。デフォルトは `false`(ブラウザでプレビュー)。 + + ### レスポンス + ブラウザ表示またはダウンロード用の適切なヘッダー付きでファイル内容を返します。 + - `Content-Type` ファイル MIME タイプに基づいて設定 + - `Content-Length` ファイルサイズ(バイト、利用可能な場合) + - `Content-Disposition` `as_attachment=true` の場合は "attachment" に設定 + - `Cache-Control` パフォーマンス向上のためのキャッシュヘッダー + - `Accept-Ranges` 音声/動画ファイルの場合は "bytes" に設定 + + ### エラー + - 400, `invalid_param`, パラメータ入力異常 + - 403, `file_access_denied`, ファイルアクセス拒否またはファイルが現在のアプリケーションに属していません + - 404, `file_not_found`, ファイルが見つからないか削除されています + - 500, サーバー内部エラー + + + + ### リクエスト例 + + + ```bash {{ title: 'cURL - ブラウザプレビュー' }} + curl -X GET '${props.appDetail.api_base_url}/files/72fa9618-8f89-4a37-9b33-7e1178a24a67/preview' \ + --header 'Authorization: Bearer {api_key}' + ``` + + + + ### 添付ファイルとしてダウンロード + + + ```bash {{ title: 'cURL' }} + curl -X GET '${props.appDetail.api_base_url}/files/72fa9618-8f89-4a37-9b33-7e1178a24a67/preview?as_attachment=true' \ + --header 'Authorization: Bearer {api_key}' \ + --output downloaded_file.png + ``` + + + + ### レスポンスヘッダー例 + + ```http {{ title: 'ヘッダー - 画像プレビュー' }} + Content-Type: image/png + Content-Length: 1024 + Cache-Control: public, max-age=3600 + ``` + + + ### ダウンロードレスポンスヘッダー + + ```http {{ title: 'ヘッダー - ファイルダウンロード' }} + Content-Type: image/png + Content-Length: 1024 + Content-Disposition: attachment; filename*=UTF-8''example.png + Cache-Control: public, max-age=3600 + ``` + + + + +--- + --- + + + + 预览或下载已上传的文件。此端点允许您访问先前通过文件上传 API 上传的文件。 + + 文件只能在属于请求应用程序的消息范围内访问。 + + ### 路径参数 + - `file_id` (string) 必需 + 要预览的文件的唯一标识符,从文件上传 API 响应中获得。 + + ### 查询参数 + - `as_attachment` (boolean) 可选 + 是否强制将文件作为附件下载。默认为 `false`(在浏览器中预览)。 + + ### 响应 + 返回带有适当浏览器显示或下载标头的文件内容。 + - `Content-Type` 根据文件 MIME 类型设置 + - `Content-Length` 文件大小(以字节为单位,如果可用) + - `Content-Disposition` 如果 `as_attachment=true` 则设置为 "attachment" + - `Cache-Control` 用于性能的缓存标头 + - `Accept-Ranges` 对于音频/视频文件设置为 "bytes" + + ### 错误 + - 400, `invalid_param`, 参数输入异常 + - 403, `file_access_denied`, 文件访问被拒绝或文件不属于当前应用程序 + - 404, `file_not_found`, 文件未找到或已被删除 + - 500, 服务内部错误 + + + + ### 请求示例 + + + ```bash {{ title: 'cURL' }} + curl -X GET '${props.appDetail.api_base_url}/files/72fa9618-8f89-4a37-9b33-7e1178a24a67/preview' \ + --header 'Authorization: Bearer {api_key}' + ``` + + + + ### 作为附件下载 + + + ```bash {{ title: 'cURL' }} + curl -X GET '${props.appDetail.api_base_url}/files/72fa9618-8f89-4a37-9b33-7e1178a24a67/preview?as_attachment=true' \ + --header 'Authorization: Bearer {api_key}' \ + --output downloaded_file.png + ``` + + + + ### 响应标头示例 + + ```http {{ title: 'Headers - 图片预览' }} + Content-Type: image/png + Content-Length: 1024 + Cache-Control: public, max-age=3600 + ``` + + + ### 文件下载响应标头 + + ```http {{ title: 'Headers - 文件下载' }} + Content-Type: image/png + Content-Length: 1024 + Content-Disposition: attachment; filename*=UTF-8''example.png + Cache-Control: public, max-age=3600 + ``` + + + +--- + Date: Fri, 8 Aug 2025 08:50:37 +0800 Subject: [PATCH 181/415] fix: empty arrays should convert to empty string in LLM prompts (#23590) --- api/core/variables/segments.py | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/api/core/variables/segments.py b/api/core/variables/segments.py index 13274f4e0e..a99f5eece3 100644 --- a/api/core/variables/segments.py +++ b/api/core/variables/segments.py @@ -119,6 +119,13 @@ class ObjectSegment(Segment): class ArraySegment(Segment): + @property + def text(self) -> str: + # Return empty string for empty arrays instead of "[]" + if not self.value: + return "" + return super().text + @property def markdown(self) -> str: items = [] @@ -155,6 +162,9 @@ class ArrayStringSegment(ArraySegment): @property def text(self) -> str: + # Return empty string for empty arrays instead of "[]" + if not self.value: + return "" return json.dumps(self.value, ensure_ascii=False) From 5889059ce464f4472dc737f1b50ac4f5e3818bbb Mon Sep 17 00:00:00 2001 From: NeatGuyCoding <15627489+NeatGuyCoding@users.noreply.github.com> Date: Fri, 8 Aug 2025 09:03:50 +0800 Subject: [PATCH 182/415] Feat add testcontainers test for annnotation service (#23593) --- .../services/test_annotation_service.py | 1252 +++++++++++++++++ 1 file changed, 1252 insertions(+) create mode 100644 api/tests/test_containers_integration_tests/services/test_annotation_service.py diff --git a/api/tests/test_containers_integration_tests/services/test_annotation_service.py b/api/tests/test_containers_integration_tests/services/test_annotation_service.py new file mode 100644 index 0000000000..0ab5f398e3 --- /dev/null +++ b/api/tests/test_containers_integration_tests/services/test_annotation_service.py @@ -0,0 +1,1252 @@ +from unittest.mock import patch + +import pytest +from faker import Faker +from werkzeug.exceptions import NotFound + +from models.model import MessageAnnotation +from services.annotation_service import AppAnnotationService +from services.app_service import AppService + + +class TestAnnotationService: + """Integration tests for AnnotationService using testcontainers.""" + + @pytest.fixture + def mock_external_service_dependencies(self): + """Mock setup for external service dependencies.""" + with ( + patch("services.account_service.FeatureService") as mock_account_feature_service, + patch("services.annotation_service.FeatureService") as mock_feature_service, + patch("services.annotation_service.add_annotation_to_index_task") as mock_add_task, + patch("services.annotation_service.update_annotation_to_index_task") as mock_update_task, + patch("services.annotation_service.delete_annotation_index_task") as mock_delete_task, + patch("services.annotation_service.enable_annotation_reply_task") as mock_enable_task, + patch("services.annotation_service.disable_annotation_reply_task") as mock_disable_task, + patch("services.annotation_service.batch_import_annotations_task") as mock_batch_import_task, + patch("services.annotation_service.current_user") as mock_current_user, + ): + # Setup default mock returns + mock_account_feature_service.get_features.return_value.billing.enabled = False + mock_add_task.delay.return_value = None + mock_update_task.delay.return_value = None + mock_delete_task.delay.return_value = None + mock_enable_task.delay.return_value = None + mock_disable_task.delay.return_value = None + mock_batch_import_task.delay.return_value = None + + yield { + "account_feature_service": mock_account_feature_service, + "feature_service": mock_feature_service, + "add_task": mock_add_task, + "update_task": mock_update_task, + "delete_task": mock_delete_task, + "enable_task": mock_enable_task, + "disable_task": mock_disable_task, + "batch_import_task": mock_batch_import_task, + "current_user": mock_current_user, + } + + def _create_test_app_and_account(self, db_session_with_containers, mock_external_service_dependencies): + """ + Helper method to create a test app and account for testing. + + Args: + db_session_with_containers: Database session from testcontainers infrastructure + mock_external_service_dependencies: Mock dependencies + + Returns: + tuple: (app, account) - Created app and account instances + """ + fake = Faker() + + # Setup mocks for account creation + mock_external_service_dependencies[ + "account_feature_service" + ].get_system_features.return_value.is_allow_register = True + + # Create account and tenant first + from services.account_service import AccountService, TenantService + + account = AccountService.create_account( + email=fake.email(), + name=fake.name(), + interface_language="en-US", + password=fake.password(length=12), + ) + TenantService.create_owner_tenant_if_not_exist(account, name=fake.company()) + tenant = account.current_tenant + + # Setup app creation arguments + app_args = { + "name": fake.company(), + "description": fake.text(max_nb_chars=100), + "mode": "chat", + "icon_type": "emoji", + "icon": "🤖", + "icon_background": "#FF6B6B", + "api_rph": 100, + "api_rpm": 10, + } + + # Create app + app_service = AppService() + app = app_service.create_app(tenant.id, app_args, account) + + # Setup current_user mock + self._mock_current_user(mock_external_service_dependencies, account.id, tenant.id) + + return app, account + + def _mock_current_user(self, mock_external_service_dependencies, account_id, tenant_id): + """ + Helper method to mock the current user for testing. + """ + mock_external_service_dependencies["current_user"].id = account_id + mock_external_service_dependencies["current_user"].current_tenant_id = tenant_id + + def _create_test_conversation(self, app, account, fake): + """ + Helper method to create a test conversation with all required fields. + """ + from extensions.ext_database import db + from models.model import Conversation + + conversation = Conversation( + app_id=app.id, + app_model_config_id=None, + model_provider=None, + model_id="", + override_model_configs=None, + mode=app.mode, + name=fake.sentence(), + inputs={}, + introduction="", + system_instruction="", + system_instruction_tokens=0, + status="normal", + invoke_from="console", + from_source="console", + from_end_user_id=None, + from_account_id=account.id, + ) + + db.session.add(conversation) + db.session.flush() + return conversation + + def _create_test_message(self, app, conversation, account, fake): + """ + Helper method to create a test message with all required fields. + """ + import json + + from extensions.ext_database import db + from models.model import Message + + message = Message( + app_id=app.id, + model_provider=None, + model_id="", + override_model_configs=None, + conversation_id=conversation.id, + inputs={}, + query=fake.sentence(), + message=json.dumps([{"role": "user", "text": fake.sentence()}]), + message_tokens=0, + message_unit_price=0, + message_price_unit=0.001, + answer=fake.text(max_nb_chars=200), + answer_tokens=0, + answer_unit_price=0, + answer_price_unit=0.001, + parent_message_id=None, + provider_response_latency=0, + total_price=0, + currency="USD", + invoke_from="console", + from_source="console", + from_end_user_id=None, + from_account_id=account.id, + ) + + db.session.add(message) + db.session.commit() + return message + + def test_insert_app_annotation_directly_success( + self, db_session_with_containers, mock_external_service_dependencies + ): + """ + Test successful direct insertion of app annotation. + """ + fake = Faker() + app, account = self._create_test_app_and_account(db_session_with_containers, mock_external_service_dependencies) + + # Setup annotation data + annotation_args = { + "question": fake.sentence(), + "answer": fake.text(max_nb_chars=200), + } + + # Insert annotation directly + annotation = AppAnnotationService.insert_app_annotation_directly(annotation_args, app.id) + + # Verify annotation was created correctly + assert annotation.app_id == app.id + assert annotation.question == annotation_args["question"] + assert annotation.content == annotation_args["answer"] + assert annotation.account_id == account.id + assert annotation.hit_count == 0 + assert annotation.id is not None + + # Verify annotation was saved to database + from extensions.ext_database import db + + db.session.refresh(annotation) + assert annotation.id is not None + + # Verify add_annotation_to_index_task was called (when annotation setting exists) + # Note: In this test, no annotation setting exists, so task should not be called + mock_external_service_dependencies["add_task"].delay.assert_not_called() + + def test_insert_app_annotation_directly_app_not_found( + self, db_session_with_containers, mock_external_service_dependencies + ): + """ + Test direct insertion of app annotation when app is not found. + """ + fake = Faker() + non_existent_app_id = fake.uuid4() + + # Mock random current user to avoid dependency issues + self._mock_current_user(mock_external_service_dependencies, fake.uuid4(), fake.uuid4()) + + # Setup annotation data + annotation_args = { + "question": fake.sentence(), + "answer": fake.text(max_nb_chars=200), + } + + # Try to insert annotation with non-existent app + with pytest.raises(NotFound, match="App not found"): + AppAnnotationService.insert_app_annotation_directly(annotation_args, non_existent_app_id) + + def test_update_app_annotation_directly_success( + self, db_session_with_containers, mock_external_service_dependencies + ): + """ + Test successful direct update of app annotation. + """ + fake = Faker() + app, account = self._create_test_app_and_account(db_session_with_containers, mock_external_service_dependencies) + + # First, create an annotation + original_args = { + "question": fake.sentence(), + "answer": fake.text(max_nb_chars=200), + } + annotation = AppAnnotationService.insert_app_annotation_directly(original_args, app.id) + + # Update the annotation + updated_args = { + "question": fake.sentence(), + "answer": fake.text(max_nb_chars=200), + } + updated_annotation = AppAnnotationService.update_app_annotation_directly(updated_args, app.id, annotation.id) + + # Verify annotation was updated correctly + assert updated_annotation.id == annotation.id + assert updated_annotation.app_id == app.id + assert updated_annotation.question == updated_args["question"] + assert updated_annotation.content == updated_args["answer"] + assert updated_annotation.account_id == account.id + + # Verify original values were changed + assert updated_annotation.question != original_args["question"] + assert updated_annotation.content != original_args["answer"] + + # Verify update_annotation_to_index_task was called (when annotation setting exists) + # Note: In this test, no annotation setting exists, so task should not be called + mock_external_service_dependencies["update_task"].delay.assert_not_called() + + def test_up_insert_app_annotation_from_message_new( + self, db_session_with_containers, mock_external_service_dependencies + ): + """ + Test creating new annotation from message. + """ + fake = Faker() + app, account = self._create_test_app_and_account(db_session_with_containers, mock_external_service_dependencies) + + # Create a conversation and message first + conversation = self._create_test_conversation(app, account, fake) + message = self._create_test_message(app, conversation, account, fake) + + # Setup annotation data with message_id + annotation_args = { + "message_id": message.id, + "question": fake.sentence(), + "answer": fake.text(max_nb_chars=200), + } + + # Insert annotation from message + annotation = AppAnnotationService.up_insert_app_annotation_from_message(annotation_args, app.id) + + # Verify annotation was created correctly + assert annotation.app_id == app.id + assert annotation.conversation_id == conversation.id + assert annotation.message_id == message.id + assert annotation.question == annotation_args["question"] + assert annotation.content == annotation_args["answer"] + assert annotation.account_id == account.id + + # Verify add_annotation_to_index_task was called (when annotation setting exists) + # Note: In this test, no annotation setting exists, so task should not be called + mock_external_service_dependencies["add_task"].delay.assert_not_called() + + def test_up_insert_app_annotation_from_message_update( + self, db_session_with_containers, mock_external_service_dependencies + ): + """ + Test updating existing annotation from message. + """ + fake = Faker() + app, account = self._create_test_app_and_account(db_session_with_containers, mock_external_service_dependencies) + + # Create a conversation and message first + conversation = self._create_test_conversation(app, account, fake) + message = self._create_test_message(app, conversation, account, fake) + + # Create initial annotation + initial_args = { + "message_id": message.id, + "question": fake.sentence(), + "answer": fake.text(max_nb_chars=200), + } + initial_annotation = AppAnnotationService.up_insert_app_annotation_from_message(initial_args, app.id) + + # Update the annotation + updated_args = { + "message_id": message.id, + "question": fake.sentence(), + "answer": fake.text(max_nb_chars=200), + } + updated_annotation = AppAnnotationService.up_insert_app_annotation_from_message(updated_args, app.id) + + # Verify annotation was updated correctly (same ID) + assert updated_annotation.id == initial_annotation.id + assert updated_annotation.question == updated_args["question"] + assert updated_annotation.content == updated_args["answer"] + assert updated_annotation.question != initial_args["question"] + assert updated_annotation.content != initial_args["answer"] + + # Verify add_annotation_to_index_task was called (when annotation setting exists) + # Note: In this test, no annotation setting exists, so task should not be called + mock_external_service_dependencies["add_task"].delay.assert_not_called() + + def test_up_insert_app_annotation_from_message_app_not_found( + self, db_session_with_containers, mock_external_service_dependencies + ): + """ + Test creating annotation from message when app is not found. + """ + fake = Faker() + non_existent_app_id = fake.uuid4() + + # Mock random current user to avoid dependency issues + self._mock_current_user(mock_external_service_dependencies, fake.uuid4(), fake.uuid4()) + + # Setup annotation data + annotation_args = { + "question": fake.sentence(), + "answer": fake.text(max_nb_chars=200), + } + + # Try to insert annotation with non-existent app + with pytest.raises(NotFound, match="App not found"): + AppAnnotationService.up_insert_app_annotation_from_message(annotation_args, non_existent_app_id) + + def test_get_annotation_list_by_app_id_success( + self, db_session_with_containers, mock_external_service_dependencies + ): + """ + Test successful retrieval of annotation list by app ID. + """ + fake = Faker() + app, account = self._create_test_app_and_account(db_session_with_containers, mock_external_service_dependencies) + + # Create multiple annotations + annotations = [] + for i in range(3): + annotation_args = { + "question": f"Question {i}: {fake.sentence()}", + "answer": f"Answer {i}: {fake.text(max_nb_chars=200)}", + } + annotation = AppAnnotationService.insert_app_annotation_directly(annotation_args, app.id) + annotations.append(annotation) + + # Get annotation list + annotation_list, total = AppAnnotationService.get_annotation_list_by_app_id( + app.id, page=1, limit=10, keyword="" + ) + + # Verify results + assert len(annotation_list) == 3 + assert total == 3 + + # Verify all annotations belong to the correct app + for annotation in annotation_list: + assert annotation.app_id == app.id + assert annotation.account_id == account.id + + def test_get_annotation_list_by_app_id_with_keyword( + self, db_session_with_containers, mock_external_service_dependencies + ): + """ + Test retrieval of annotation list with keyword search. + """ + fake = Faker() + app, account = self._create_test_app_and_account(db_session_with_containers, mock_external_service_dependencies) + + # Create annotations with specific keywords + unique_keyword = fake.word() + annotation_args = { + "question": f"Question with {unique_keyword} keyword", + "answer": f"Answer with {unique_keyword} keyword", + } + AppAnnotationService.insert_app_annotation_directly(annotation_args, app.id) + + # Create another annotation without the keyword + other_args = { + "question": "Question without keyword", + "answer": "Answer without keyword", + } + AppAnnotationService.insert_app_annotation_directly(other_args, app.id) + + # Search with keyword + annotation_list, total = AppAnnotationService.get_annotation_list_by_app_id( + app.id, page=1, limit=10, keyword=unique_keyword + ) + + # Verify only matching annotations are returned + assert len(annotation_list) == 1 + assert total == 1 + assert unique_keyword in annotation_list[0].question or unique_keyword in annotation_list[0].content + + def test_get_annotation_list_by_app_id_app_not_found( + self, db_session_with_containers, mock_external_service_dependencies + ): + """ + Test retrieval of annotation list when app is not found. + """ + fake = Faker() + non_existent_app_id = fake.uuid4() + + # Mock random current user to avoid dependency issues + self._mock_current_user(mock_external_service_dependencies, fake.uuid4(), fake.uuid4()) + + # Try to get annotation list with non-existent app + with pytest.raises(NotFound, match="App not found"): + AppAnnotationService.get_annotation_list_by_app_id(non_existent_app_id, page=1, limit=10, keyword="") + + def test_delete_app_annotation_success(self, db_session_with_containers, mock_external_service_dependencies): + """ + Test successful deletion of app annotation. + """ + fake = Faker() + app, account = self._create_test_app_and_account(db_session_with_containers, mock_external_service_dependencies) + + # Create an annotation first + annotation_args = { + "question": fake.sentence(), + "answer": fake.text(max_nb_chars=200), + } + annotation = AppAnnotationService.insert_app_annotation_directly(annotation_args, app.id) + annotation_id = annotation.id + + # Delete the annotation + AppAnnotationService.delete_app_annotation(app.id, annotation_id) + + # Verify annotation was deleted + from extensions.ext_database import db + + deleted_annotation = db.session.query(MessageAnnotation).filter(MessageAnnotation.id == annotation_id).first() + assert deleted_annotation is None + + # Verify delete_annotation_index_task was called (when annotation setting exists) + # Note: In this test, no annotation setting exists, so task should not be called + mock_external_service_dependencies["delete_task"].delay.assert_not_called() + + def test_delete_app_annotation_app_not_found(self, db_session_with_containers, mock_external_service_dependencies): + """ + Test deletion of app annotation when app is not found. + """ + fake = Faker() + non_existent_app_id = fake.uuid4() + annotation_id = fake.uuid4() + + # Mock random current user to avoid dependency issues + self._mock_current_user(mock_external_service_dependencies, fake.uuid4(), fake.uuid4()) + + # Try to delete annotation with non-existent app + with pytest.raises(NotFound, match="App not found"): + AppAnnotationService.delete_app_annotation(non_existent_app_id, annotation_id) + + def test_delete_app_annotation_annotation_not_found( + self, db_session_with_containers, mock_external_service_dependencies + ): + """ + Test deletion of app annotation when annotation is not found. + """ + fake = Faker() + app, account = self._create_test_app_and_account(db_session_with_containers, mock_external_service_dependencies) + non_existent_annotation_id = fake.uuid4() + + # Try to delete non-existent annotation + with pytest.raises(NotFound, match="Annotation not found"): + AppAnnotationService.delete_app_annotation(app.id, non_existent_annotation_id) + + def test_enable_app_annotation_success(self, db_session_with_containers, mock_external_service_dependencies): + """ + Test successful enabling of app annotation. + """ + fake = Faker() + app, account = self._create_test_app_and_account(db_session_with_containers, mock_external_service_dependencies) + + # Setup enable arguments + enable_args = { + "score_threshold": 0.8, + "embedding_provider_name": "openai", + "embedding_model_name": "text-embedding-ada-002", + } + + # Enable annotation + result = AppAnnotationService.enable_app_annotation(enable_args, app.id) + + # Verify result structure + assert "job_id" in result + assert "job_status" in result + assert result["job_status"] == "waiting" + assert result["job_id"] is not None + + # Verify task was called + mock_external_service_dependencies["enable_task"].delay.assert_called_once() + + def test_disable_app_annotation_success(self, db_session_with_containers, mock_external_service_dependencies): + """ + Test successful disabling of app annotation. + """ + fake = Faker() + app, account = self._create_test_app_and_account(db_session_with_containers, mock_external_service_dependencies) + + # Disable annotation + result = AppAnnotationService.disable_app_annotation(app.id) + + # Verify result structure + assert "job_id" in result + assert "job_status" in result + assert result["job_status"] == "waiting" + assert result["job_id"] is not None + + # Verify task was called + mock_external_service_dependencies["disable_task"].delay.assert_called_once() + + def test_enable_app_annotation_cached_job(self, db_session_with_containers, mock_external_service_dependencies): + """ + Test enabling app annotation when job is already cached. + """ + fake = Faker() + app, account = self._create_test_app_and_account(db_session_with_containers, mock_external_service_dependencies) + + # Mock Redis to return cached job + from extensions.ext_redis import redis_client + + cached_job_id = fake.uuid4() + enable_app_annotation_key = f"enable_app_annotation_{app.id}" + redis_client.set(enable_app_annotation_key, cached_job_id) + + # Setup enable arguments + enable_args = { + "score_threshold": 0.8, + "embedding_provider_name": "openai", + "embedding_model_name": "text-embedding-ada-002", + } + + # Enable annotation (should return cached job) + result = AppAnnotationService.enable_app_annotation(enable_args, app.id) + + # Verify cached result + assert cached_job_id == result["job_id"].decode("utf-8") + assert result["job_status"] == "processing" + + # Verify task was not called again + mock_external_service_dependencies["enable_task"].delay.assert_not_called() + + # Clean up + redis_client.delete(enable_app_annotation_key) + + def test_get_annotation_hit_histories_success(self, db_session_with_containers, mock_external_service_dependencies): + """ + Test successful retrieval of annotation hit histories. + """ + fake = Faker() + app, account = self._create_test_app_and_account(db_session_with_containers, mock_external_service_dependencies) + + # Create an annotation first + annotation_args = { + "question": fake.sentence(), + "answer": fake.text(max_nb_chars=200), + } + annotation = AppAnnotationService.insert_app_annotation_directly(annotation_args, app.id) + + # Add some hit histories + for i in range(3): + AppAnnotationService.add_annotation_history( + annotation_id=annotation.id, + app_id=app.id, + annotation_question=annotation.question, + annotation_content=annotation.content, + query=f"Query {i}: {fake.sentence()}", + user_id=account.id, + message_id=fake.uuid4(), + from_source="console", + score=0.8 + (i * 0.1), + ) + + # Get hit histories + hit_histories, total = AppAnnotationService.get_annotation_hit_histories( + app.id, annotation.id, page=1, limit=10 + ) + + # Verify results + assert len(hit_histories) == 3 + assert total == 3 + + # Verify all histories belong to the correct annotation + for history in hit_histories: + assert history.annotation_id == annotation.id + assert history.app_id == app.id + assert history.account_id == account.id + + def test_add_annotation_history_success(self, db_session_with_containers, mock_external_service_dependencies): + """ + Test successful addition of annotation history. + """ + fake = Faker() + app, account = self._create_test_app_and_account(db_session_with_containers, mock_external_service_dependencies) + + # Create an annotation first + annotation_args = { + "question": fake.sentence(), + "answer": fake.text(max_nb_chars=200), + } + annotation = AppAnnotationService.insert_app_annotation_directly(annotation_args, app.id) + + # Get initial hit count + initial_hit_count = annotation.hit_count + + # Add annotation history + query = fake.sentence() + message_id = fake.uuid4() + score = 0.85 + + AppAnnotationService.add_annotation_history( + annotation_id=annotation.id, + app_id=app.id, + annotation_question=annotation.question, + annotation_content=annotation.content, + query=query, + user_id=account.id, + message_id=message_id, + from_source="console", + score=score, + ) + + # Verify hit count was incremented + from extensions.ext_database import db + + db.session.refresh(annotation) + assert annotation.hit_count == initial_hit_count + 1 + + # Verify history was created + from models.model import AppAnnotationHitHistory + + history = ( + db.session.query(AppAnnotationHitHistory) + .filter( + AppAnnotationHitHistory.annotation_id == annotation.id, AppAnnotationHitHistory.message_id == message_id + ) + .first() + ) + + assert history is not None + assert history.app_id == app.id + assert history.account_id == account.id + assert history.question == query + assert history.score == score + assert history.source == "console" + + def test_get_annotation_by_id_success(self, db_session_with_containers, mock_external_service_dependencies): + """ + Test successful retrieval of annotation by ID. + """ + fake = Faker() + app, account = self._create_test_app_and_account(db_session_with_containers, mock_external_service_dependencies) + + # Create an annotation + annotation_args = { + "question": fake.sentence(), + "answer": fake.text(max_nb_chars=200), + } + created_annotation = AppAnnotationService.insert_app_annotation_directly(annotation_args, app.id) + + # Get annotation by ID + retrieved_annotation = AppAnnotationService.get_annotation_by_id(created_annotation.id) + + # Verify annotation was retrieved correctly + assert retrieved_annotation is not None + assert retrieved_annotation.id == created_annotation.id + assert retrieved_annotation.app_id == app.id + assert retrieved_annotation.question == annotation_args["question"] + assert retrieved_annotation.content == annotation_args["answer"] + assert retrieved_annotation.account_id == account.id + + def test_batch_import_app_annotations_success(self, db_session_with_containers, mock_external_service_dependencies): + """ + Test successful batch import of app annotations. + """ + fake = Faker() + app, account = self._create_test_app_and_account(db_session_with_containers, mock_external_service_dependencies) + + # Create CSV content + csv_content = "Question 1,Answer 1\nQuestion 2,Answer 2\nQuestion 3,Answer 3" + + # Mock FileStorage + from io import BytesIO + + from werkzeug.datastructures import FileStorage + + file_storage = FileStorage( + stream=BytesIO(csv_content.encode("utf-8")), filename="annotations.csv", content_type="text/csv" + ) + + mock_external_service_dependencies["feature_service"].get_features.return_value.billing.enabled = False + + # Mock pandas to return expected DataFrame + import pandas as pd + + with patch("services.annotation_service.pd") as mock_pd: + mock_df = pd.DataFrame( + {0: ["Question 1", "Question 2", "Question 3"], 1: ["Answer 1", "Answer 2", "Answer 3"]} + ) + mock_pd.read_csv.return_value = mock_df + + # Batch import annotations + result = AppAnnotationService.batch_import_app_annotations(app.id, file_storage) + + # Verify result structure + assert "job_id" in result + assert "job_status" in result + assert result["job_status"] == "waiting" + assert result["job_id"] is not None + + # Verify task was called + mock_external_service_dependencies["batch_import_task"].delay.assert_called_once() + + def test_batch_import_app_annotations_empty_file( + self, db_session_with_containers, mock_external_service_dependencies + ): + """ + Test batch import with empty CSV file. + """ + fake = Faker() + app, account = self._create_test_app_and_account(db_session_with_containers, mock_external_service_dependencies) + + # Create empty CSV content + csv_content = "" + + # Mock FileStorage + from io import BytesIO + + from werkzeug.datastructures import FileStorage + + file_storage = FileStorage( + stream=BytesIO(csv_content.encode("utf-8")), filename="annotations.csv", content_type="text/csv" + ) + + # Mock pandas to return empty DataFrame + import pandas as pd + + with patch("services.annotation_service.pd") as mock_pd: + mock_df = pd.DataFrame() + mock_pd.read_csv.return_value = mock_df + + # Batch import annotations + result = AppAnnotationService.batch_import_app_annotations(app.id, file_storage) + + # Verify error result + assert "error_msg" in result + assert "empty" in result["error_msg"].lower() + + def test_batch_import_app_annotations_quota_exceeded( + self, db_session_with_containers, mock_external_service_dependencies + ): + """ + Test batch import when quota is exceeded. + """ + fake = Faker() + app, account = self._create_test_app_and_account(db_session_with_containers, mock_external_service_dependencies) + + # Create CSV content + csv_content = "Question 1,Answer 1\nQuestion 2,Answer 2\nQuestion 3,Answer 3" + + # Mock FileStorage + from io import BytesIO + + from werkzeug.datastructures import FileStorage + + file_storage = FileStorage( + stream=BytesIO(csv_content.encode("utf-8")), filename="annotations.csv", content_type="text/csv" + ) + + # Mock pandas to return DataFrame + import pandas as pd + + with patch("services.annotation_service.pd") as mock_pd: + mock_df = pd.DataFrame( + {0: ["Question 1", "Question 2", "Question 3"], 1: ["Answer 1", "Answer 2", "Answer 3"]} + ) + mock_pd.read_csv.return_value = mock_df + + # Mock FeatureService to return billing enabled with quota exceeded + mock_external_service_dependencies["feature_service"].get_features.return_value.billing.enabled = True + mock_external_service_dependencies[ + "feature_service" + ].get_features.return_value.annotation_quota_limit.limit = 1 + mock_external_service_dependencies[ + "feature_service" + ].get_features.return_value.annotation_quota_limit.size = 0 + + # Batch import annotations + result = AppAnnotationService.batch_import_app_annotations(app.id, file_storage) + + # Verify error result + assert "error_msg" in result + assert "limit" in result["error_msg"].lower() + + def test_get_app_annotation_setting_by_app_id_enabled( + self, db_session_with_containers, mock_external_service_dependencies + ): + """ + Test getting enabled app annotation setting by app ID. + """ + fake = Faker() + app, account = self._create_test_app_and_account(db_session_with_containers, mock_external_service_dependencies) + + # Create annotation setting + from extensions.ext_database import db + from models.dataset import DatasetCollectionBinding + from models.model import AppAnnotationSetting + + # Create a collection binding first + collection_binding = DatasetCollectionBinding() + collection_binding.id = fake.uuid4() + collection_binding.provider_name = "openai" + collection_binding.model_name = "text-embedding-ada-002" + collection_binding.type = "annotation" + collection_binding.collection_name = f"annotation_collection_{fake.uuid4()}" + db.session.add(collection_binding) + db.session.flush() + + # Create annotation setting + annotation_setting = AppAnnotationSetting() + annotation_setting.app_id = app.id + annotation_setting.score_threshold = 0.8 + annotation_setting.collection_binding_id = collection_binding.id + annotation_setting.created_user_id = account.id + annotation_setting.updated_user_id = account.id + db.session.add(annotation_setting) + db.session.commit() + + # Get annotation setting + result = AppAnnotationService.get_app_annotation_setting_by_app_id(app.id) + + # Verify result structure + assert result["enabled"] is True + assert result["id"] == annotation_setting.id + assert result["score_threshold"] == 0.8 + assert result["embedding_model"]["embedding_provider_name"] == "openai" + assert result["embedding_model"]["embedding_model_name"] == "text-embedding-ada-002" + + def test_get_app_annotation_setting_by_app_id_disabled( + self, db_session_with_containers, mock_external_service_dependencies + ): + """ + Test getting disabled app annotation setting by app ID. + """ + fake = Faker() + app, account = self._create_test_app_and_account(db_session_with_containers, mock_external_service_dependencies) + + # Get annotation setting (no setting exists) + result = AppAnnotationService.get_app_annotation_setting_by_app_id(app.id) + + # Verify result structure + assert result["enabled"] is False + + def test_update_app_annotation_setting_success( + self, db_session_with_containers, mock_external_service_dependencies + ): + """ + Test successful update of app annotation setting. + """ + fake = Faker() + app, account = self._create_test_app_and_account(db_session_with_containers, mock_external_service_dependencies) + + # Create annotation setting first + from extensions.ext_database import db + from models.dataset import DatasetCollectionBinding + from models.model import AppAnnotationSetting + + # Create a collection binding first + collection_binding = DatasetCollectionBinding() + collection_binding.id = fake.uuid4() + collection_binding.provider_name = "openai" + collection_binding.model_name = "text-embedding-ada-002" + collection_binding.type = "annotation" + collection_binding.collection_name = f"annotation_collection_{fake.uuid4()}" + db.session.add(collection_binding) + db.session.flush() + + # Create annotation setting + annotation_setting = AppAnnotationSetting() + annotation_setting.app_id = app.id + annotation_setting.score_threshold = 0.8 + annotation_setting.collection_binding_id = collection_binding.id + annotation_setting.created_user_id = account.id + annotation_setting.updated_user_id = account.id + db.session.add(annotation_setting) + db.session.commit() + + # Update annotation setting + update_args = { + "score_threshold": 0.9, + } + + result = AppAnnotationService.update_app_annotation_setting(app.id, annotation_setting.id, update_args) + + # Verify result structure + assert result["enabled"] is True + assert result["id"] == annotation_setting.id + assert result["score_threshold"] == 0.9 + assert result["embedding_model"]["embedding_provider_name"] == "openai" + assert result["embedding_model"]["embedding_model_name"] == "text-embedding-ada-002" + + # Verify database was updated + db.session.refresh(annotation_setting) + assert annotation_setting.score_threshold == 0.9 + + def test_export_annotation_list_by_app_id_success( + self, db_session_with_containers, mock_external_service_dependencies + ): + """ + Test successful export of annotation list by app ID. + """ + fake = Faker() + app, account = self._create_test_app_and_account(db_session_with_containers, mock_external_service_dependencies) + + # Create multiple annotations + annotations = [] + for i in range(3): + annotation_args = { + "question": f"Question {i}: {fake.sentence()}", + "answer": f"Answer {i}: {fake.text(max_nb_chars=200)}", + } + annotation = AppAnnotationService.insert_app_annotation_directly(annotation_args, app.id) + annotations.append(annotation) + + # Export annotation list + exported_annotations = AppAnnotationService.export_annotation_list_by_app_id(app.id) + + # Verify results + assert len(exported_annotations) == 3 + + # Verify all annotations belong to the correct app and are ordered by created_at desc + for i, annotation in enumerate(exported_annotations): + assert annotation.app_id == app.id + assert annotation.account_id == account.id + if i > 0: + # Verify descending order (newer first) + assert annotation.created_at <= exported_annotations[i - 1].created_at + + def test_export_annotation_list_by_app_id_app_not_found( + self, db_session_with_containers, mock_external_service_dependencies + ): + """ + Test export of annotation list when app is not found. + """ + fake = Faker() + non_existent_app_id = fake.uuid4() + + # Mock random current user to avoid dependency issues + self._mock_current_user(mock_external_service_dependencies, fake.uuid4(), fake.uuid4()) + + # Try to export annotation list with non-existent app + with pytest.raises(NotFound, match="App not found"): + AppAnnotationService.export_annotation_list_by_app_id(non_existent_app_id) + + def test_insert_app_annotation_directly_with_setting_success( + self, db_session_with_containers, mock_external_service_dependencies + ): + """ + Test successful direct insertion of app annotation with annotation setting enabled. + """ + fake = Faker() + app, account = self._create_test_app_and_account(db_session_with_containers, mock_external_service_dependencies) + + # Create annotation setting first + from extensions.ext_database import db + from models.dataset import DatasetCollectionBinding + from models.model import AppAnnotationSetting + + # Create a collection binding first + collection_binding = DatasetCollectionBinding() + collection_binding.id = fake.uuid4() + collection_binding.provider_name = "openai" + collection_binding.model_name = "text-embedding-ada-002" + collection_binding.type = "annotation" + collection_binding.collection_name = f"annotation_collection_{fake.uuid4()}" + db.session.add(collection_binding) + db.session.flush() + + # Create annotation setting + annotation_setting = AppAnnotationSetting() + annotation_setting.app_id = app.id + annotation_setting.score_threshold = 0.8 + annotation_setting.collection_binding_id = collection_binding.id + annotation_setting.created_user_id = account.id + annotation_setting.updated_user_id = account.id + db.session.add(annotation_setting) + db.session.commit() + + # Setup annotation data + annotation_args = { + "question": fake.sentence(), + "answer": fake.text(max_nb_chars=200), + } + + # Insert annotation directly + annotation = AppAnnotationService.insert_app_annotation_directly(annotation_args, app.id) + + # Verify annotation was created correctly + assert annotation.app_id == app.id + assert annotation.question == annotation_args["question"] + assert annotation.content == annotation_args["answer"] + assert annotation.account_id == account.id + assert annotation.hit_count == 0 + assert annotation.id is not None + + # Verify add_annotation_to_index_task was called + mock_external_service_dependencies["add_task"].delay.assert_called_once() + call_args = mock_external_service_dependencies["add_task"].delay.call_args[0] + assert call_args[0] == annotation.id # annotation_id + assert call_args[1] == annotation_args["question"] # question + assert call_args[2] == account.current_tenant_id # tenant_id + assert call_args[3] == app.id # app_id + assert call_args[4] == collection_binding.id # collection_binding_id + + def test_update_app_annotation_directly_with_setting_success( + self, db_session_with_containers, mock_external_service_dependencies + ): + """ + Test successful direct update of app annotation with annotation setting enabled. + """ + fake = Faker() + app, account = self._create_test_app_and_account(db_session_with_containers, mock_external_service_dependencies) + + # Create annotation setting first + from extensions.ext_database import db + from models.dataset import DatasetCollectionBinding + from models.model import AppAnnotationSetting + + # Create a collection binding first + collection_binding = DatasetCollectionBinding() + collection_binding.id = fake.uuid4() + collection_binding.provider_name = "openai" + collection_binding.model_name = "text-embedding-ada-002" + collection_binding.type = "annotation" + collection_binding.collection_name = f"annotation_collection_{fake.uuid4()}" + db.session.add(collection_binding) + db.session.flush() + + # Create annotation setting + annotation_setting = AppAnnotationSetting() + annotation_setting.app_id = app.id + annotation_setting.score_threshold = 0.8 + annotation_setting.collection_binding_id = collection_binding.id + annotation_setting.created_user_id = account.id + annotation_setting.updated_user_id = account.id + db.session.add(annotation_setting) + db.session.commit() + + # First, create an annotation + original_args = { + "question": fake.sentence(), + "answer": fake.text(max_nb_chars=200), + } + annotation = AppAnnotationService.insert_app_annotation_directly(original_args, app.id) + + # Reset mock to clear previous calls + mock_external_service_dependencies["update_task"].delay.reset_mock() + + # Update the annotation + updated_args = { + "question": fake.sentence(), + "answer": fake.text(max_nb_chars=200), + } + updated_annotation = AppAnnotationService.update_app_annotation_directly(updated_args, app.id, annotation.id) + + # Verify annotation was updated correctly + assert updated_annotation.id == annotation.id + assert updated_annotation.app_id == app.id + assert updated_annotation.question == updated_args["question"] + assert updated_annotation.content == updated_args["answer"] + assert updated_annotation.account_id == account.id + + # Verify original values were changed + assert updated_annotation.question != original_args["question"] + assert updated_annotation.content != original_args["answer"] + + # Verify update_annotation_to_index_task was called + mock_external_service_dependencies["update_task"].delay.assert_called_once() + call_args = mock_external_service_dependencies["update_task"].delay.call_args[0] + assert call_args[0] == annotation.id # annotation_id + assert call_args[1] == updated_args["question"] # question + assert call_args[2] == account.current_tenant_id # tenant_id + assert call_args[3] == app.id # app_id + assert call_args[4] == collection_binding.id # collection_binding_id + + def test_delete_app_annotation_with_setting_success( + self, db_session_with_containers, mock_external_service_dependencies + ): + """ + Test successful deletion of app annotation with annotation setting enabled. + """ + fake = Faker() + app, account = self._create_test_app_and_account(db_session_with_containers, mock_external_service_dependencies) + + # Create annotation setting first + from extensions.ext_database import db + from models.dataset import DatasetCollectionBinding + from models.model import AppAnnotationSetting + + # Create a collection binding first + collection_binding = DatasetCollectionBinding() + collection_binding.id = fake.uuid4() + collection_binding.provider_name = "openai" + collection_binding.model_name = "text-embedding-ada-002" + collection_binding.type = "annotation" + collection_binding.collection_name = f"annotation_collection_{fake.uuid4()}" + db.session.add(collection_binding) + db.session.flush() + + # Create annotation setting + annotation_setting = AppAnnotationSetting() + annotation_setting.app_id = app.id + annotation_setting.score_threshold = 0.8 + annotation_setting.collection_binding_id = collection_binding.id + annotation_setting.created_user_id = account.id + annotation_setting.updated_user_id = account.id + db.session.add(annotation_setting) + db.session.commit() + + # Create an annotation first + annotation_args = { + "question": fake.sentence(), + "answer": fake.text(max_nb_chars=200), + } + annotation = AppAnnotationService.insert_app_annotation_directly(annotation_args, app.id) + annotation_id = annotation.id + + # Reset mock to clear previous calls + mock_external_service_dependencies["delete_task"].delay.reset_mock() + + # Delete the annotation + AppAnnotationService.delete_app_annotation(app.id, annotation_id) + + # Verify annotation was deleted + deleted_annotation = db.session.query(MessageAnnotation).filter(MessageAnnotation.id == annotation_id).first() + assert deleted_annotation is None + + # Verify delete_annotation_index_task was called + mock_external_service_dependencies["delete_task"].delay.assert_called_once() + call_args = mock_external_service_dependencies["delete_task"].delay.call_args[0] + assert call_args[0] == annotation_id # annotation_id + assert call_args[1] == app.id # app_id + assert call_args[2] == account.current_tenant_id # tenant_id + assert call_args[3] == collection_binding.id # collection_binding_id + + def test_up_insert_app_annotation_from_message_with_setting_success( + self, db_session_with_containers, mock_external_service_dependencies + ): + """ + Test creating annotation from message with annotation setting enabled. + """ + fake = Faker() + app, account = self._create_test_app_and_account(db_session_with_containers, mock_external_service_dependencies) + + # Create annotation setting first + from extensions.ext_database import db + from models.dataset import DatasetCollectionBinding + from models.model import AppAnnotationSetting + + # Create a collection binding first + collection_binding = DatasetCollectionBinding() + collection_binding.id = fake.uuid4() + collection_binding.provider_name = "openai" + collection_binding.model_name = "text-embedding-ada-002" + collection_binding.type = "annotation" + collection_binding.collection_name = f"annotation_collection_{fake.uuid4()}" + db.session.add(collection_binding) + db.session.flush() + + # Create annotation setting + annotation_setting = AppAnnotationSetting() + annotation_setting.app_id = app.id + annotation_setting.score_threshold = 0.8 + annotation_setting.collection_binding_id = collection_binding.id + annotation_setting.created_user_id = account.id + annotation_setting.updated_user_id = account.id + db.session.add(annotation_setting) + db.session.commit() + + # Create a conversation and message first + conversation = self._create_test_conversation(app, account, fake) + message = self._create_test_message(app, conversation, account, fake) + + # Setup annotation data with message_id + annotation_args = { + "message_id": message.id, + "question": fake.sentence(), + "answer": fake.text(max_nb_chars=200), + } + + # Insert annotation from message + annotation = AppAnnotationService.up_insert_app_annotation_from_message(annotation_args, app.id) + + # Verify annotation was created correctly + assert annotation.app_id == app.id + assert annotation.conversation_id == conversation.id + assert annotation.message_id == message.id + assert annotation.question == annotation_args["question"] + assert annotation.content == annotation_args["answer"] + assert annotation.account_id == account.id + + # Verify add_annotation_to_index_task was called + mock_external_service_dependencies["add_task"].delay.assert_called_once() + call_args = mock_external_service_dependencies["add_task"].delay.call_args[0] + assert call_args[0] == annotation.id # annotation_id + assert call_args[1] == annotation_args["question"] # question + assert call_args[2] == account.current_tenant_id # tenant_id + assert call_args[3] == app.id # app_id + assert call_args[4] == collection_binding.id # collection_binding_id From 2edd32fdea9cbf308a6b85160323f278a4d5c2ca Mon Sep 17 00:00:00 2001 From: lyzno1 <92089059+lyzno1@users.noreply.github.com> Date: Fri, 8 Aug 2025 09:05:55 +0800 Subject: [PATCH 183/415] fix: resolve AppCard description overlap with tag area (#23585) Co-authored-by: autofix-ci[bot] <114827586+autofix-ci[bot]@users.noreply.github.com> --- api/controllers/console/app/app.py | 12 +- api/controllers/console/datasets/datasets.py | 4 +- .../service_api/dataset/dataset.py | 4 +- .../app/test_description_validation.py | 168 ++++++++++++ .../app/test_description_validation.py | 252 ++++++++++++++++++ web/__tests__/description-validation.test.tsx | 97 +++++++ .../components/app/create-app-modal/index.tsx | 7 +- web/app/components/apps/app-card.tsx | 9 +- 8 files changed, 541 insertions(+), 12 deletions(-) create mode 100644 api/tests/integration_tests/controllers/console/app/test_description_validation.py create mode 100644 api/tests/unit_tests/controllers/console/app/test_description_validation.py create mode 100644 web/__tests__/description-validation.test.tsx diff --git a/api/controllers/console/app/app.py b/api/controllers/console/app/app.py index 9fe32dde6d..1cc13d669c 100644 --- a/api/controllers/console/app/app.py +++ b/api/controllers/console/app/app.py @@ -28,6 +28,12 @@ from services.feature_service import FeatureService ALLOW_CREATE_APP_MODES = ["chat", "agent-chat", "advanced-chat", "workflow", "completion"] +def _validate_description_length(description): + if description and len(description) > 400: + raise ValueError("Description cannot exceed 400 characters.") + return description + + class AppListApi(Resource): @setup_required @login_required @@ -94,7 +100,7 @@ class AppListApi(Resource): """Create app""" parser = reqparse.RequestParser() parser.add_argument("name", type=str, required=True, location="json") - parser.add_argument("description", type=str, location="json") + parser.add_argument("description", type=_validate_description_length, location="json") parser.add_argument("mode", type=str, choices=ALLOW_CREATE_APP_MODES, location="json") parser.add_argument("icon_type", type=str, location="json") parser.add_argument("icon", type=str, location="json") @@ -146,7 +152,7 @@ class AppApi(Resource): parser = reqparse.RequestParser() parser.add_argument("name", type=str, required=True, nullable=False, location="json") - parser.add_argument("description", type=str, location="json") + parser.add_argument("description", type=_validate_description_length, location="json") parser.add_argument("icon_type", type=str, location="json") parser.add_argument("icon", type=str, location="json") parser.add_argument("icon_background", type=str, location="json") @@ -189,7 +195,7 @@ class AppCopyApi(Resource): parser = reqparse.RequestParser() parser.add_argument("name", type=str, location="json") - parser.add_argument("description", type=str, location="json") + parser.add_argument("description", type=_validate_description_length, location="json") parser.add_argument("icon_type", type=str, location="json") parser.add_argument("icon", type=str, location="json") parser.add_argument("icon_background", type=str, location="json") diff --git a/api/controllers/console/datasets/datasets.py b/api/controllers/console/datasets/datasets.py index 93f82e8e24..2befd2a651 100644 --- a/api/controllers/console/datasets/datasets.py +++ b/api/controllers/console/datasets/datasets.py @@ -41,7 +41,7 @@ def _validate_name(name): def _validate_description_length(description): - if len(description) > 400: + if description and len(description) > 400: raise ValueError("Description cannot exceed 400 characters.") return description @@ -113,7 +113,7 @@ class DatasetListApi(Resource): ) parser.add_argument( "description", - type=str, + type=_validate_description_length, nullable=True, required=False, default="", diff --git a/api/controllers/service_api/dataset/dataset.py b/api/controllers/service_api/dataset/dataset.py index a499719fc3..29eef41253 100644 --- a/api/controllers/service_api/dataset/dataset.py +++ b/api/controllers/service_api/dataset/dataset.py @@ -29,7 +29,7 @@ def _validate_name(name): def _validate_description_length(description): - if len(description) > 400: + if description and len(description) > 400: raise ValueError("Description cannot exceed 400 characters.") return description @@ -87,7 +87,7 @@ class DatasetListApi(DatasetApiResource): ) parser.add_argument( "description", - type=str, + type=_validate_description_length, nullable=True, required=False, default="", diff --git a/api/tests/integration_tests/controllers/console/app/test_description_validation.py b/api/tests/integration_tests/controllers/console/app/test_description_validation.py new file mode 100644 index 0000000000..2d0ceac760 --- /dev/null +++ b/api/tests/integration_tests/controllers/console/app/test_description_validation.py @@ -0,0 +1,168 @@ +""" +Unit tests for App description validation functions. + +This test module validates the 400-character limit enforcement +for App descriptions across all creation and editing endpoints. +""" + +import os +import sys + +import pytest + +# Add the API root to Python path for imports +sys.path.insert(0, os.path.join(os.path.dirname(__file__), "..", "..", "..", "..")) + + +class TestAppDescriptionValidationUnit: + """Unit tests for description validation function""" + + def test_validate_description_length_function(self): + """Test the _validate_description_length function directly""" + from controllers.console.app.app import _validate_description_length + + # Test valid descriptions + assert _validate_description_length("") == "" + assert _validate_description_length("x" * 400) == "x" * 400 + assert _validate_description_length(None) is None + + # Test invalid descriptions + with pytest.raises(ValueError) as exc_info: + _validate_description_length("x" * 401) + assert "Description cannot exceed 400 characters." in str(exc_info.value) + + with pytest.raises(ValueError) as exc_info: + _validate_description_length("x" * 500) + assert "Description cannot exceed 400 characters." in str(exc_info.value) + + with pytest.raises(ValueError) as exc_info: + _validate_description_length("x" * 1000) + assert "Description cannot exceed 400 characters." in str(exc_info.value) + + def test_validation_consistency_with_dataset(self): + """Test that App and Dataset validation functions are consistent""" + from controllers.console.app.app import _validate_description_length as app_validate + from controllers.console.datasets.datasets import _validate_description_length as dataset_validate + from controllers.service_api.dataset.dataset import _validate_description_length as service_dataset_validate + + # Test same valid inputs + valid_desc = "x" * 400 + assert app_validate(valid_desc) == dataset_validate(valid_desc) == service_dataset_validate(valid_desc) + assert app_validate("") == dataset_validate("") == service_dataset_validate("") + assert app_validate(None) == dataset_validate(None) == service_dataset_validate(None) + + # Test same invalid inputs produce same error + invalid_desc = "x" * 401 + + app_error = None + dataset_error = None + service_dataset_error = None + + try: + app_validate(invalid_desc) + except ValueError as e: + app_error = str(e) + + try: + dataset_validate(invalid_desc) + except ValueError as e: + dataset_error = str(e) + + try: + service_dataset_validate(invalid_desc) + except ValueError as e: + service_dataset_error = str(e) + + assert app_error == dataset_error == service_dataset_error + assert app_error == "Description cannot exceed 400 characters." + + def test_boundary_values(self): + """Test boundary values for description validation""" + from controllers.console.app.app import _validate_description_length + + # Test exact boundary + exactly_400 = "x" * 400 + assert _validate_description_length(exactly_400) == exactly_400 + + # Test just over boundary + just_over_400 = "x" * 401 + with pytest.raises(ValueError): + _validate_description_length(just_over_400) + + # Test just under boundary + just_under_400 = "x" * 399 + assert _validate_description_length(just_under_400) == just_under_400 + + def test_edge_cases(self): + """Test edge cases for description validation""" + from controllers.console.app.app import _validate_description_length + + # Test None input + assert _validate_description_length(None) is None + + # Test empty string + assert _validate_description_length("") == "" + + # Test single character + assert _validate_description_length("a") == "a" + + # Test unicode characters + unicode_desc = "测试" * 200 # 400 characters in Chinese + assert _validate_description_length(unicode_desc) == unicode_desc + + # Test unicode over limit + unicode_over = "测试" * 201 # 402 characters + with pytest.raises(ValueError): + _validate_description_length(unicode_over) + + def test_whitespace_handling(self): + """Test how validation handles whitespace""" + from controllers.console.app.app import _validate_description_length + + # Test description with spaces + spaces_400 = " " * 400 + assert _validate_description_length(spaces_400) == spaces_400 + + # Test description with spaces over limit + spaces_401 = " " * 401 + with pytest.raises(ValueError): + _validate_description_length(spaces_401) + + # Test mixed content + mixed_400 = "a" * 200 + " " * 200 + assert _validate_description_length(mixed_400) == mixed_400 + + # Test mixed over limit + mixed_401 = "a" * 200 + " " * 201 + with pytest.raises(ValueError): + _validate_description_length(mixed_401) + + +if __name__ == "__main__": + # Run tests directly + import traceback + + test_instance = TestAppDescriptionValidationUnit() + test_methods = [method for method in dir(test_instance) if method.startswith("test_")] + + passed = 0 + failed = 0 + + for test_method in test_methods: + try: + print(f"Running {test_method}...") + getattr(test_instance, test_method)() + print(f"✅ {test_method} PASSED") + passed += 1 + except Exception as e: + print(f"❌ {test_method} FAILED: {str(e)}") + traceback.print_exc() + failed += 1 + + print(f"\n📊 Test Results: {passed} passed, {failed} failed") + + if failed == 0: + print("🎉 All tests passed!") + else: + print("💥 Some tests failed!") + sys.exit(1) diff --git a/api/tests/unit_tests/controllers/console/app/test_description_validation.py b/api/tests/unit_tests/controllers/console/app/test_description_validation.py new file mode 100644 index 0000000000..178267e560 --- /dev/null +++ b/api/tests/unit_tests/controllers/console/app/test_description_validation.py @@ -0,0 +1,252 @@ +import pytest + +from controllers.console.app.app import _validate_description_length as app_validate +from controllers.console.datasets.datasets import _validate_description_length as dataset_validate +from controllers.service_api.dataset.dataset import _validate_description_length as service_dataset_validate + + +class TestDescriptionValidationUnit: + """Unit tests for description validation functions in App and Dataset APIs""" + + def test_app_validate_description_length_valid(self): + """Test App validation function with valid descriptions""" + # Empty string should be valid + assert app_validate("") == "" + + # None should be valid + assert app_validate(None) is None + + # Short description should be valid + short_desc = "Short description" + assert app_validate(short_desc) == short_desc + + # Exactly 400 characters should be valid + exactly_400 = "x" * 400 + assert app_validate(exactly_400) == exactly_400 + + # Just under limit should be valid + just_under = "x" * 399 + assert app_validate(just_under) == just_under + + def test_app_validate_description_length_invalid(self): + """Test App validation function with invalid descriptions""" + # 401 characters should fail + just_over = "x" * 401 + with pytest.raises(ValueError) as exc_info: + app_validate(just_over) + assert "Description cannot exceed 400 characters." in str(exc_info.value) + + # 500 characters should fail + way_over = "x" * 500 + with pytest.raises(ValueError) as exc_info: + app_validate(way_over) + assert "Description cannot exceed 400 characters." in str(exc_info.value) + + # 1000 characters should fail + very_long = "x" * 1000 + with pytest.raises(ValueError) as exc_info: + app_validate(very_long) + assert "Description cannot exceed 400 characters." in str(exc_info.value) + + def test_dataset_validate_description_length_valid(self): + """Test Dataset validation function with valid descriptions""" + # Empty string should be valid + assert dataset_validate("") == "" + + # Short description should be valid + short_desc = "Short description" + assert dataset_validate(short_desc) == short_desc + + # Exactly 400 characters should be valid + exactly_400 = "x" * 400 + assert dataset_validate(exactly_400) == exactly_400 + + # Just under limit should be valid + just_under = "x" * 399 + assert dataset_validate(just_under) == just_under + + def test_dataset_validate_description_length_invalid(self): + """Test Dataset validation function with invalid descriptions""" + # 401 characters should fail + just_over = "x" * 401 + with pytest.raises(ValueError) as exc_info: + dataset_validate(just_over) + assert "Description cannot exceed 400 characters." in str(exc_info.value) + + # 500 characters should fail + way_over = "x" * 500 + with pytest.raises(ValueError) as exc_info: + dataset_validate(way_over) + assert "Description cannot exceed 400 characters." in str(exc_info.value) + + def test_service_dataset_validate_description_length_valid(self): + """Test Service Dataset validation function with valid descriptions""" + # Empty string should be valid + assert service_dataset_validate("") == "" + + # None should be valid + assert service_dataset_validate(None) is None + + # Short description should be valid + short_desc = "Short description" + assert service_dataset_validate(short_desc) == short_desc + + # Exactly 400 characters should be valid + exactly_400 = "x" * 400 + assert service_dataset_validate(exactly_400) == exactly_400 + + # Just under limit should be valid + just_under = "x" * 399 + assert service_dataset_validate(just_under) == just_under + + def test_service_dataset_validate_description_length_invalid(self): + """Test Service Dataset validation function with invalid descriptions""" + # 401 characters should fail + just_over = "x" * 401 + with pytest.raises(ValueError) as exc_info: + service_dataset_validate(just_over) + assert "Description cannot exceed 400 characters." in str(exc_info.value) + + # 500 characters should fail + way_over = "x" * 500 + with pytest.raises(ValueError) as exc_info: + service_dataset_validate(way_over) + assert "Description cannot exceed 400 characters." in str(exc_info.value) + + def test_app_dataset_validation_consistency(self): + """Test that App and Dataset validation functions behave identically""" + test_cases = [ + "", # Empty string + "Short description", # Normal description + "x" * 100, # Medium description + "x" * 400, # Exactly at limit + ] + + # Test valid cases produce same results + for test_desc in test_cases: + assert app_validate(test_desc) == dataset_validate(test_desc) == service_dataset_validate(test_desc) + + # Test invalid cases produce same errors + invalid_cases = [ + "x" * 401, # Just over limit + "x" * 500, # Way over limit + "x" * 1000, # Very long + ] + + for invalid_desc in invalid_cases: + app_error = None + dataset_error = None + service_dataset_error = None + + # Capture App validation error + try: + app_validate(invalid_desc) + except ValueError as e: + app_error = str(e) + + # Capture Dataset validation error + try: + dataset_validate(invalid_desc) + except ValueError as e: + dataset_error = str(e) + + # Capture Service Dataset validation error + try: + service_dataset_validate(invalid_desc) + except ValueError as e: + service_dataset_error = str(e) + + # All should produce errors + assert app_error is not None, f"App validation should fail for {len(invalid_desc)} characters" + assert dataset_error is not None, f"Dataset validation should fail for {len(invalid_desc)} characters" + error_msg = f"Service Dataset validation should fail for {len(invalid_desc)} characters" + assert service_dataset_error is not None, error_msg + + # Errors should be identical + error_msg = f"Error messages should be identical for {len(invalid_desc)} characters" + assert app_error == dataset_error == service_dataset_error, error_msg + assert app_error == "Description cannot exceed 400 characters." + + def test_boundary_values(self): + """Test boundary values around the 400 character limit""" + boundary_tests = [ + (0, True), # Empty + (1, True), # Minimum + (399, True), # Just under limit + (400, True), # Exactly at limit + (401, False), # Just over limit + (402, False), # Over limit + (500, False), # Way over limit + ] + + for length, should_pass in boundary_tests: + test_desc = "x" * length + + if should_pass: + # Should not raise exception + assert app_validate(test_desc) == test_desc + assert dataset_validate(test_desc) == test_desc + assert service_dataset_validate(test_desc) == test_desc + else: + # Should raise ValueError + with pytest.raises(ValueError): + app_validate(test_desc) + with pytest.raises(ValueError): + dataset_validate(test_desc) + with pytest.raises(ValueError): + service_dataset_validate(test_desc) + + def test_special_characters(self): + """Test validation with special characters, Unicode, etc.""" + # Unicode characters + unicode_desc = "测试描述" * 100 # Chinese characters + if len(unicode_desc) <= 400: + assert app_validate(unicode_desc) == unicode_desc + assert dataset_validate(unicode_desc) == unicode_desc + assert service_dataset_validate(unicode_desc) == unicode_desc + + # Special characters + special_desc = "Special chars: !@#$%^&*()_+-=[]{}|;':\",./<>?" * 10 + if len(special_desc) <= 400: + assert app_validate(special_desc) == special_desc + assert dataset_validate(special_desc) == special_desc + assert service_dataset_validate(special_desc) == special_desc + + # Mixed content + mixed_desc = "Mixed content: 测试 123 !@# " * 15 + if len(mixed_desc) <= 400: + assert app_validate(mixed_desc) == mixed_desc + assert dataset_validate(mixed_desc) == mixed_desc + assert service_dataset_validate(mixed_desc) == mixed_desc + elif len(mixed_desc) > 400: + with pytest.raises(ValueError): + app_validate(mixed_desc) + with pytest.raises(ValueError): + dataset_validate(mixed_desc) + with pytest.raises(ValueError): + service_dataset_validate(mixed_desc) + + def test_whitespace_handling(self): + """Test validation with various whitespace scenarios""" + # Leading/trailing whitespace + whitespace_desc = " Description with whitespace " + if len(whitespace_desc) <= 400: + assert app_validate(whitespace_desc) == whitespace_desc + assert dataset_validate(whitespace_desc) == whitespace_desc + assert service_dataset_validate(whitespace_desc) == whitespace_desc + + # Newlines and tabs + multiline_desc = "Line 1\nLine 2\tTabbed content" + if len(multiline_desc) <= 400: + assert app_validate(multiline_desc) == multiline_desc + assert dataset_validate(multiline_desc) == multiline_desc + assert service_dataset_validate(multiline_desc) == multiline_desc + + # Only whitespace over limit + only_spaces = " " * 401 + with pytest.raises(ValueError): + app_validate(only_spaces) + with pytest.raises(ValueError): + dataset_validate(only_spaces) + with pytest.raises(ValueError): + service_dataset_validate(only_spaces) diff --git a/web/__tests__/description-validation.test.tsx b/web/__tests__/description-validation.test.tsx new file mode 100644 index 0000000000..85263b035f --- /dev/null +++ b/web/__tests__/description-validation.test.tsx @@ -0,0 +1,97 @@ +/** + * Description Validation Test + * + * Tests for the 400-character description validation across App and Dataset + * creation and editing workflows to ensure consistent validation behavior. + */ + +describe('Description Validation Logic', () => { + // Simulate backend validation function + const validateDescriptionLength = (description?: string | null) => { + if (description && description.length > 400) + throw new Error('Description cannot exceed 400 characters.') + + return description + } + + describe('Backend Validation Function', () => { + test('allows description within 400 characters', () => { + const validDescription = 'x'.repeat(400) + expect(() => validateDescriptionLength(validDescription)).not.toThrow() + expect(validateDescriptionLength(validDescription)).toBe(validDescription) + }) + + test('allows empty description', () => { + expect(() => validateDescriptionLength('')).not.toThrow() + expect(() => validateDescriptionLength(null)).not.toThrow() + expect(() => validateDescriptionLength(undefined)).not.toThrow() + }) + + test('rejects description exceeding 400 characters', () => { + const invalidDescription = 'x'.repeat(401) + expect(() => validateDescriptionLength(invalidDescription)).toThrow( + 'Description cannot exceed 400 characters.', + ) + }) + }) + + describe('Backend Validation Consistency', () => { + test('App and Dataset have consistent validation limits', () => { + const maxLength = 400 + const validDescription = 'x'.repeat(maxLength) + const invalidDescription = 'x'.repeat(maxLength + 1) + + // Both should accept exactly 400 characters + expect(validDescription.length).toBe(400) + expect(() => validateDescriptionLength(validDescription)).not.toThrow() + + // Both should reject 401 characters + expect(invalidDescription.length).toBe(401) + expect(() => validateDescriptionLength(invalidDescription)).toThrow() + }) + + test('validation error messages are consistent', () => { + const expectedErrorMessage = 'Description cannot exceed 400 characters.' + + // This would be the error message from both App and Dataset backend validation + expect(expectedErrorMessage).toBe('Description cannot exceed 400 characters.') + + const invalidDescription = 'x'.repeat(401) + try { + validateDescriptionLength(invalidDescription) + } + catch (error) { + expect((error as Error).message).toBe(expectedErrorMessage) + } + }) + }) + + describe('Character Length Edge Cases', () => { + const testCases = [ + { length: 0, shouldPass: true, description: 'empty description' }, + { length: 1, shouldPass: true, description: '1 character' }, + { length: 399, shouldPass: true, description: '399 characters' }, + { length: 400, shouldPass: true, description: '400 characters (boundary)' }, + { length: 401, shouldPass: false, description: '401 characters (over limit)' }, + { length: 500, shouldPass: false, description: '500 characters' }, + { length: 1000, shouldPass: false, description: '1000 characters' }, + ] + + testCases.forEach(({ length, shouldPass, description }) => { + test(`handles ${description} correctly`, () => { + const testDescription = length > 0 ? 'x'.repeat(length) : '' + expect(testDescription.length).toBe(length) + + if (shouldPass) { + expect(() => validateDescriptionLength(testDescription)).not.toThrow() + expect(validateDescriptionLength(testDescription)).toBe(testDescription) + } + else { + expect(() => validateDescriptionLength(testDescription)).toThrow( + 'Description cannot exceed 400 characters.', + ) + } + }) + }) + }) +}) diff --git a/web/app/components/app/create-app-modal/index.tsx b/web/app/components/app/create-app-modal/index.tsx index c37f7b051a..70a45a4bbe 100644 --- a/web/app/components/app/create-app-modal/index.tsx +++ b/web/app/components/app/create-app-modal/index.tsx @@ -82,8 +82,11 @@ function CreateApp({ onClose, onSuccess, onCreateFromTemplate }: CreateAppProps) localStorage.setItem(NEED_REFRESH_APP_LIST_KEY, '1') getRedirection(isCurrentWorkspaceEditor, app, push) } - catch { - notify({ type: 'error', message: t('app.newApp.appCreateFailed') }) + catch (e: any) { + notify({ + type: 'error', + message: e.message || t('app.newApp.appCreateFailed'), + }) } isCreatingRef.current = false }, [name, notify, t, appMode, appIcon, description, onSuccess, onClose, push, isCurrentWorkspaceEditor]) diff --git a/web/app/components/apps/app-card.tsx b/web/app/components/apps/app-card.tsx index a91c2edf1e..688da4c25d 100644 --- a/web/app/components/apps/app-card.tsx +++ b/web/app/components/apps/app-card.tsx @@ -117,8 +117,11 @@ const AppCard = ({ app, onRefresh }: AppCardProps) => { if (onRefresh) onRefresh() } - catch { - notify({ type: 'error', message: t('app.editFailed') }) + catch (e: any) { + notify({ + type: 'error', + message: e.message || t('app.editFailed'), + }) } }, [app.id, notify, onRefresh, t]) @@ -364,7 +367,7 @@ const AppCard = ({ app, onRefresh }: AppCardProps) => {
    {app.description} From c8c591d73cb35f093e943be2b1d64f11046b3673 Mon Sep 17 00:00:00 2001 From: Yongtao Huang Date: Fri, 8 Aug 2025 10:07:59 +0900 Subject: [PATCH 184/415] Fix incorrect exception handling in db query (#23582) Signed-off-by: Yongtao Huang --- api/commands.py | 14 +++++++------- api/schedule/clean_embedding_cache_task.py | 6 +++--- api/schedule/clean_messages.py | 6 +++--- api/schedule/clean_unused_datasets_task.py | 10 +++++----- api/services/workflow_draft_variable_service.py | 4 ++-- 5 files changed, 20 insertions(+), 20 deletions(-) diff --git a/api/commands.py b/api/commands.py index 8177f1a48c..8ee52ba716 100644 --- a/api/commands.py +++ b/api/commands.py @@ -9,7 +9,7 @@ import sqlalchemy as sa from flask import current_app from pydantic import TypeAdapter from sqlalchemy import select -from werkzeug.exceptions import NotFound +from sqlalchemy.exc import SQLAlchemyError from configs import dify_config from constants.languages import languages @@ -181,8 +181,8 @@ def migrate_annotation_vector_database(): ) if not apps: break - except NotFound: - break + except SQLAlchemyError: + raise page += 1 for app in apps: @@ -308,8 +308,8 @@ def migrate_knowledge_vector_database(): ) datasets = db.paginate(select=stmt, page=page, per_page=50, max_per_page=50, error_out=False) - except NotFound: - break + except SQLAlchemyError: + raise page += 1 for dataset in datasets: @@ -561,8 +561,8 @@ def old_metadata_migration(): .order_by(DatasetDocument.created_at.desc()) ) documents = db.paginate(select=stmt, page=page, per_page=50, max_per_page=50, error_out=False) - except NotFound: - break + except SQLAlchemyError: + raise if not documents: break for document in documents: diff --git a/api/schedule/clean_embedding_cache_task.py b/api/schedule/clean_embedding_cache_task.py index 2298acf6eb..2b74fb2dd0 100644 --- a/api/schedule/clean_embedding_cache_task.py +++ b/api/schedule/clean_embedding_cache_task.py @@ -3,7 +3,7 @@ import time import click from sqlalchemy import text -from werkzeug.exceptions import NotFound +from sqlalchemy.exc import SQLAlchemyError import app from configs import dify_config @@ -27,8 +27,8 @@ def clean_embedding_cache_task(): .all() ) embedding_ids = [embedding_id[0] for embedding_id in embedding_ids] - except NotFound: - break + except SQLAlchemyError: + raise if embedding_ids: for embedding_id in embedding_ids: db.session.execute( diff --git a/api/schedule/clean_messages.py b/api/schedule/clean_messages.py index 4c35745959..a896c818a5 100644 --- a/api/schedule/clean_messages.py +++ b/api/schedule/clean_messages.py @@ -3,7 +3,7 @@ import logging import time import click -from werkzeug.exceptions import NotFound +from sqlalchemy.exc import SQLAlchemyError import app from configs import dify_config @@ -42,8 +42,8 @@ def clean_messages(): .all() ) - except NotFound: - break + except SQLAlchemyError: + raise if not messages: break for message in messages: diff --git a/api/schedule/clean_unused_datasets_task.py b/api/schedule/clean_unused_datasets_task.py index 7887835bc5..940da5309e 100644 --- a/api/schedule/clean_unused_datasets_task.py +++ b/api/schedule/clean_unused_datasets_task.py @@ -3,7 +3,7 @@ import time import click from sqlalchemy import func, select -from werkzeug.exceptions import NotFound +from sqlalchemy.exc import SQLAlchemyError import app from configs import dify_config @@ -65,8 +65,8 @@ def clean_unused_datasets_task(): datasets = db.paginate(stmt, page=1, per_page=50) - except NotFound: - break + except SQLAlchemyError: + raise if datasets.items is None or len(datasets.items) == 0: break for dataset in datasets: @@ -146,8 +146,8 @@ def clean_unused_datasets_task(): ) datasets = db.paginate(stmt, page=1, per_page=50) - except NotFound: - break + except SQLAlchemyError: + raise if datasets.items is None or len(datasets.items) == 0: break for dataset in datasets: diff --git a/api/services/workflow_draft_variable_service.py b/api/services/workflow_draft_variable_service.py index 2d62d49d91..6bbb3bca04 100644 --- a/api/services/workflow_draft_variable_service.py +++ b/api/services/workflow_draft_variable_service.py @@ -256,7 +256,7 @@ class WorkflowDraftVariableService: def _reset_node_var_or_sys_var( self, workflow: Workflow, variable: WorkflowDraftVariable ) -> WorkflowDraftVariable | None: - # If a variable does not allow updating, it makes no sence to resetting it. + # If a variable does not allow updating, it makes no sense to reset it. if not variable.editable: return variable # No execution record for this variable, delete the variable instead. @@ -478,7 +478,7 @@ def _batch_upsert_draft_variable( "node_execution_id": stmt.excluded.node_execution_id, }, ) - elif _UpsertPolicy.IGNORE: + elif policy == _UpsertPolicy.IGNORE: stmt = stmt.on_conflict_do_nothing(index_elements=WorkflowDraftVariable.unique_app_id_node_id_name()) else: raise Exception("Invalid value for update policy.") From 4b0480c8b3c2b75f64176d18f01ed598d0f86656 Mon Sep 17 00:00:00 2001 From: Will Date: Fri, 8 Aug 2025 09:08:14 +0800 Subject: [PATCH 185/415] feat: improved MCP timeout (#23546) --- .../console/workspace/tool_providers.py | 10 ++++ api/core/mcp/client/sse_client.py | 2 +- api/core/mcp/client/streamable_client.py | 26 +++++----- api/core/mcp/mcp_client.py | 28 +++++++---- api/core/mcp/session/base_session.py | 8 +--- api/core/mcp/session/client_session.py | 5 +- api/core/tools/__base/tool_provider.py | 2 - api/core/tools/mcp_tool/provider.py | 30 +++++++++--- api/core/tools/mcp_tool/tool.py | 27 ++++++++++- api/core/tools/tool_manager.py | 3 -- ...f407_add_timeout_for_tool_mcp_providers.py | 47 +++++++++++++++++++ api/models/tools.py | 2 + .../tools/mcp_tools_manage_service.py | 10 ++++ 13 files changed, 153 insertions(+), 47 deletions(-) create mode 100644 api/migrations/versions/2025_08_07_1115-fa8b0fa6f407_add_timeout_for_tool_mcp_providers.py diff --git a/api/controllers/console/workspace/tool_providers.py b/api/controllers/console/workspace/tool_providers.py index c4d1ef70d8..8c8b73b45d 100644 --- a/api/controllers/console/workspace/tool_providers.py +++ b/api/controllers/console/workspace/tool_providers.py @@ -862,6 +862,10 @@ class ToolProviderMCPApi(Resource): parser.add_argument("icon_type", type=str, required=True, nullable=False, location="json") parser.add_argument("icon_background", type=str, required=False, nullable=True, location="json", default="") parser.add_argument("server_identifier", type=str, required=True, nullable=False, location="json") + parser.add_argument("timeout", type=float, required=False, nullable=False, location="json", default=30) + parser.add_argument( + "sse_read_timeout", type=float, required=False, nullable=False, location="json", default=300 + ) args = parser.parse_args() user = current_user if not is_valid_url(args["server_url"]): @@ -876,6 +880,8 @@ class ToolProviderMCPApi(Resource): icon_background=args["icon_background"], user_id=user.id, server_identifier=args["server_identifier"], + timeout=args["timeout"], + sse_read_timeout=args["sse_read_timeout"], ) ) @@ -891,6 +897,8 @@ class ToolProviderMCPApi(Resource): parser.add_argument("icon_background", type=str, required=False, nullable=True, location="json") parser.add_argument("provider_id", type=str, required=True, nullable=False, location="json") parser.add_argument("server_identifier", type=str, required=True, nullable=False, location="json") + parser.add_argument("timeout", type=float, required=False, nullable=True, location="json") + parser.add_argument("sse_read_timeout", type=float, required=False, nullable=True, location="json") args = parser.parse_args() if not is_valid_url(args["server_url"]): if "[__HIDDEN__]" in args["server_url"]: @@ -906,6 +914,8 @@ class ToolProviderMCPApi(Resource): icon_type=args["icon_type"], icon_background=args["icon_background"], server_identifier=args["server_identifier"], + timeout=args.get("timeout"), + sse_read_timeout=args.get("sse_read_timeout"), ) return {"result": "success"} diff --git a/api/core/mcp/client/sse_client.py b/api/core/mcp/client/sse_client.py index 4226e77f7e..2d3a3f5344 100644 --- a/api/core/mcp/client/sse_client.py +++ b/api/core/mcp/client/sse_client.py @@ -327,7 +327,7 @@ def send_message(http_client: httpx.Client, endpoint_url: str, session_message: ) response.raise_for_status() logger.debug("Client message sent successfully: %s", response.status_code) - except Exception as exc: + except Exception: logger.exception("Error sending message") raise diff --git a/api/core/mcp/client/streamable_client.py b/api/core/mcp/client/streamable_client.py index ca414ebb93..14e346c2f3 100644 --- a/api/core/mcp/client/streamable_client.py +++ b/api/core/mcp/client/streamable_client.py @@ -55,14 +55,10 @@ DEFAULT_QUEUE_READ_TIMEOUT = 3 class StreamableHTTPError(Exception): """Base exception for StreamableHTTP transport errors.""" - pass - class ResumptionError(StreamableHTTPError): """Raised when resumption request is invalid.""" - pass - @dataclass class RequestContext: @@ -74,7 +70,7 @@ class RequestContext: session_message: SessionMessage metadata: ClientMessageMetadata | None server_to_client_queue: ServerToClientQueue # Renamed for clarity - sse_read_timeout: timedelta + sse_read_timeout: float class StreamableHTTPTransport: @@ -84,8 +80,8 @@ class StreamableHTTPTransport: self, url: str, headers: dict[str, Any] | None = None, - timeout: timedelta = timedelta(seconds=30), - sse_read_timeout: timedelta = timedelta(seconds=60 * 5), + timeout: float | timedelta = 30, + sse_read_timeout: float | timedelta = 60 * 5, ) -> None: """Initialize the StreamableHTTP transport. @@ -97,8 +93,10 @@ class StreamableHTTPTransport: """ self.url = url self.headers = headers or {} - self.timeout = timeout - self.sse_read_timeout = sse_read_timeout + self.timeout = timeout.total_seconds() if isinstance(timeout, timedelta) else timeout + self.sse_read_timeout = ( + sse_read_timeout.total_seconds() if isinstance(sse_read_timeout, timedelta) else sse_read_timeout + ) self.session_id: str | None = None self.request_headers = { ACCEPT: f"{JSON}, {SSE}", @@ -186,7 +184,7 @@ class StreamableHTTPTransport: with ssrf_proxy_sse_connect( self.url, headers=headers, - timeout=httpx.Timeout(self.timeout.seconds, read=self.sse_read_timeout.seconds), + timeout=httpx.Timeout(self.timeout, read=self.sse_read_timeout), client=client, method="GET", ) as event_source: @@ -215,7 +213,7 @@ class StreamableHTTPTransport: with ssrf_proxy_sse_connect( self.url, headers=headers, - timeout=httpx.Timeout(self.timeout.seconds, read=ctx.sse_read_timeout.seconds), + timeout=httpx.Timeout(self.timeout, read=self.sse_read_timeout), client=ctx.client, method="GET", ) as event_source: @@ -402,8 +400,8 @@ class StreamableHTTPTransport: def streamablehttp_client( url: str, headers: dict[str, Any] | None = None, - timeout: timedelta = timedelta(seconds=30), - sse_read_timeout: timedelta = timedelta(seconds=60 * 5), + timeout: float | timedelta = 30, + sse_read_timeout: float | timedelta = 60 * 5, terminate_on_close: bool = True, ) -> Generator[ tuple[ @@ -436,7 +434,7 @@ def streamablehttp_client( try: with create_ssrf_proxy_mcp_http_client( headers=transport.request_headers, - timeout=httpx.Timeout(transport.timeout.seconds, read=transport.sse_read_timeout.seconds), + timeout=httpx.Timeout(transport.timeout, read=transport.sse_read_timeout), ) as client: # Define callbacks that need access to thread pool def start_get_stream() -> None: diff --git a/api/core/mcp/mcp_client.py b/api/core/mcp/mcp_client.py index 875d13de05..7d90d51956 100644 --- a/api/core/mcp/mcp_client.py +++ b/api/core/mcp/mcp_client.py @@ -23,12 +23,18 @@ class MCPClient: authed: bool = True, authorization_code: Optional[str] = None, for_list: bool = False, + headers: Optional[dict[str, str]] = None, + timeout: Optional[float] = None, + sse_read_timeout: Optional[float] = None, ): # Initialize info self.provider_id = provider_id self.tenant_id = tenant_id self.client_type = "streamable" self.server_url = server_url + self.headers = headers or {} + self.timeout = timeout + self.sse_read_timeout = sse_read_timeout # Authentication info self.authed = authed @@ -43,7 +49,7 @@ class MCPClient: self._session: Optional[ClientSession] = None self._streams_context: Optional[AbstractContextManager[Any]] = None self._session_context: Optional[ClientSession] = None - self.exit_stack = ExitStack() + self._exit_stack = ExitStack() # Whether the client has been initialized self._initialized = False @@ -90,21 +96,26 @@ class MCPClient: headers = ( {"Authorization": f"{self.token.token_type.capitalize()} {self.token.access_token}"} if self.authed and self.token - else {} + else self.headers + ) + self._streams_context = client_factory( + url=self.server_url, + headers=headers, + timeout=self.timeout, + sse_read_timeout=self.sse_read_timeout, ) - self._streams_context = client_factory(url=self.server_url, headers=headers) if not self._streams_context: raise MCPConnectionError("Failed to create connection context") # Use exit_stack to manage context managers properly if method_name == "mcp": - read_stream, write_stream, _ = self.exit_stack.enter_context(self._streams_context) + read_stream, write_stream, _ = self._exit_stack.enter_context(self._streams_context) streams = (read_stream, write_stream) else: # sse_client - streams = self.exit_stack.enter_context(self._streams_context) + streams = self._exit_stack.enter_context(self._streams_context) self._session_context = ClientSession(*streams) - self._session = self.exit_stack.enter_context(self._session_context) + self._session = self._exit_stack.enter_context(self._session_context) session = cast(ClientSession, self._session) session.initialize() return @@ -120,9 +131,6 @@ class MCPClient: if first_try: return self.connect_server(client_factory, method_name, first_try=False) - except MCPConnectionError: - raise - def list_tools(self) -> list[Tool]: """Connect to an MCP server running with SSE transport""" # List available tools to verify connection @@ -142,7 +150,7 @@ class MCPClient: """Clean up resources""" try: # ExitStack will handle proper cleanup of all managed context managers - self.exit_stack.close() + self._exit_stack.close() except Exception as e: logging.exception("Error during cleanup") raise ValueError(f"Error during cleanup: {e}") diff --git a/api/core/mcp/session/base_session.py b/api/core/mcp/session/base_session.py index 3b6c9a7424..3f98aa94ae 100644 --- a/api/core/mcp/session/base_session.py +++ b/api/core/mcp/session/base_session.py @@ -2,7 +2,6 @@ import logging import queue from collections.abc import Callable from concurrent.futures import Future, ThreadPoolExecutor, TimeoutError -from contextlib import ExitStack from datetime import timedelta from types import TracebackType from typing import Any, Generic, Self, TypeVar @@ -170,7 +169,6 @@ class BaseSession( self._receive_notification_type = receive_notification_type self._session_read_timeout_seconds = read_timeout_seconds self._in_flight = {} - self._exit_stack = ExitStack() # Initialize executor and future to None for proper cleanup checks self._executor: ThreadPoolExecutor | None = None self._receiver_future: Future | None = None @@ -377,7 +375,7 @@ class BaseSession( self._handle_incoming(RuntimeError(f"Server Error: {message}")) except queue.Empty: continue - except Exception as e: + except Exception: logging.exception("Error in message processing loop") raise @@ -389,14 +387,12 @@ class BaseSession( If the request is responded to within this method, it will not be forwarded on to the message stream. """ - pass def _received_notification(self, notification: ReceiveNotificationT) -> None: """ Can be overridden by subclasses to handle a notification without needing to listen on the message stream. """ - pass def send_progress_notification( self, progress_token: str | int, progress: float, total: float | None = None @@ -405,11 +401,9 @@ class BaseSession( Sends a progress notification for a request that is currently being processed. """ - pass def _handle_incoming( self, req: RequestResponder[ReceiveRequestT, SendResultT] | ReceiveNotificationT | Exception, ) -> None: """A generic handler for incoming messages. Overwritten by subclasses.""" - pass diff --git a/api/core/mcp/session/client_session.py b/api/core/mcp/session/client_session.py index ed2ad508ab..1bccf1d031 100644 --- a/api/core/mcp/session/client_session.py +++ b/api/core/mcp/session/client_session.py @@ -1,3 +1,4 @@ +import queue from datetime import timedelta from typing import Any, Protocol @@ -85,8 +86,8 @@ class ClientSession( ): def __init__( self, - read_stream, - write_stream, + read_stream: queue.Queue, + write_stream: queue.Queue, read_timeout_seconds: timedelta | None = None, sampling_callback: SamplingFnT | None = None, list_roots_callback: ListRootsFnT | None = None, diff --git a/api/core/tools/__base/tool_provider.py b/api/core/tools/__base/tool_provider.py index d096fc7df7..d1d7976cc3 100644 --- a/api/core/tools/__base/tool_provider.py +++ b/api/core/tools/__base/tool_provider.py @@ -12,8 +12,6 @@ from core.tools.errors import ToolProviderCredentialValidationError class ToolProviderController(ABC): - entity: ToolProviderEntity - def __init__(self, entity: ToolProviderEntity) -> None: self.entity = entity diff --git a/api/core/tools/mcp_tool/provider.py b/api/core/tools/mcp_tool/provider.py index 93f003effe..24ee981a1b 100644 --- a/api/core/tools/mcp_tool/provider.py +++ b/api/core/tools/mcp_tool/provider.py @@ -1,5 +1,5 @@ import json -from typing import Any +from typing import Any, Optional from core.mcp.types import Tool as RemoteMCPTool from core.tools.__base.tool_provider import ToolProviderController @@ -19,15 +19,24 @@ from services.tools.tools_transform_service import ToolTransformService class MCPToolProviderController(ToolProviderController): - provider_id: str - entity: ToolProviderEntityWithPlugin - - def __init__(self, entity: ToolProviderEntityWithPlugin, provider_id: str, tenant_id: str, server_url: str) -> None: + def __init__( + self, + entity: ToolProviderEntityWithPlugin, + provider_id: str, + tenant_id: str, + server_url: str, + headers: Optional[dict[str, str]] = None, + timeout: Optional[float] = None, + sse_read_timeout: Optional[float] = None, + ) -> None: super().__init__(entity) - self.entity = entity + self.entity: ToolProviderEntityWithPlugin = entity self.tenant_id = tenant_id self.provider_id = provider_id self.server_url = server_url + self.headers = headers or {} + self.timeout = timeout + self.sse_read_timeout = sse_read_timeout @property def provider_type(self) -> ToolProviderType: @@ -85,6 +94,9 @@ class MCPToolProviderController(ToolProviderController): provider_id=db_provider.server_identifier or "", tenant_id=db_provider.tenant_id or "", server_url=db_provider.decrypted_server_url, + headers={}, # TODO: get headers from db provider + timeout=db_provider.timeout, + sse_read_timeout=db_provider.sse_read_timeout, ) def _validate_credentials(self, user_id: str, credentials: dict[str, Any]) -> None: @@ -111,6 +123,9 @@ class MCPToolProviderController(ToolProviderController): icon=self.entity.identity.icon, server_url=self.server_url, provider_id=self.provider_id, + headers=self.headers, + timeout=self.timeout, + sse_read_timeout=self.sse_read_timeout, ) def get_tools(self) -> list[MCPTool]: # type: ignore @@ -125,6 +140,9 @@ class MCPToolProviderController(ToolProviderController): icon=self.entity.identity.icon, server_url=self.server_url, provider_id=self.provider_id, + headers=self.headers, + timeout=self.timeout, + sse_read_timeout=self.sse_read_timeout, ) for tool_entity in self.entity.tools ] diff --git a/api/core/tools/mcp_tool/tool.py b/api/core/tools/mcp_tool/tool.py index 8ebbb6b0fe..26789b23ce 100644 --- a/api/core/tools/mcp_tool/tool.py +++ b/api/core/tools/mcp_tool/tool.py @@ -13,13 +13,25 @@ from core.tools.entities.tool_entities import ToolEntity, ToolInvokeMessage, Too class MCPTool(Tool): def __init__( - self, entity: ToolEntity, runtime: ToolRuntime, tenant_id: str, icon: str, server_url: str, provider_id: str + self, + entity: ToolEntity, + runtime: ToolRuntime, + tenant_id: str, + icon: str, + server_url: str, + provider_id: str, + headers: Optional[dict[str, str]] = None, + timeout: Optional[float] = None, + sse_read_timeout: Optional[float] = None, ) -> None: super().__init__(entity, runtime) self.tenant_id = tenant_id self.icon = icon self.server_url = server_url self.provider_id = provider_id + self.headers = headers or {} + self.timeout = timeout + self.sse_read_timeout = sse_read_timeout def tool_provider_type(self) -> ToolProviderType: return ToolProviderType.MCP @@ -35,7 +47,15 @@ class MCPTool(Tool): from core.tools.errors import ToolInvokeError try: - with MCPClient(self.server_url, self.provider_id, self.tenant_id, authed=True) as mcp_client: + with MCPClient( + self.server_url, + self.provider_id, + self.tenant_id, + authed=True, + headers=self.headers, + timeout=self.timeout, + sse_read_timeout=self.sse_read_timeout, + ) as mcp_client: tool_parameters = self._handle_none_parameter(tool_parameters) result = mcp_client.invoke_tool(tool_name=self.entity.identity.name, tool_args=tool_parameters) except MCPAuthError as e: @@ -72,6 +92,9 @@ class MCPTool(Tool): icon=self.icon, server_url=self.server_url, provider_id=self.provider_id, + headers=self.headers, + timeout=self.timeout, + sse_read_timeout=self.sse_read_timeout, ) def _handle_none_parameter(self, parameter: dict[str, Any]) -> dict[str, Any]: diff --git a/api/core/tools/tool_manager.py b/api/core/tools/tool_manager.py index 2737bcfb16..7472f4f605 100644 --- a/api/core/tools/tool_manager.py +++ b/api/core/tools/tool_manager.py @@ -789,9 +789,6 @@ class ToolManager: """ get api provider """ - """ - get tool provider - """ provider_name = provider provider_obj: ApiToolProvider | None = ( db.session.query(ApiToolProvider) diff --git a/api/migrations/versions/2025_08_07_1115-fa8b0fa6f407_add_timeout_for_tool_mcp_providers.py b/api/migrations/versions/2025_08_07_1115-fa8b0fa6f407_add_timeout_for_tool_mcp_providers.py new file mode 100644 index 0000000000..eabead232e --- /dev/null +++ b/api/migrations/versions/2025_08_07_1115-fa8b0fa6f407_add_timeout_for_tool_mcp_providers.py @@ -0,0 +1,47 @@ +"""add timeout for tool_mcp_providers + +Revision ID: fa8b0fa6f407 +Revises: 532b3f888abf +Create Date: 2025-08-07 11:15:31.517985 + +""" +from alembic import op +import models as models +import sqlalchemy as sa + + +# revision identifiers, used by Alembic. +revision = 'fa8b0fa6f407' +down_revision = '532b3f888abf' +branch_labels = None +depends_on = None + + +def upgrade(): + # ### commands auto generated by Alembic - please adjust! ### + with op.batch_alter_table('tool_mcp_providers', schema=None) as batch_op: + batch_op.add_column(sa.Column('timeout', sa.Float(), server_default=sa.text('30'), nullable=False)) + batch_op.add_column(sa.Column('sse_read_timeout', sa.Float(), server_default=sa.text('300'), nullable=False)) + + with op.batch_alter_table('workflow_node_executions', schema=None) as batch_op: + batch_op.drop_index(batch_op.f('workflow_node_execution_created_at_idx')) + + with op.batch_alter_table('workflow_runs', schema=None) as batch_op: + batch_op.drop_index(batch_op.f('workflow_run_created_at_idx')) + + # ### end Alembic commands ### + + +def downgrade(): + # ### commands auto generated by Alembic - please adjust! ### + with op.batch_alter_table('workflow_runs', schema=None) as batch_op: + batch_op.create_index(batch_op.f('workflow_run_created_at_idx'), ['created_at'], unique=False) + + with op.batch_alter_table('workflow_node_executions', schema=None) as batch_op: + batch_op.create_index(batch_op.f('workflow_node_execution_created_at_idx'), ['created_at'], unique=False) + + with op.batch_alter_table('tool_mcp_providers', schema=None) as batch_op: + batch_op.drop_column('sse_read_timeout') + batch_op.drop_column('timeout') + + # ### end Alembic commands ### diff --git a/api/models/tools.py b/api/models/tools.py index 408c1371c2..e0c9fa6ffc 100644 --- a/api/models/tools.py +++ b/api/models/tools.py @@ -278,6 +278,8 @@ class MCPToolProvider(Base): updated_at: Mapped[datetime] = mapped_column( sa.DateTime, nullable=False, server_default=sa.text("CURRENT_TIMESTAMP(0)") ) + timeout: Mapped[float] = mapped_column(sa.Float, nullable=False, server_default=sa.text("30")) + sse_read_timeout: Mapped[float] = mapped_column(sa.Float, nullable=False, server_default=sa.text("300")) def load_user(self) -> Account | None: return db.session.query(Account).where(Account.id == self.user_id).first() diff --git a/api/services/tools/mcp_tools_manage_service.py b/api/services/tools/mcp_tools_manage_service.py index 23be449a5a..f45c931768 100644 --- a/api/services/tools/mcp_tools_manage_service.py +++ b/api/services/tools/mcp_tools_manage_service.py @@ -59,6 +59,8 @@ class MCPToolManageService: icon_type: str, icon_background: str, server_identifier: str, + timeout: float, + sse_read_timeout: float, ) -> ToolProviderApiEntity: server_url_hash = hashlib.sha256(server_url.encode()).hexdigest() existing_provider = ( @@ -91,6 +93,8 @@ class MCPToolManageService: tools="[]", icon=json.dumps({"content": icon, "background": icon_background}) if icon_type == "emoji" else icon, server_identifier=server_identifier, + timeout=timeout, + sse_read_timeout=sse_read_timeout, ) db.session.add(mcp_tool) db.session.commit() @@ -166,6 +170,8 @@ class MCPToolManageService: icon_type: str, icon_background: str, server_identifier: str, + timeout: float | None = None, + sse_read_timeout: float | None = None, ): mcp_provider = cls.get_mcp_provider_by_provider_id(provider_id, tenant_id) @@ -197,6 +203,10 @@ class MCPToolManageService: mcp_provider.tools = reconnect_result["tools"] mcp_provider.encrypted_credentials = reconnect_result["encrypted_credentials"] + if timeout is not None: + mcp_provider.timeout = timeout + if sse_read_timeout is not None: + mcp_provider.sse_read_timeout = sse_read_timeout db.session.commit() except IntegrityError as e: db.session.rollback() From 62772e8871b586185b8e13c898d5273f4a8c0301 Mon Sep 17 00:00:00 2001 From: yunqiqiliang <132561395+yunqiqiliang@users.noreply.github.com> Date: Fri, 8 Aug 2025 09:18:43 +0800 Subject: [PATCH 186/415] fix: ensure vector database cleanup on dataset deletion regardless of document presence (affects all 33 vector databases) (#23574) Co-authored-by: Claude Co-authored-by: autofix-ci[bot] <114827586+autofix-ci[bot]@users.noreply.github.com> --- .gitignore | 8 +- .../vdb/clickzetta/clickzetta_vector.py | 127 ++++++++++-------- api/tasks/clean_dataset_task.py | 12 +- .../vdb/clickzetta/test_clickzetta.py | 33 ++--- .../vdb/clickzetta/test_docker_integration.py | 22 +-- 5 files changed, 97 insertions(+), 105 deletions(-) diff --git a/.gitignore b/.gitignore index c60957db72..5c68d89a4d 100644 --- a/.gitignore +++ b/.gitignore @@ -215,10 +215,4 @@ mise.toml # AI Assistant .roo/ api/.env.backup - -# Clickzetta test credentials -.env.clickzetta -.env.clickzetta.test - -# Clickzetta plugin development folder (keep local, ignore for PR) -clickzetta/ +/clickzetta diff --git a/api/core/rag/datasource/vdb/clickzetta/clickzetta_vector.py b/api/core/rag/datasource/vdb/clickzetta/clickzetta_vector.py index d295bab5aa..50a395a373 100644 --- a/api/core/rag/datasource/vdb/clickzetta/clickzetta_vector.py +++ b/api/core/rag/datasource/vdb/clickzetta/clickzetta_vector.py @@ -3,7 +3,7 @@ import logging import queue import threading import uuid -from typing import Any, Optional, TYPE_CHECKING +from typing import TYPE_CHECKING, Any, Optional import clickzetta # type: ignore from pydantic import BaseModel, model_validator @@ -82,7 +82,7 @@ class ClickzettaVector(BaseVector): super().__init__(collection_name) self._config = config self._table_name = collection_name.replace("-", "_").lower() # Ensure valid table name - self._connection: Optional["Connection"] = None + self._connection: Optional[Connection] = None self._init_connection() self._init_write_queue() @@ -95,7 +95,7 @@ class ClickzettaVector(BaseVector): service=self._config.service, workspace=self._config.workspace, vcluster=self._config.vcluster, - schema=self._config.schema_name + schema=self._config.schema_name, ) # Set session parameters for better string handling and performance optimization @@ -116,14 +116,12 @@ class ClickzettaVector(BaseVector): # Vector index optimization "SET cz.storage.parquet.vector.index.read.memory.cache = true", "SET cz.storage.parquet.vector.index.read.local.cache = false", - # Query optimization "SET cz.sql.table.scan.push.down.filter = true", "SET cz.sql.table.scan.enable.ensure.filter = true", "SET cz.storage.always.prefetch.internal = true", "SET cz.optimizer.generate.columns.always.valid = true", "SET cz.sql.index.prewhere.enabled = true", - # Storage optimization "SET cz.storage.parquet.enable.io.prefetch = false", "SET cz.optimizer.enable.mv.rewrite = false", @@ -132,17 +130,18 @@ class ClickzettaVector(BaseVector): "SET cz.sql.table.scan.enable.push.down.log = false", "SET cz.storage.use.file.format.local.stats = false", "SET cz.storage.local.file.object.cache.level = all", - # Job execution optimization "SET cz.sql.job.fast.mode = true", "SET cz.storage.parquet.non.contiguous.read = true", - "SET cz.sql.compaction.after.commit = true" + "SET cz.sql.compaction.after.commit = true", ] for hint in performance_hints: cursor.execute(hint) - logger.info("Applied %d performance optimization hints for ClickZetta vector operations", len(performance_hints)) + logger.info( + "Applied %d performance optimization hints for ClickZetta vector operations", len(performance_hints) + ) except Exception: # Catch any errors setting performance hints but continue with defaults @@ -298,9 +297,7 @@ class ClickzettaVector(BaseVector): logger.info("Created vector index: %s", index_name) except (RuntimeError, ValueError) as e: error_msg = str(e).lower() - if ("already exists" in error_msg or - "already has index" in error_msg or - "with the same type" in error_msg): + if "already exists" in error_msg or "already has index" in error_msg or "with the same type" in error_msg: logger.info("Vector index already exists: %s", e) else: logger.exception("Failed to create vector index") @@ -318,9 +315,11 @@ class ClickzettaVector(BaseVector): for idx in existing_indexes: idx_str = str(idx).lower() # More precise check: look for inverted index specifically on the content column - if ("inverted" in idx_str and - Field.CONTENT_KEY.value.lower() in idx_str and - (index_name.lower() in idx_str or f"idx_{self._table_name}_text" in idx_str)): + if ( + "inverted" in idx_str + and Field.CONTENT_KEY.value.lower() in idx_str + and (index_name.lower() in idx_str or f"idx_{self._table_name}_text" in idx_str) + ): logger.info("Inverted index already exists on column %s: %s", Field.CONTENT_KEY.value, idx) return except (RuntimeError, ValueError) as e: @@ -340,11 +339,12 @@ class ClickzettaVector(BaseVector): except (RuntimeError, ValueError) as e: error_msg = str(e).lower() # Handle ClickZetta specific error messages - if (("already exists" in error_msg or - "already has index" in error_msg or - "with the same type" in error_msg or - "cannot create inverted index" in error_msg) and - "already has index" in error_msg): + if ( + "already exists" in error_msg + or "already has index" in error_msg + or "with the same type" in error_msg + or "cannot create inverted index" in error_msg + ) and "already has index" in error_msg: logger.info("Inverted index already exists on column %s", Field.CONTENT_KEY.value) # Try to get the existing index name for logging try: @@ -360,7 +360,6 @@ class ClickzettaVector(BaseVector): logger.warning("Failed to create inverted index: %s", e) # Continue without inverted index - full-text search will fall back to LIKE - def add_texts(self, documents: list[Document], embeddings: list[list[float]], **kwargs): """Add documents with embeddings to the collection.""" if not documents: @@ -370,14 +369,20 @@ class ClickzettaVector(BaseVector): total_batches = (len(documents) + batch_size - 1) // batch_size for i in range(0, len(documents), batch_size): - batch_docs = documents[i:i + batch_size] - batch_embeddings = embeddings[i:i + batch_size] + batch_docs = documents[i : i + batch_size] + batch_embeddings = embeddings[i : i + batch_size] # Execute batch insert through write queue self._execute_write(self._insert_batch, batch_docs, batch_embeddings, i, batch_size, total_batches) - def _insert_batch(self, batch_docs: list[Document], batch_embeddings: list[list[float]], - batch_index: int, batch_size: int, total_batches: int): + def _insert_batch( + self, + batch_docs: list[Document], + batch_embeddings: list[list[float]], + batch_index: int, + batch_size: int, + total_batches: int, + ): """Insert a batch of documents using parameterized queries (executed in write worker thread).""" if not batch_docs or not batch_embeddings: logger.warning("Empty batch provided, skipping insertion") @@ -411,7 +416,7 @@ class ClickzettaVector(BaseVector): # According to ClickZetta docs, vector should be formatted as array string # for external systems: '[1.0, 2.0, 3.0]' - vector_str = '[' + ','.join(map(str, embedding)) + ']' + vector_str = "[" + ",".join(map(str, embedding)) + "]" data_rows.append([doc_id, content, metadata_json, vector_str]) # Check if we have any valid data to insert @@ -438,13 +443,16 @@ class ClickzettaVector(BaseVector): cursor.executemany(insert_sql, data_rows) logger.info( - f"Inserted batch {batch_index // batch_size + 1}/{total_batches} " - f"({len(data_rows)} valid docs using parameterized query with VECTOR({vector_dimension}) cast)" + "Inserted batch %d/%d (%d valid docs using parameterized query with VECTOR(%d) cast)", + batch_index // batch_size + 1, + total_batches, + len(data_rows), + vector_dimension, ) except (RuntimeError, ValueError, TypeError, ConnectionError) as e: - logger.exception("Parameterized SQL execution failed for %d documents: %s", len(data_rows), e) + logger.exception("Parameterized SQL execution failed for %d documents", len(data_rows)) logger.exception("SQL template: %s", insert_sql) - logger.exception("Sample data row: %s", data_rows[0] if data_rows else 'None') + logger.exception("Sample data row: %s", data_rows[0] if data_rows else "None") raise def text_exists(self, id: str) -> bool: @@ -453,8 +461,7 @@ class ClickzettaVector(BaseVector): connection = self._ensure_connection() with connection.cursor() as cursor: cursor.execute( - f"SELECT COUNT(*) FROM {self._config.schema_name}.{self._table_name} WHERE id = ?", - [safe_id] + f"SELECT COUNT(*) FROM {self._config.schema_name}.{self._table_name} WHERE id = ?", [safe_id] ) result = cursor.fetchone() return result[0] > 0 if result else False @@ -500,8 +507,10 @@ class ClickzettaVector(BaseVector): # Using JSON path to filter with parameterized query # Note: JSON path requires literal key name, cannot be parameterized # Use json_extract_string function for ClickZetta compatibility - sql = (f"DELETE FROM {self._config.schema_name}.{self._table_name} " - f"WHERE json_extract_string({Field.METADATA_KEY.value}, '$.{key}') = ?") + sql = ( + f"DELETE FROM {self._config.schema_name}.{self._table_name} " + f"WHERE json_extract_string({Field.METADATA_KEY.value}, '$.{key}') = ?" + ) cursor.execute(sql, [value]) def search_by_vector(self, query_vector: list[float], **kwargs: Any) -> list[Document]: @@ -532,15 +541,15 @@ class ClickzettaVector(BaseVector): distance_func = "COSINE_DISTANCE" if score_threshold > 0: query_vector_str = f"CAST('[{self._format_vector_simple(query_vector)}]' AS VECTOR({vector_dimension}))" - filter_clauses.append(f"{distance_func}({Field.VECTOR.value}, " - f"{query_vector_str}) < {2 - score_threshold}") + filter_clauses.append( + f"{distance_func}({Field.VECTOR.value}, {query_vector_str}) < {2 - score_threshold}" + ) else: # For L2 distance, smaller is better distance_func = "L2_DISTANCE" if score_threshold > 0: query_vector_str = f"CAST('[{self._format_vector_simple(query_vector)}]' AS VECTOR({vector_dimension}))" - filter_clauses.append(f"{distance_func}({Field.VECTOR.value}, " - f"{query_vector_str}) < {score_threshold}") + filter_clauses.append(f"{distance_func}({Field.VECTOR.value}, {query_vector_str}) < {score_threshold}") where_clause = " AND ".join(filter_clauses) if filter_clauses else "1=1" @@ -560,10 +569,10 @@ class ClickzettaVector(BaseVector): with connection.cursor() as cursor: # Use hints parameter for vector search optimization search_hints = { - 'hints': { - 'sdk.job.timeout': 60, # Increase timeout for vector search - 'cz.sql.job.fast.mode': True, - 'cz.storage.parquet.vector.index.read.memory.cache': True + "hints": { + "sdk.job.timeout": 60, # Increase timeout for vector search + "cz.sql.job.fast.mode": True, + "cz.storage.parquet.vector.index.read.memory.cache": True, } } cursor.execute(search_sql, parameters=search_hints) @@ -584,10 +593,11 @@ class ClickzettaVector(BaseVector): else: metadata = {} except (json.JSONDecodeError, TypeError) as e: - logger.error("JSON parsing failed: %s", e) + logger.exception("JSON parsing failed") # Fallback: extract document_id with regex import re - doc_id_match = re.search(r'"document_id":\s*"([^"]+)"', str(row[2] or '')) + + doc_id_match = re.search(r'"document_id":\s*"([^"]+)"', str(row[2] or "")) metadata = {"document_id": doc_id_match.group(1)} if doc_id_match else {} # Ensure required fields are set @@ -654,10 +664,10 @@ class ClickzettaVector(BaseVector): try: # Use hints parameter for full-text search optimization fulltext_hints = { - 'hints': { - 'sdk.job.timeout': 30, # Timeout for full-text search - 'cz.sql.job.fast.mode': True, - 'cz.sql.index.prewhere.enabled': True + "hints": { + "sdk.job.timeout": 30, # Timeout for full-text search + "cz.sql.job.fast.mode": True, + "cz.sql.index.prewhere.enabled": True, } } cursor.execute(search_sql, parameters=fulltext_hints) @@ -678,10 +688,11 @@ class ClickzettaVector(BaseVector): else: metadata = {} except (json.JSONDecodeError, TypeError) as e: - logger.error("JSON parsing failed: %s", e) + logger.exception("JSON parsing failed") # Fallback: extract document_id with regex import re - doc_id_match = re.search(r'"document_id":\s*"([^"]+)"', str(row[2] or '')) + + doc_id_match = re.search(r'"document_id":\s*"([^"]+)"', str(row[2] or "")) metadata = {"document_id": doc_id_match.group(1)} if doc_id_match else {} # Ensure required fields are set @@ -739,9 +750,9 @@ class ClickzettaVector(BaseVector): with connection.cursor() as cursor: # Use hints parameter for LIKE search optimization like_hints = { - 'hints': { - 'sdk.job.timeout': 20, # Timeout for LIKE search - 'cz.sql.job.fast.mode': True + "hints": { + "sdk.job.timeout": 20, # Timeout for LIKE search + "cz.sql.job.fast.mode": True, } } cursor.execute(search_sql, parameters=like_hints) @@ -762,10 +773,11 @@ class ClickzettaVector(BaseVector): else: metadata = {} except (json.JSONDecodeError, TypeError) as e: - logger.error("JSON parsing failed: %s", e) + logger.exception("JSON parsing failed") # Fallback: extract document_id with regex import re - doc_id_match = re.search(r'"document_id":\s*"([^"]+)"', str(row[2] or '')) + + doc_id_match = re.search(r'"document_id":\s*"([^"]+)"', str(row[2] or "")) metadata = {"document_id": doc_id_match.group(1)} if doc_id_match else {} # Ensure required fields are set @@ -787,10 +799,9 @@ class ClickzettaVector(BaseVector): with connection.cursor() as cursor: cursor.execute(f"DROP TABLE IF EXISTS {self._config.schema_name}.{self._table_name}") - def _format_vector_simple(self, vector: list[float]) -> str: """Simple vector formatting for SQL queries.""" - return ','.join(map(str, vector)) + return ",".join(map(str, vector)) def _safe_doc_id(self, doc_id: str) -> str: """Ensure doc_id is safe for SQL and doesn't contain special characters.""" @@ -799,13 +810,12 @@ class ClickzettaVector(BaseVector): # Remove or replace potentially problematic characters safe_id = str(doc_id) # Only allow alphanumeric, hyphens, underscores - safe_id = ''.join(c for c in safe_id if c.isalnum() or c in '-_') + safe_id = "".join(c for c in safe_id if c.isalnum() or c in "-_") if not safe_id: # If all characters were removed return str(uuid.uuid4()) return safe_id[:255] # Limit length - class ClickzettaVectorFactory(AbstractVectorFactory): """Factory for creating Clickzetta vector instances.""" @@ -831,4 +841,3 @@ class ClickzettaVectorFactory(AbstractVectorFactory): collection_name = Dataset.gen_collection_name_by_id(dataset.id).lower() return ClickzettaVector(collection_name=collection_name, config=config) - diff --git a/api/tasks/clean_dataset_task.py b/api/tasks/clean_dataset_task.py index fe6d613b1c..c769446ed5 100644 --- a/api/tasks/clean_dataset_task.py +++ b/api/tasks/clean_dataset_task.py @@ -56,15 +56,17 @@ def clean_dataset_task( documents = db.session.query(Document).where(Document.dataset_id == dataset_id).all() segments = db.session.query(DocumentSegment).where(DocumentSegment.dataset_id == dataset_id).all() + # Fix: Always clean vector database resources regardless of document existence + # This ensures all 33 vector databases properly drop tables/collections/indices + if doc_form is None: + raise ValueError("Index type must be specified.") + index_processor = IndexProcessorFactory(doc_form).init_index_processor() + index_processor.clean(dataset, None, with_keywords=True, delete_child_chunks=True) + if documents is None or len(documents) == 0: logging.info(click.style(f"No documents found for dataset: {dataset_id}", fg="green")) else: logging.info(click.style(f"Cleaning documents for dataset: {dataset_id}", fg="green")) - # Specify the index type before initializing the index processor - if doc_form is None: - raise ValueError("Index type must be specified.") - index_processor = IndexProcessorFactory(doc_form).init_index_processor() - index_processor.clean(dataset, None, with_keywords=True, delete_child_chunks=True) for document in documents: db.session.delete(document) diff --git a/api/tests/integration_tests/vdb/clickzetta/test_clickzetta.py b/api/tests/integration_tests/vdb/clickzetta/test_clickzetta.py index 0aa92bc84a..8b57132772 100644 --- a/api/tests/integration_tests/vdb/clickzetta/test_clickzetta.py +++ b/api/tests/integration_tests/vdb/clickzetta/test_clickzetta.py @@ -39,10 +39,7 @@ class TestClickzettaVector(AbstractVectorTest): ) with setup_mock_redis(): - vector = ClickzettaVector( - collection_name="test_collection_" + str(os.getpid()), - config=config - ) + vector = ClickzettaVector(collection_name="test_collection_" + str(os.getpid()), config=config) yield vector @@ -114,7 +111,7 @@ class TestClickzettaVector(AbstractVectorTest): "category": "technical" if i % 2 == 0 else "general", "document_id": f"doc_{i // 3}", # Group documents "importance": i, - } + }, ) documents.append(doc) # Create varied embeddings @@ -124,22 +121,14 @@ class TestClickzettaVector(AbstractVectorTest): # Test vector search with document filter query_vector = [0.5, 1.0, 1.5, 2.0] - results = vector_store.search_by_vector( - query_vector, - top_k=5, - document_ids_filter=["doc_0", "doc_1"] - ) + results = vector_store.search_by_vector(query_vector, top_k=5, document_ids_filter=["doc_0", "doc_1"]) assert len(results) > 0 # All results should belong to doc_0 or doc_1 groups for result in results: assert result.metadata["document_id"] in ["doc_0", "doc_1"] # Test score threshold - results = vector_store.search_by_vector( - query_vector, - top_k=10, - score_threshold=0.5 - ) + results = vector_store.search_by_vector(query_vector, top_k=10, score_threshold=0.5) # Check that all results have a score above threshold for result in results: assert result.metadata.get("score", 0) >= 0.5 @@ -154,7 +143,7 @@ class TestClickzettaVector(AbstractVectorTest): for i in range(batch_size): doc = Document( page_content=f"Batch document {i}: This is a test document for batch processing.", - metadata={"doc_id": f"batch_doc_{i}", "batch": "test_batch"} + metadata={"doc_id": f"batch_doc_{i}", "batch": "test_batch"}, ) documents.append(doc) embeddings.append([0.1 * (i % 10), 0.2 * (i % 10), 0.3 * (i % 10), 0.4 * (i % 10)]) @@ -179,7 +168,7 @@ class TestClickzettaVector(AbstractVectorTest): # Test special characters in content special_doc = Document( page_content="Special chars: 'quotes', \"double\", \\backslash, \n newline", - metadata={"doc_id": "special_doc", "test": "edge_case"} + metadata={"doc_id": "special_doc", "test": "edge_case"}, ) embeddings = [[0.1, 0.2, 0.3, 0.4]] @@ -199,20 +188,18 @@ class TestClickzettaVector(AbstractVectorTest): # Prepare documents with various language content documents = [ Document( - page_content="云器科技提供强大的Lakehouse解决方案", - metadata={"doc_id": "cn_doc_1", "lang": "chinese"} + page_content="云器科技提供强大的Lakehouse解决方案", metadata={"doc_id": "cn_doc_1", "lang": "chinese"} ), Document( page_content="Clickzetta provides powerful Lakehouse solutions", - metadata={"doc_id": "en_doc_1", "lang": "english"} + metadata={"doc_id": "en_doc_1", "lang": "english"}, ), Document( - page_content="Lakehouse是现代数据架构的重要组成部分", - metadata={"doc_id": "cn_doc_2", "lang": "chinese"} + page_content="Lakehouse是现代数据架构的重要组成部分", metadata={"doc_id": "cn_doc_2", "lang": "chinese"} ), Document( page_content="Modern data architecture includes Lakehouse technology", - metadata={"doc_id": "en_doc_2", "lang": "english"} + metadata={"doc_id": "en_doc_2", "lang": "english"}, ), ] diff --git a/api/tests/integration_tests/vdb/clickzetta/test_docker_integration.py b/api/tests/integration_tests/vdb/clickzetta/test_docker_integration.py index 5f2e290ad4..ef54eaa174 100644 --- a/api/tests/integration_tests/vdb/clickzetta/test_docker_integration.py +++ b/api/tests/integration_tests/vdb/clickzetta/test_docker_integration.py @@ -2,6 +2,7 @@ """ Test Clickzetta integration in Docker environment """ + import os import time @@ -20,7 +21,7 @@ def test_clickzetta_connection(): service=os.getenv("CLICKZETTA_SERVICE", "api.clickzetta.com"), workspace=os.getenv("CLICKZETTA_WORKSPACE", "test_workspace"), vcluster=os.getenv("CLICKZETTA_VCLUSTER", "default"), - database=os.getenv("CLICKZETTA_SCHEMA", "dify") + database=os.getenv("CLICKZETTA_SCHEMA", "dify"), ) with conn.cursor() as cursor: @@ -36,7 +37,7 @@ def test_clickzetta_connection(): # Check if test collection exists test_collection = "collection_test_dataset" - if test_collection in [t[1] for t in tables if t[0] == 'dify']: + if test_collection in [t[1] for t in tables if t[0] == "dify"]: cursor.execute(f"DESCRIBE dify.{test_collection}") columns = cursor.fetchall() print(f"✓ Table structure for {test_collection}:") @@ -55,6 +56,7 @@ def test_clickzetta_connection(): print(f"✗ Connection test failed: {e}") return False + def test_dify_api(): """Test Dify API with Clickzetta backend""" print("\n=== Testing Dify API ===") @@ -83,6 +85,7 @@ def test_dify_api(): print(f"✗ API test failed: {e}") return False + def verify_table_structure(): """Verify the table structure meets Dify requirements""" print("\n=== Verifying Table Structure ===") @@ -91,15 +94,10 @@ def verify_table_structure(): "id": "VARCHAR", "page_content": "VARCHAR", "metadata": "VARCHAR", # JSON stored as VARCHAR in Clickzetta - "vector": "ARRAY" + "vector": "ARRAY", } - expected_metadata_fields = [ - "doc_id", - "doc_hash", - "document_id", - "dataset_id" - ] + expected_metadata_fields = ["doc_id", "doc_hash", "document_id", "dataset_id"] print("✓ Expected table structure:") for col, dtype in expected_columns.items(): @@ -117,6 +115,7 @@ def verify_table_structure(): return True + def main(): """Run all tests""" print("Starting Clickzetta integration tests for Dify Docker\n") @@ -137,9 +136,9 @@ def main(): results.append((test_name, False)) # Summary - print("\n" + "="*50) + print("\n" + "=" * 50) print("Test Summary:") - print("="*50) + print("=" * 50) passed = sum(1 for _, success in results if success) total = len(results) @@ -161,5 +160,6 @@ def main(): print("\n⚠️ Some tests failed. Please check the errors above.") return 1 + if __name__ == "__main__": exit(main()) From cb5e2ad9b2922eb05ef77947b91d6c908802bdd0 Mon Sep 17 00:00:00 2001 From: Matri Qi Date: Fri, 8 Aug 2025 09:25:41 +0800 Subject: [PATCH 187/415] lint: fix tailwind lint issues (#23367) Co-authored-by: crazywoola <100913391+crazywoola@users.noreply.github.com> --- web/app/account/account-page/AvatarWithEdit.tsx | 2 +- .../app-access-control/access-control-dialog.tsx | 2 +- .../add-member-or-group-pop.tsx | 4 ++-- .../app/create-app-dialog/app-list/sidebar.tsx | 4 ++-- .../base/app-icon-picker/ImageInput.tsx | 2 +- web/app/components/base/block-input/index.tsx | 4 ++-- web/app/components/base/button/index.tsx | 2 +- web/app/components/base/content-dialog/index.tsx | 8 ++++---- web/app/components/base/dialog/index.tsx | 10 +++++----- .../features/new-feature-panel/dialog-wrapper.tsx | 2 +- .../components/base/fullscreen-modal/index.tsx | 6 +++--- web/app/components/base/grid-mask/index.tsx | 4 ++-- web/app/components/base/logo/logo-site.tsx | 2 +- web/app/components/base/modal/index.tsx | 8 ++++---- web/app/components/base/premium-badge/index.tsx | 2 +- .../components/base/segmented-control/index.tsx | 8 ++++---- web/app/components/base/skeleton/index.tsx | 4 ++-- web/app/components/base/switch/index.tsx | 6 +++--- web/app/components/base/tag/index.tsx | 4 ++-- web/app/components/billing/pricing/index.tsx | 4 ++-- .../billing/pricing/self-hosted-plan-item.tsx | 2 +- .../datasets/create/step-two/option-card.tsx | 6 +++--- .../components/datasets/create/stepper/step.tsx | 8 ++++---- .../components/datasets/create/top-bar/index.tsx | 2 +- .../detail/completed/child-segment-list.tsx | 15 +++++++-------- .../detail/completed/common/chunk-content.tsx | 4 ++-- .../completed/common/full-screen-drawer.tsx | 6 +++--- .../detail/completed/new-child-segment.tsx | 7 +++---- .../datasets/documents/detail/embedding/index.tsx | 4 +--- .../formatted-text/flavours/edit-slice.tsx | 2 +- .../datasets/formatted-text/flavours/shared.tsx | 8 ++++---- web/app/components/datasets/preview/container.tsx | 4 ++-- .../settings/index-method-radio/index.tsx | 2 +- web/app/components/develop/code.tsx | 4 ++-- .../account-setting/Integrations-page/index.tsx | 2 +- .../header/account-setting/collapse/index.tsx | 2 +- .../model-provider-page/model-badge/index.tsx | 2 +- .../model-load-balancing-configs.tsx | 4 ++-- .../model-load-balancing-modal.tsx | 4 ++-- web/app/components/header/app-back/index.tsx | 6 +++--- web/app/components/header/indicator/index.tsx | 2 +- web/app/components/workflow/header/undo-redo.tsx | 8 ++++---- .../workflow/header/view-workflow-history.tsx | 2 +- .../workflow/nodes/_base/components/group.tsx | 2 +- .../assigner/components/operation-selector.tsx | 4 ++-- .../json-schema-config-modal/error-message.tsx | 2 +- 46 files changed, 99 insertions(+), 103 deletions(-) diff --git a/web/app/account/account-page/AvatarWithEdit.tsx b/web/app/account/account-page/AvatarWithEdit.tsx index 8250789def..41a6971bf5 100644 --- a/web/app/account/account-page/AvatarWithEdit.tsx +++ b/web/app/account/account-page/AvatarWithEdit.tsx @@ -87,7 +87,7 @@ const AvatarWithEdit = ({ onSave, ...props }: AvatarWithEditProps) => {
    { setIsShowAvatarPicker(true) }} - className="absolute inset-0 flex cursor-pointer items-center justify-center rounded-full bg-black bg-opacity-50 opacity-0 transition-opacity group-hover:opacity-100" + className="absolute inset-0 flex cursor-pointer items-center justify-center rounded-full bg-black/50 opacity-0 transition-opacity group-hover:opacity-100" > diff --git a/web/app/components/app/app-access-control/access-control-dialog.tsx b/web/app/components/app/app-access-control/access-control-dialog.tsx index 72dd33c72e..479eedc9cf 100644 --- a/web/app/components/app/app-access-control/access-control-dialog.tsx +++ b/web/app/components/app/app-access-control/access-control-dialog.tsx @@ -32,7 +32,7 @@ const AccessControlDialog = ({ leaveFrom="opacity-100" leaveTo="opacity-0" > -
    +
    diff --git a/web/app/components/app/app-access-control/add-member-or-group-pop.tsx b/web/app/components/app/app-access-control/add-member-or-group-pop.tsx index da4a25c1d8..0fad6cc740 100644 --- a/web/app/components/app/app-access-control/add-member-or-group-pop.tsx +++ b/web/app/components/app/app-access-control/add-member-or-group-pop.tsx @@ -106,7 +106,7 @@ function SelectedGroupsBreadCrumb() { setSelectedGroupsForBreadcrumb([]) }, [setSelectedGroupsForBreadcrumb]) return
    - 0 && 'text-text-accent cursor-pointer')} onClick={handleReset}>{t('app.accessControlDialog.operateGroupAndMember.allMembers')} + 0 && 'cursor-pointer text-text-accent')} onClick={handleReset}>{t('app.accessControlDialog.operateGroupAndMember.allMembers')} {selectedGroupsForBreadcrumb.map((group, index) => { return
    / @@ -198,7 +198,7 @@ type BaseItemProps = { children: React.ReactNode } function BaseItem({ children, className }: BaseItemProps) { - return
    + return
    {children}
    } diff --git a/web/app/components/app/create-app-dialog/app-list/sidebar.tsx b/web/app/components/app/create-app-dialog/app-list/sidebar.tsx index 346de078b4..85c55c5385 100644 --- a/web/app/components/app/create-app-dialog/app-list/sidebar.tsx +++ b/web/app/components/app/create-app-dialog/app-list/sidebar.tsx @@ -40,13 +40,13 @@ type CategoryItemProps = { } function CategoryItem({ category, active, onClick }: CategoryItemProps) { return
  • { onClick?.(category) }}> {category === AppCategories.RECOMMENDED &&
    } + className={classNames('system-sm-medium text-components-menu-item-text group-hover:text-components-menu-item-text-hover group-[.active]:text-components-menu-item-text-active', active && 'system-sm-semibold')} />
  • } diff --git a/web/app/components/base/app-icon-picker/ImageInput.tsx b/web/app/components/base/app-icon-picker/ImageInput.tsx index d42abf867f..8d9ca50763 100644 --- a/web/app/components/base/app-icon-picker/ImageInput.tsx +++ b/web/app/components/base/app-icon-picker/ImageInput.tsx @@ -94,7 +94,7 @@ const ImageInput: FC = ({
    = ({ ?
    -
    - {t('datasetDocuments.list.table.header.fileName')} -
    + {renderSortHeader('name', t('datasetDocuments.list.table.header.fileName'))}
    {t('datasetDocuments.list.table.header.chunkingMode')}{t('datasetDocuments.list.table.header.words')}{t('datasetDocuments.list.table.header.hitCount')} + {renderSortHeader('word_count', t('datasetDocuments.list.table.header.words'))} + -
    - {t('datasetDocuments.list.table.header.uploadTime')} - -
    + {renderSortHeader('hit_count', t('datasetDocuments.list.table.header.hitCount'))} +
    + {renderSortHeader('created_at', t('datasetDocuments.list.table.header.uploadTime'))} {t('datasetDocuments.list.table.header.status')} {t('datasetDocuments.list.table.header.action')}