From dabf266048333789a2ff1408f94c3fcd636572ed Mon Sep 17 00:00:00 2001 From: dswl23 <127465898+dswl23@users.noreply.github.com> Date: Wed, 3 Sep 2025 15:22:42 +0800 Subject: [PATCH 01/46] Fix: handle 204 No Content response in MCP client (#25040) --- api/core/mcp/client/streamable_client.py | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/api/core/mcp/client/streamable_client.py b/api/core/mcp/client/streamable_client.py index 14e346c2f3..a2b003e717 100644 --- a/api/core/mcp/client/streamable_client.py +++ b/api/core/mcp/client/streamable_client.py @@ -246,6 +246,10 @@ class StreamableHTTPTransport: logger.debug("Received 202 Accepted") return + if response.status_code == 204: + logger.debug("Received 204 No Content") + return + if response.status_code == 404: if isinstance(message.root, JSONRPCRequest): self._send_session_terminated_error( From 8eae7a95be91df1d3b017429a4abdaee4ccf9b63 Mon Sep 17 00:00:00 2001 From: NeatGuyCoding <15627489+NeatGuyCoding@users.noreply.github.com> Date: Wed, 3 Sep 2025 15:23:04 +0800 Subject: [PATCH 02/46] Hotfix translation error (#25035) --- web/i18n/id-ID/app-annotation.ts | 20 +++++++-------- web/i18n/id-ID/app-api.ts | 26 +++++++++---------- web/i18n/id-ID/app-debug.ts | 8 +++--- web/i18n/id-ID/app-log.ts | 16 ++++++------ web/i18n/id-ID/app-overview.ts | 4 +-- web/i18n/id-ID/app.ts | 16 ++++++------ web/i18n/id-ID/common.ts | 36 +++++++++++++-------------- web/i18n/id-ID/custom.ts | 18 +++++++------- web/i18n/id-ID/dataset-creation.ts | 20 +++++++-------- web/i18n/id-ID/dataset-hit-testing.ts | 4 +-- web/i18n/id-ID/dataset-settings.ts | 2 +- web/i18n/id-ID/dataset.ts | 4 +-- web/i18n/id-ID/education.ts | 2 +- web/i18n/id-ID/explore.ts | 8 +++--- web/i18n/id-ID/login.ts | 26 +++++++++---------- web/i18n/id-ID/oauth.ts | 10 ++++---- web/i18n/id-ID/plugin.ts | 10 ++++---- web/i18n/id-ID/time.ts | 16 ++++++------ web/i18n/id-ID/workflow.ts | 26 +++++++++---------- web/i18n/tr-TR/common.ts | 2 +- web/i18n/uk-UA/common.ts | 2 +- 21 files changed, 138 insertions(+), 138 deletions(-) diff --git a/web/i18n/id-ID/app-annotation.ts b/web/i18n/id-ID/app-annotation.ts index 9ac49fc977..bfc13c9d19 100644 --- a/web/i18n/id-ID/app-annotation.ts +++ b/web/i18n/id-ID/app-annotation.ts @@ -5,10 +5,10 @@ const translation = { }, table: { header: { - answer: 'menjawab', + answer: 'Jawaban', question: 'pertanyaan', createdAt: 'dibuat di', - hits: 'Hits', + hits: 'Kecocokan', addAnnotation: 'Tambahkan Anotasi', bulkImport: 'Impor Massal', clearAllConfirm: 'Menghapus semua anotasi?', @@ -29,7 +29,7 @@ const translation = { answerName: 'Bot Pendongeng', }, addModal: { - answerName: 'Menjawab', + answerName: 'Jawaban', title: 'Tambahkan Anotasi Balasan', queryName: 'Pertanyaan', createNext: 'Tambahkan respons beranotasi lainnya', @@ -44,10 +44,10 @@ const translation = { run: 'Jalankan Batch', cancel: 'Membatalkan', title: 'Impor Massal', - browse: 'ramban', + browse: 'Telusuri', template: 'Unduh templat di sini', tip: 'File CSV harus sesuai dengan struktur berikut:', - answer: 'menjawab', + answer: 'Jawaban', contentTitle: 'konten potongan', processing: 'Dalam pemrosesan batch', completed: 'Impor selesai', @@ -69,15 +69,15 @@ const translation = { answerRequired: 'Jawaban diperlukan', }, viewModal: { - hit: 'Pukul', - hitHistory: 'Riwayat Hit', - noHitHistory: 'Tidak ada riwayat hit', + hit: 'Kecocokan', + hitHistory: 'Riwayat Kecocokan', + noHitHistory: 'Tidak ada riwayat kecocokan', annotatedResponse: 'Balas Anotasi', - hits: 'Hits', + hits: 'Kecocokan', }, 
hitHistoryTable: { response: 'Jawaban', - match: 'Korek api', + match: 'Kecocokan', query: 'Kueri', source: 'Sumber', time: 'Waktu', diff --git a/web/i18n/id-ID/app-api.ts b/web/i18n/id-ID/app-api.ts index fe528eebd4..627422a6b0 100644 --- a/web/i18n/id-ID/app-api.ts +++ b/web/i18n/id-ID/app-api.ts @@ -19,8 +19,8 @@ const translation = { completionMode: { createCompletionApi: 'Membuat Pesan Penyelesaian', messageIDTip: 'ID Pesan', - messageFeedbackApi: 'Umpan balik pesan (seperti)', - ratingTip: 'suka atau tidak suka, null adalah undo', + messageFeedbackApi: 'Umpan balik pesan (mis. spam, tidak relevan, pujian)', + ratingTip: '(mis. suka/tidak suka), null berarti membatalkan penilaian', parametersApi: 'Dapatkan informasi parameter aplikasi', parametersApiTip: 'Ambil parameter Input yang dikonfigurasi, termasuk nama variabel, nama bidang, jenis, dan nilai default. Biasanya digunakan untuk menampilkan bidang ini dalam formulir atau mengisi nilai default setelah klien dimuat.', info: 'Untuk pembuatan teks berkualitas tinggi, seperti artikel, ringkasan, dan terjemahan, gunakan API pesan penyelesaian dengan input pengguna. Pembuatan teks bergantung pada parameter model dan templat prompt yang ditetapkan di Dify Prompt Engineering.', @@ -48,7 +48,7 @@ const translation = { conversationsListLimitTip: 'Berapa banyak obrolan yang dikembalikan dalam satu permintaan', chatMsgHistoryLimit: 'Berapa banyak obrolan yang dikembalikan dalam satu permintaan', conversationsListFirstIdTip: 'ID rekaman terakhir di halaman saat ini, default tidak ada.', - messageFeedbackApi: 'Umpan balik pengguna terminal pesan, seperti', + messageFeedbackApi: 'Umpan balik pengguna terminal pesan (mis. spam, tidak relevan, pujian)', parametersApi: 'Dapatkan informasi parameter aplikasi', streaming: 'streaming kembali. Implementasi pengembalian streaming berdasarkan SSE (Server-Sent Events).', inputsTips: '(Opsional) Berikan bidang input pengguna sebagai pasangan kunci-nilai, sesuai dengan variabel di Prompt Eng. Kunci adalah nama variabel, Nilai adalah nilai parameter. Jika jenis bidang adalah Pilih, Nilai yang dikirimkan harus menjadi salah satu pilihan prasetel.', @@ -58,7 +58,7 @@ const translation = { createChatApiTip: 'Buat pesan percakapan baru atau lanjutkan dialog yang ada.', chatMsgHistoryApiTip: 'Halaman pertama mengembalikan bilah \'batas\' terbaru, yang dalam urutan terbalik.', conversationsListApi: 'Dapatkan daftar percakapan', - ratingTip: 'suka atau tidak suka, null adalah undo', + ratingTip: '(mis. 
suka/tidak suka), null berarti membatalkan penilaian', conversationRenamingApi: 'Penggantian nama percakapan', }, develop: { @@ -67,19 +67,19 @@ const translation = { pathParams: 'Parameter Jalur', requestBody: 'Isi Permintaan', }, - apiServer: 'API Server', + apiServer: 'Server API', copied: 'Disalin', - copy: 'Menyalin', - ok: 'Dalam Layanan', - regenerate: 'Regenerasi', - status: 'Keadaan', + copy: 'Salin', + ok: 'OK', + regenerate: 'Hasilkan Ulang', + status: 'Status', never: 'Tidak pernah', - playing: 'Bermain', - play: 'Bermain', - disabled: 'Cacat', + playing: 'Sedang Memutar', + play: 'Putar', + disabled: 'Dinonaktifkan', apiKey: 'Kunci API', pause: 'Jeda', - loading: 'Loading', + loading: 'Memuat...', } export default translation diff --git a/web/i18n/id-ID/app-debug.ts b/web/i18n/id-ID/app-debug.ts index 5d8575c43b..1d2056117b 100644 --- a/web/i18n/id-ID/app-debug.ts +++ b/web/i18n/id-ID/app-debug.ts @@ -324,7 +324,7 @@ const translation = { }, variableTable: { action: 'Tindakan', - typeString: 'Tali', + typeString: 'String', optional: 'Fakultatif', typeSelect: 'Pilih', type: 'Jenis Masukan', @@ -346,7 +346,7 @@ const translation = { name: 'Audio', }, document: { - name: 'Surat', + name: 'Dokumen', }, video: { name: 'Video', @@ -421,7 +421,7 @@ const translation = { language: 'Bahasa', title: 'Pengaturan Suara', autoPlay: 'Putar Otomatis', - autoPlayDisabled: 'Off', + autoPlayDisabled: 'Dinonaktifkan', resolutionTooltip: 'Bahasa pendukung suara text-to-speech。', }, settings: 'Pengaturan', @@ -448,7 +448,7 @@ const translation = { }, inputs: { queryPlaceholder: 'Silakan masukkan teks permintaan.', - run: 'LARI', + run: 'Jalankan', completionVarTip: 'Isi nilai variabel, yang akan secara otomatis diganti dengan kata-kata prompt setiap kali pertanyaan diajukan.', noVar: 'Isi nilai variabel, yang akan secara otomatis diganti dalam kata prompt setiap kali sesi baru dimulai.', noPrompt: 'Coba tulis beberapa prompt dalam input pra-prompt', diff --git a/web/i18n/id-ID/app-log.ts b/web/i18n/id-ID/app-log.ts index a33190f37e..1ccf8dec1e 100644 --- a/web/i18n/id-ID/app-log.ts +++ b/web/i18n/id-ID/app-log.ts @@ -5,8 +5,8 @@ const translation = { version: 'VERSI', time: 'Waktu yang dibuat', messageCount: 'Jumlah Pesan', - summary: 'Titel', - adminRate: 'Tingkat Op.', + summary: 'Ringkasan', + adminRate: 'Tingkat Admin', user: 'Pengguna Akhir atau Akun', startTime: 'WAKTU MULAI', updatedTime: 'Waktu yang diperbarui', @@ -18,8 +18,8 @@ const translation = { runtime: 'WAKTU BERJALAN', }, pagination: { - previous: 'Prev', - next: 'Depan', + previous: 'Sebelumnya', + next: 'Selanjutnya', }, empty: { element: { @@ -30,12 +30,12 @@ const translation = { }, }, detail: { - timeConsuming: '', + timeConsuming: 'Memakan waktu', operation: { dislike: 'tidak suka', like: 'suka', - addAnnotation: 'Tambahkan Peningkatan', - editAnnotation: 'Edit Peningkatan', + addAnnotation: 'Tambahkan Anotasi', + editAnnotation: 'Edit Anotasi', annotationPlaceholder: 'Masukkan jawaban yang diharapkan yang Anda inginkan untuk dibalas AI, yang dapat digunakan untuk penyempurnaan model dan peningkatan berkelanjutan kualitas pembuatan teks di masa mendatang.', }, time: 'Waktu', @@ -67,7 +67,7 @@ const translation = { }, ascending: 'Naik', descending: 'Turun', - sortBy: 'Kota hitam:', + sortBy: 'Urutkan berdasarkan', }, runDetail: { fileListDetail: 'Detail', diff --git a/web/i18n/id-ID/app-overview.ts b/web/i18n/id-ID/app-overview.ts index ce644e2c16..474e85bfd5 100644 --- a/web/i18n/id-ID/app-overview.ts +++ 
b/web/i18n/id-ID/app-overview.ts @@ -119,8 +119,8 @@ const translation = { explanation: 'Mudah diintegrasikan ke dalam aplikasi Anda', }, status: { - disable: 'Cacat', - running: 'Dalam Layanan', + disable: 'Nonaktif', + running: 'Berjalan', }, title: 'Ikhtisar', }, diff --git a/web/i18n/id-ID/app.ts b/web/i18n/id-ID/app.ts index 0788b96e23..05ab9fbe49 100644 --- a/web/i18n/id-ID/app.ts +++ b/web/i18n/id-ID/app.ts @@ -23,7 +23,7 @@ const translation = { appCreated: 'Aplikasi dibuat', appNamePlaceholder: 'Beri nama aplikasi Anda', appCreateDSLErrorPart3: 'Versi DSL aplikasi saat ini:', - Cancel: 'Membatalkan', + Cancel: 'Batal', previewDemo: 'Pratinjau demo', appCreateDSLWarning: 'Perhatian: Perbedaan versi DSL dapat memengaruhi fitur tertentu', appCreateDSLErrorPart1: 'Perbedaan yang signifikan dalam versi DSL telah terdeteksi. Memaksa impor dapat menyebabkan aplikasi tidak berfungsi.', @@ -46,7 +46,7 @@ const translation = { showTemplates: 'Saya ingin memilih dari templat', caution: 'Hati', chatbotShortDescription: 'Chatbot berbasis LLM dengan pengaturan sederhana', - Confirm: 'Mengkonfirmasi', + Confirm: 'Konfirmasi', agentAssistant: 'Asisten Agen Baru', appCreateFailed: 'Gagal membuat aplikasi', appCreateDSLErrorTitle: 'Ketidakcocokan Versi', @@ -58,7 +58,7 @@ const translation = { appTypeRequired: 'Silakan pilih jenis aplikasi', advancedShortDescription: 'Alur kerja disempurnakan untuk obrolan multi-giliran', completeAppIntro: 'Saya ingin membuat aplikasi yang menghasilkan teks berkualitas tinggi berdasarkan petunjuk, seperti menghasilkan artikel, ringkasan, terjemahan, dan banyak lagi.', - Create: 'Menciptakan', + Create: 'Buat', advancedUserDescription: 'Alur kerja dengan fitur memori tambahan dan antarmuka chatbot.', dropDSLToCreateApp: 'Jatuhkan file DSL di sini untuk membuat aplikasi', completeApp: 'Pembuat Teks', @@ -83,10 +83,10 @@ const translation = { searchAllTemplate: 'Cari semua templat...', }, iconPicker: { - cancel: 'Membatalkan', + cancel: 'Batal', emoji: 'Emoji', image: 'Citra', - ok: 'OKE', + ok: 'OK', }, answerIcon: { title: 'Gunakan ikon aplikasi web untuk mengganti 🤖', @@ -129,7 +129,7 @@ const translation = { }, weave: { description: 'Weave adalah platform sumber terbuka untuk mengevaluasi, menguji, dan memantau aplikasi LLM.', - title: 'Anyam', + title: 'Weave', }, aliyun: { title: 'Monitor Awan', @@ -148,8 +148,8 @@ const translation = { collapse: 'Roboh', tracing: 'Menelusuri', title: 'Melacak performa aplikasi', - disabled: 'Cacat', - enabled: 'Dalam Layanan', + disabled: 'Nonaktif', + enabled: 'Aktif', config: 'Konfigurasi', description: 'Mengonfigurasi penyedia LLMOps Pihak Ketiga dan melacak performa aplikasi.', inUse: 'Sedang digunakan', diff --git a/web/i18n/id-ID/common.ts b/web/i18n/id-ID/common.ts index 6cfb9577c4..5dafec233e 100644 --- a/web/i18n/id-ID/common.ts +++ b/web/i18n/id-ID/common.ts @@ -1,9 +1,9 @@ const translation = { theme: { theme: 'Tema', - light: 'ringan', - auto: 'sistem', - dark: 'gelap', + light: 'Terang', + auto: 'Otomatis', + dark: 'Gelap', }, api: { success: 'Keberhasilan', @@ -16,8 +16,8 @@ const translation = { setup: 'Setup', download: 'Mengunduh', getForFree: 'Dapatkan gratis', - reload: 'Reload', - lineBreak: 'Istirahat baris', + reload: 'Muat Ulang', + lineBreak: 'Baris Baru', learnMore: 'Pelajari lebih lanjut', saveAndRegenerate: 'Simpan & Buat Ulang Potongan Anak', zoomOut: 'Perkecil', @@ -26,7 +26,7 @@ const translation = { selectAll: 'Pilih Semua', in: 'di', skip: 'Lewat', - remove: 'Buka', + remove: 'Hapus', rename: 
'Ubah nama', close: 'Tutup', ok: 'OKE', @@ -35,8 +35,8 @@ const translation = { log: 'Batang', delete: 'Menghapus', viewDetails: 'Lihat Detail', - view: 'Melihat', - clear: 'Jelas', + view: 'Lihat', + clear: 'Hapus', deleteApp: 'Hapus Aplikasi', downloadSuccess: 'Unduh Selesai.', change: 'Ubah', @@ -45,7 +45,7 @@ const translation = { copied: 'Disalin', deSelectAll: 'Batalkan pilihan Semua', saveAndEnable: 'Simpan & Aktifkan', - refresh: 'Restart', + refresh: 'Segarkan', downloadFailed: 'Unduhan gagal. Silakan coba lagi nanti.', edit: 'Mengedit', send: 'Kirim', @@ -56,7 +56,7 @@ const translation = { add: 'Tambah', copy: 'Menyalin', audioSourceUnavailable: 'AudioSource tidak tersedia', - submit: 'Tunduk', + submit: 'Kirim', duplicate: 'Duplikat', save: 'Simpan', added: 'Ditambahkan', @@ -100,7 +100,7 @@ const translation = { }, }, unit: { - char: 'Tank', + char: 'karakter', }, actionMsg: { noModification: 'Tidak ada modifikasi saat ini.', @@ -148,7 +148,7 @@ const translation = { account: 'Rekening', newApp: 'Aplikasi Baru', explore: 'Menjelajahi', - apps: 'Belajar', + apps: 'Aplikasi', status: 'beta', tools: 'Perkakas', exploreMarketplace: 'Jelajahi Marketplace', @@ -165,7 +165,7 @@ const translation = { settings: 'Pengaturan', support: 'Dukung', github: 'GitHub', - about: 'Sekitar', + about: 'Tentang', workspace: 'Workspace', createWorkspace: 'Membuat Ruang Kerja', }, @@ -503,13 +503,13 @@ const translation = { }, integratedAlert: 'Notion terintegrasi melalui kredensial internal, tidak perlu mengotorisasi ulang.', disconnected: 'Terputus', - remove: 'Buka', + remove: 'Hapus', addWorkspace: 'Menambahkan ruang kerja', description: 'Menggunakan Notion sebagai sumber data untuk Pengetahuan.', connected: 'Terhubung', pagesAuthorized: 'Halaman yang disahkan', changeAuthorizedPages: 'Mengubah halaman resmi', - title: 'Gagasan', + title: 'Notion', sync: 'Sync', connectedWorkspace: 'Ruang kerja yang terhubung', }, @@ -597,7 +597,7 @@ const translation = { 'claude-2': 'Claude-2', 'gpt-3.5-turbo': 'GPT-3.5-Turbo', 'gpt-4': 'GPT-4', - 'whisper-1': 'Bisikan-1', + 'whisper-1': 'Whisper-1', 'text-davinci-003': 'Teks-Davinci-003', 'gpt-4-32k': 'GPT-4-32K', 'gpt-3.5-turbo-16k': 'GPT-3.5-Turbo-16K', @@ -615,7 +615,7 @@ const translation = { }, resend: 'Kirim Ulang', conversationName: 'Nama percakapan', - thinking: 'Pikiran...', + thinking: 'Sedang berpikir...', conversationNameCanNotEmpty: 'Nama percakapan diperlukan', thought: 'Pikiran', renameConversation: 'Ganti Nama Percakapan', @@ -712,7 +712,7 @@ const translation = { deleteDescription: 'Apakah Anda yakin ingin menghapus gambar profil Anda? 
Akun Anda akan menggunakan avatar awal default.', }, imageInput: { - browse: 'ramban', + browse: 'Telusuri', supportedFormats: 'Mendukung PNG, JPG, JPEG, WEBP dan GIF', dropImageHere: 'Letakkan gambar Anda di sini, atau', }, diff --git a/web/i18n/id-ID/custom.ts b/web/i18n/id-ID/custom.ts index 6b45241f16..c4a26800d2 100644 --- a/web/i18n/id-ID/custom.ts +++ b/web/i18n/id-ID/custom.ts @@ -7,25 +7,25 @@ const translation = { }, webapp: { changeLogoTip: 'Format SVG atau PNG dengan ukuran minimum 40x40px', - removeBrand: 'Hapus Didukung oleh Dify', - changeLogo: 'Perubahan Didukung oleh Citra Merek', - title: 'Sesuaikan merek aplikasi web', + removeBrand: 'Hapus Branding Dify', + changeLogo: 'Ubah Logo Merek', + title: 'Kustomisasi Branding Aplikasi Web', }, app: { - title: 'Menyesuaikan merek header aplikasi', + title: 'Kustomisasi Branding Header Aplikasi', changeLogoTip: 'Format SVG atau PNG dengan ukuran minimal 80x80px', }, customize: { - suffix: 'untuk meningkatkan ke edisi Enterprise.', - prefix: 'Untuk menyesuaikan logo merek di dalam aplikasi, silakan', - contactUs: 'Hubungi', + suffix: 'untuk upgrade ke edisi Enterprise.', + prefix: 'Untuk kustomisasi logo merek di dalam aplikasi, silakan', + contactUs: 'Hubungi Kami', }, custom: 'Kustomisasi', - uploading: 'Meng', + uploading: 'Mengunggah...', upload: 'Unggah', change: 'Ubah', restore: 'Pulihkan Default', - apply: 'Berlaku', + apply: 'Terapkan', uploadedFail: 'Unggahan gambar gagal, silakan unggah ulang.', } diff --git a/web/i18n/id-ID/dataset-creation.ts b/web/i18n/id-ID/dataset-creation.ts index 9c22751a25..2712f9af0f 100644 --- a/web/i18n/id-ID/dataset-creation.ts +++ b/web/i18n/id-ID/dataset-creation.ts @@ -56,17 +56,17 @@ const translation = { tip: 'Pengetahuan kosong tidak akan berisi dokumen, dan Anda dapat mengunggah dokumen kapan saja.', }, website: { - configure: 'Mengkonfigurasi', + configure: 'Konfigurasikan', fireCrawlNotConfigured: 'Firecrawl tidak dikonfigurasi', chooseProvider: 'Pilih penyedia', - configureFirecrawl: 'Mengonfigurasi Firecrawl', + configureFirecrawl: 'Konfigurasikan Firecrawl', watercrawlDoc: 'Dokumen Watercrawl', options: 'Pilihan', firecrawlTitle: 'Mengekstrak konten web dengan 🔥Firecrawl', jinaReaderNotConfigured: 'Jina Reader tidak dikonfigurasi', preview: 'Pratayang', resetAll: 'Atur Ulang Semua', - run: 'Lari', + run: 'Jalankan', limit: 'Batas', useSitemap: 'Menggunakan peta situs', jinaReaderDoc: 'Pelajari lebih lanjut tentang Jina Reader', @@ -87,19 +87,19 @@ const translation = { maxDepth: 'Kedalaman maks', jinaReaderDocLink: 'https://jina.ai/reader', selectAll: 'Pilih Semua', - maxDepthTooltip: 'Kedalaman maksimum untuk di-crawl relatif terhadap URL yang dimasukkan. Kedalaman 0 hanya mengikis halaman url yang dimasukkan, kedalaman 1 mengikis url dan semuanya setelah dimasukkanURL satu /, dan seterusnya.', + maxDepthTooltip: 'Kedalaman maksimum untuk di-crawl relatif terhadap URL yang dimasukkan. 
Kedalaman 0 hanya mengikis halaman url yang dimasukkan, kedalaman 1 mengikis url dan semuanya setelah dimasukkan URL satu /, dan seterusnya.', waterCrawlNotConfiguredDescription: 'Konfigurasikan Watercrawl dengan kunci API untuk menggunakannya.', firecrawlDoc: 'Dokumen Firecrawl', - configureWatercrawl: 'Mengonfigurasi Watercrawl', + configureWatercrawl: 'Konfigurasikan Watercrawl', }, pagePreview: 'Pratinjau Halaman', - notionSyncTitle: 'Gagasan tidak terhubung', + notionSyncTitle: 'Notion tidak terhubung', filePreview: 'Pratinjau File', cancel: 'Membatalkan', emptyDatasetCreation: 'Saya ingin membuat Pengetahuan kosong', - button: 'Depan', + button: 'Berikutnya', notionSyncTip: 'Untuk menyinkronkan dengan Notion, koneksi ke Notion harus dibuat terlebih dahulu.', - connect: 'Buka terhubung', + connect: 'Hubungkan', }, stepTwo: { paragraph: 'Paragraf', @@ -162,7 +162,7 @@ const translation = { previewChunkTip: 'Klik tombol \'Pratinjau Potongan\' di sebelah kiri untuk memuat pratinjau', sideTipP4: 'Potongan dan pembersihan yang tepat meningkatkan kinerja model, memberikan hasil yang lebih akurat dan berharga.', previewTitleButton: 'Pratayang', - switch: 'Sakelar', + switch: 'Beralih', datasetSettingLink: 'Pengaturan pengetahuan.', rules: 'Aturan Pra-pemrosesan Teks', sideTipP2: 'Segmentasi membagi teks panjang menjadi paragraf sehingga model dapat memahami dengan lebih baik. Ini meningkatkan kualitas dan relevansi hasil model.', @@ -193,7 +193,7 @@ const translation = { resume: 'Melanjutkan pemrosesan', stop: 'Hentikan pemrosesan', creationContent: 'Kami secara otomatis menamai Pengetahuan, Anda dapat memodifikasinya kapan saja.', - modelButtonConfirm: 'Mengkonfirmasi', + modelButtonConfirm: 'Konfirmasi', sideTipContent: 'Setelah dokumen selesai diindeks, Pengetahuan dapat diintegrasikan ke dalam aplikasi sebagai konteks, Anda dapat menemukan pengaturan konteks di halaman orkestrasi perintah. 
Anda juga dapat membuatnya sebagai plugin pengindeksan ChatGPT independen untuk dirilis.', modelButtonCancel: 'Membatalkan', label: 'Nama pengetahuan', diff --git a/web/i18n/id-ID/dataset-hit-testing.ts b/web/i18n/id-ID/dataset-hit-testing.ts index c66f650c52..d2a66eef08 100644 --- a/web/i18n/id-ID/dataset-hit-testing.ts +++ b/web/i18n/id-ID/dataset-hit-testing.ts @@ -11,7 +11,7 @@ const translation = { countWarning: 'Hingga 200 karakter.', placeholder: 'Silakan masukkan teks, disarankan untuk memasukkan kalimat deklaratif singkat.', indexWarning: 'Pengetahuan berkualitas tinggi saja.', - testing: 'Ujian', + testing: 'Pengujian', }, hit: { emptyTip: 'Hasil Pengujian Pengambilan akan ditampilkan di sini', @@ -22,7 +22,7 @@ const translation = { open: 'Buka', settingTitle: 'Pengaturan Pengambilan', dateTimeFormat: 'MM / DD / YYYY hh: mm A', - desc: 'Uji efek pukulan Pengetahuan berdasarkan teks kueri yang diberikan.', + desc: 'Uji dampak pengetahuan terhadap hasil pencarian berdasarkan teks kueri yang diberikan.', viewDetail: 'Lihat Detail', viewChart: 'Lihat GRAFIK VAKTOR', chunkDetail: 'Detail Potongan', diff --git a/web/i18n/id-ID/dataset-settings.ts b/web/i18n/id-ID/dataset-settings.ts index 247033ecdf..ba29fcf38d 100644 --- a/web/i18n/id-ID/dataset-settings.ts +++ b/web/i18n/id-ID/dataset-settings.ts @@ -3,7 +3,7 @@ const translation = { retrievalSetting: { title: 'Pengaturan Pengambilan', description: 'tentang metode pengambilan.', - longDescription: 'tentang metode pengambilan, Anda dapat mengudagnya kapan saja di pengaturan Pengetahuan.', + longDescription: 'tentang metode pengambilan, Anda dapat mengunduhnya kapan saja di pengaturan Pengetahuan.', method: 'Metode Pengambilan', learnMore: 'Pelajari lebih lanjut', }, diff --git a/web/i18n/id-ID/dataset.ts b/web/i18n/id-ID/dataset.ts index 727dca150e..1a48e623c7 100644 --- a/web/i18n/id-ID/dataset.ts +++ b/web/i18n/id-ID/dataset.ts @@ -133,7 +133,7 @@ const translation = { search: 'Metadata pencarian', }, datasetMetadata: { - disabled: 'Cacat', + disabled: 'Nonaktif', addMetaData: 'Tambahkan Metadata', description: 'Anda dapat mengelola semua metadata dalam pengetahuan ini di sini. Modifikasi akan disinkronkan ke setiap dokumen.', deleteTitle: 'Konfirmasi untuk menghapus', @@ -141,7 +141,7 @@ const translation = { rename: 'Ubah nama', builtInDescription: 'Metadata bawaan secara otomatis diekstrak dan dihasilkan. Itu harus diaktifkan sebelum digunakan dan tidak dapat diedit.', namePlaceholder: 'Nama metadata', - builtIn: 'Built-in', + builtIn: 'Bawaan', }, documentMetadata: { metadataToolTip: 'Metadata berfungsi sebagai filter penting yang meningkatkan akurasi dan relevansi pengambilan informasi. 
Anda dapat memodifikasi dan menambahkan metadata untuk dokumen ini di sini.', diff --git a/web/i18n/id-ID/education.ts b/web/i18n/id-ID/education.ts index 46232c953e..85f74cc51e 100644 --- a/web/i18n/id-ID/education.ts +++ b/web/i18n/id-ID/education.ts @@ -60,7 +60,7 @@ const translation = { }, dateFormat: 'MM / DD / YYYY', }, - submit: 'Tunduk', + submit: 'Kirim', toVerified: 'Dapatkan Pendidikan Terverifikasi', currentSigned: 'SAAT INI MASUK SEBAGAI', successTitle: 'Anda telah mendapatkan Dify Education Verified', diff --git a/web/i18n/id-ID/explore.ts b/web/i18n/id-ID/explore.ts index c9d22a605b..a482d8f755 100644 --- a/web/i18n/id-ID/explore.ts +++ b/web/i18n/id-ID/explore.ts @@ -1,10 +1,10 @@ const translation = { sidebar: { action: { - unpin: 'Lepaskan pin', - pin: 'Pin', - delete: 'Menghapus', - rename: 'Ubah nama', + unpin: 'Lepaskan sematan', + pin: 'Sematkan', + delete: 'Hapus', + rename: 'Ganti nama', }, delete: { content: 'Apakah Anda yakin ingin menghapus aplikasi ini?', diff --git a/web/i18n/id-ID/login.ts b/web/i18n/id-ID/login.ts index 59306fa310..317c3e7fcd 100644 --- a/web/i18n/id-ID/login.ts +++ b/web/i18n/id-ID/login.ts @@ -36,8 +36,8 @@ const translation = { continueWithCode: 'Lanjutkan dengan kode', sendVerificationCode: 'Kirim Kode Verifikasi', invalidInvitationCode: 'Kode undangan tidak valid', - installBtn: 'Mengatur', - joinTipStart: 'Mengundang Anda bergabung', + installBtn: 'Siapkan', + joinTipStart: 'Mengundang Anda untuk bergabung', or: 'ATAU', namePlaceholder: 'Nama pengguna Anda', withSSO: 'Lanjutkan dengan SSO', @@ -53,23 +53,23 @@ const translation = { invitationCodePlaceholder: 'Kode undangan Anda', emailPlaceholder: 'Email Anda', tos: 'Ketentuan Layanan', - go: 'Pergi ke Dify', + go: 'Buka Dify', forgotPassword: 'Lupa Kata Sandi Anda?', sendUsMail: 'Kirimkan perkenalan Anda melalui email kepada kami, dan kami akan menangani permintaan undangan.', pp: 'Kebijakan Privasi', activatedTipEnd: 'tim', - backToSignIn: 'Kembali untuk login', + backToSignIn: 'Kembali ke halaman masuk', passwordChanged: 'Masuk sekarang', withGitHub: 'Lanjutkan dengan GitHub', accountAlreadyInited: 'Akun sudah diinisialisasi', withGoogle: 'Lanjutkan dengan Google', - rightDesc: 'Bangun aplikasi AI yang menawan secara visual, dapat dioperasikan, dan ditingkatkan dengan mudah.', + rightDesc: 'Bangun aplikasi AI yang menarik secara visual, mudah dioperasikan, dan mudah diskalakan.', invitationCode: 'Kode Undangan', invalidToken: 'Token tidak valid atau kedaluwarsa', setAdminAccount: 'Menyiapkan akun admin', forgotPasswordDesc: 'Silakan masukkan alamat email Anda untuk mengatur ulang kata sandi Anda. 
Kami akan mengirimi Anda email dengan instruksi tentang cara mengatur ulang kata sandi Anda.', confirmPassword: 'Konfirmasi Kata Sandi', - changePasswordBtn: 'Menetapkan kata sandi', + changePasswordBtn: 'Tetapkan kata sandi', resetPassword: 'Atur Ulang Kata Sandi', explore: 'Jelajahi Dify', useVerificationCode: 'Gunakan Kode Verifikasi', @@ -84,7 +84,7 @@ const translation = { licenseLost: 'Lisensi Hilang', licenseInactive: 'Lisensi Tidak Aktif', enterYourName: 'Silakan masukkan nama pengguna Anda', - back: 'Belakang', + back: 'Kembali', activated: 'Masuk sekarang', goToInit: 'Jika Anda belum menginisialisasi akun, silakan buka halaman inisialisasi', licenseExpired: 'Lisensi Kedaluwarsa', @@ -94,9 +94,9 @@ const translation = { validate: 'Memvalidasi', resetPasswordDesc: 'Ketik email yang Anda gunakan untuk mendaftar di Dify dan kami akan mengirimkan email reset kata sandi kepada Anda.', licenseLostTip: 'Gagal menghubungkan server lisensi Dify. Hubungi administrator Anda untuk terus menggunakan Dify.', - signBtn: 'Tandatangan', + signBtn: 'Masuk', sendResetLink: 'Kirim tautan reset', - createAndSignIn: 'Membuat dan masuk', + createAndSignIn: 'Buat dan masuk', licenseExpiredTip: 'Lisensi Dify Enterprise untuk ruang kerja Anda telah kedaluwarsa. Hubungi administrator Anda untuk terus menggunakan Dify.', email: 'Alamat email', noLoginMethodTip: 'Silakan hubungi admin sistem untuk menambahkan metode autentikasi.', @@ -104,11 +104,11 @@ const translation = { licenseInactiveTip: 'Lisensi Dify Enterprise untuk ruang kerja Anda tidak aktif. Hubungi administrator Anda untuk terus menggunakan Dify.', rightTitle: 'Buka potensi penuh LLM', welcome: '👋 Selamat datang di Dify, silakan login untuk melanjutkan.', - changePassword: 'Menetapkan kata sandi', - setAdminAccountDesc: 'Hak istimewa maksimum untuk akun admin, yang dapat digunakan untuk membuat aplikasi dan mengelola penyedia LLM, dll.', - join: 'Ikat', + changePassword: 'Ubah kata sandi', + setAdminAccountDesc: 'Akun admin memiliki hak istimewa penuh untuk membuat aplikasi, mengelola penyedia LLM, dll.', + join: 'Gabung', forget: 'Lupa Kata Sandi Anda?', - backToLogin: 'Kembali ke login', + backToLogin: 'Kembali ke halaman masuk', oneMoreStep: 'Satu langkah lagi', } diff --git a/web/i18n/id-ID/oauth.ts b/web/i18n/id-ID/oauth.ts index c12c4161c4..933fcb5525 100644 --- a/web/i18n/id-ID/oauth.ts +++ b/web/i18n/id-ID/oauth.ts @@ -17,11 +17,11 @@ const translation = { authorizeFailed: 'Otorisasi gagal', authAppInfoFetchFailed: 'Gagal mengambil info aplikasi untuk otorisasi', }, - continue: 'Terus', - unknownApp: 'Aplikasi Tidak Dikenal', - login: 'Login', - connect: 'Hubungkan ke', - switchAccount: 'Beralih Akun', + continue: 'Lanjut', + unknownApp: 'Aplikasi tidak dikenal', + login: 'Masuk', + connect: 'Hubungkan', + switchAccount: 'Ganti Akun', } export default translation diff --git a/web/i18n/id-ID/plugin.ts b/web/i18n/id-ID/plugin.ts index ed50a203c2..c9b9b939d8 100644 --- a/web/i18n/id-ID/plugin.ts +++ b/web/i18n/id-ID/plugin.ts @@ -39,7 +39,7 @@ const translation = { local: 'Plugin Lokal', }, operation: { - remove: 'Buka', + remove: 'Hapus', info: 'Plugin Info', update: 'Pemutakhiran', detail: 'Rincian', @@ -55,7 +55,7 @@ const translation = { empty: 'Klik tombol \' \' untuk menambahkan alat. 
Anda dapat menambahkan beberapa alat.', params: 'KONFIGURASI PENALARAN', unsupportedMCPTool: 'Saat ini versi plugin strategi agen yang dipilih tidak mendukung alat MCP.', - auto: 'Mobil', + auto: 'Otomatis', descriptionPlaceholder: 'Deskripsi singkat tentang tujuan alat, misalnya, mendapatkan suhu untuk lokasi tertentu.', toolSetting: 'Pengaturan Alat', settings: 'PENGATURAN PENGGUNA', @@ -174,11 +174,11 @@ const translation = { installing: 'Menginstal...', uploadFailed: 'Upload gagal', pluginLoadErrorDesc: 'Plugin ini tidak akan diinstal', - next: 'Depan', + next: 'Lanjut', installedSuccessfully: 'Instalasi berhasil', install: 'Pasang', installFailed: 'Instalasi gagal', - back: 'Belakang', + back: 'Kembali', readyToInstallPackage: 'Tentang menginstal plugin berikut', installedSuccessfullyDesc: 'Plugin telah berhasil diinstal.', pluginLoadError: 'Kesalahan pemuatan plugin', @@ -227,7 +227,7 @@ const translation = { empower: 'Berdayakan pengembangan AI Anda', partnerTip: 'Diverifikasi oleh partner Dify', moreFrom: 'Selengkapnya dari Marketplace', - sortBy: 'Kota hitam', + sortBy: 'Urutkan berdasarkan', and: 'dan', difyMarketplace: 'Dify Marketplace', verifiedTip: 'Diverifikasi oleh Dify', diff --git a/web/i18n/id-ID/time.ts b/web/i18n/id-ID/time.ts index d6ed8813e1..6e06130c7b 100644 --- a/web/i18n/id-ID/time.ts +++ b/web/i18n/id-ID/time.ts @@ -1,17 +1,17 @@ const translation = { daysInWeek: { - Wed: 'Bertaruh', + Wed: 'Rabu', Thu: 'Kamis', - Sun: 'Matahari', - Tue: 'Membunuh', - Mon: 'Mon', - Sat: 'Hari sabtu', - Fri: 'Bebas', + Sun: 'Minggu', + Tue: 'Selasa', + Mon: 'Senin', + Sat: 'Sabtu', + Fri: 'Jumat', }, months: { August: 'Agustus', October: 'Oktober', - May: 'Menjahit', + May: 'Mei', September: 'September', December: 'Desember', November: 'November', @@ -34,7 +34,7 @@ const translation = { dateFormats: { display: 'MMMM D, YYYY', input: 'YYYY-MM-DD', - outputWithTime: 'YYYY-MM-DDTHH:mm:ss. 
SSSZ', + outputWithTime: 'YYYY-MM-DDTHH:mm:ss.SSSZ', output: 'YYYY-MM-DD', displayWithTime: 'MMMM D, YYYY hh:mm A', }, diff --git a/web/i18n/id-ID/workflow.ts b/web/i18n/id-ID/workflow.ts index 57b138b2e2..e2daef6f7a 100644 --- a/web/i18n/id-ID/workflow.ts +++ b/web/i18n/id-ID/workflow.ts @@ -14,7 +14,7 @@ const translation = { title: 'Tarik', }, }, - undo: 'Buka', + undo: 'Urungkan', embedIntoSite: 'Sematkan ke Situs', editing: 'Mengedit', inRunMode: 'Dalam Mode Jalankan', @@ -33,10 +33,10 @@ const translation = { addParallelNode: 'Tambahkan Node Paralel', onFailure: 'Pada Kegagalan', update: 'Pemutakhiran', - parallelRun: 'Lari Paralel', - configure: 'Mengkonfigurasi', + parallelRun: 'Jalankan Paralel', + configure: 'Konfigurasikan', copy: 'Menyalin', - redo: 'Siap', + redo: 'Ulangi', runApp: 'Jalankan Aplikasi', noHistory: 'Tidak Ada Sejarah', importDSLTip: 'Draf saat ini akan ditimpa.\nEkspor alur kerja sebagai cadangan sebelum mengimpor.', @@ -54,7 +54,7 @@ const translation = { importSuccess: 'Berhasil Impor', jumpToNode: 'Lompat ke simpul ini', tagBound: 'Jumlah aplikasi yang menggunakan tag ini', - model: 'Pola', + model: 'Model', workflowAsToolTip: 'Konfigurasi ulang alat diperlukan setelah pembaruan alur kerja.', currentDraft: 'Draf Saat Ini', parallel: 'SEJAJAR', @@ -65,7 +65,7 @@ const translation = { importWarning: 'Hati', running: 'Menjalankan', publishedAt: 'Diterbitkan', - run: 'Lari', + run: 'Jalankan', importDSL: 'Impor DSL', featuresDescription: 'Tingkatkan pengalaman pengguna aplikasi web', inPreviewMode: 'Dalam Mode Pratinjau', @@ -168,9 +168,9 @@ const translation = { nodeAdd: 'Node ditambahkan', nodePaste: 'Node ditempelkan', noteDelete: 'Catatan dihapus', - hint: 'Indian', + hint: 'Petunjuk', nodeTitleChange: 'Judul simpul diubah', - title: 'Perubahan Riwayat', + title: 'Riwayat Perubahan', nodeDescriptionChange: 'Deskripsi simpul diubah', clearHistory: 'Hapus Sejarah', placeholder: 'Anda belum mengubah apa pun', @@ -189,7 +189,7 @@ const translation = { errorMsg: { fields: { variable: 'Nama Variabel', - model: 'Pola', + model: 'Model', rerankModel: 'Model Peringkat Ulang yang dikonfigurasi', visionVariable: 'Variabel Penglihatan', variableValue: 'Nilai Variabel', @@ -238,11 +238,11 @@ const translation = { 'question-classifier': 'Pengklasifikasi Pertanyaan', 'iteration-start': 'Iterasi Mulai', 'knowledge-retrieval': 'Pengambilan Pengetahuan', - 'loop': 'Lari', + 'loop': 'Perulangan', 'assigner': 'Penerima Variabel', 'agent': 'Agen', 'list-operator': 'Operator Daftar', - 'answer': 'Menjawab', + 'answer': 'Jawaban', 'parameter-extractor': 'Ekstraktor Parameter', 'document-extractor': 'Ekstraktor Dokumen', 'end': 'Ujung', @@ -286,10 +286,10 @@ const translation = { horizontal: 'Horisontal', distributeHorizontal: 'Spasi Secara Horizontal', zoomTo100: 'Perbesar hingga 100%', - alignLeft: 'Kiri', + alignLeft: 'Rata Kiri', distributeVertical: 'Ruang Secara Vertikal', zoomTo50: 'Perbesar hingga 50%', - alignBottom: 'Dasar', + alignBottom: 'Rata Bawah', }, variableReference: { conversationVars: 'variabel percakapan', diff --git a/web/i18n/tr-TR/common.ts b/web/i18n/tr-TR/common.ts index 2ecd18f787..84646f7dec 100644 --- a/web/i18n/tr-TR/common.ts +++ b/web/i18n/tr-TR/common.ts @@ -746,7 +746,7 @@ const translation = { content: 'Geri Bildirim İçeriği', subtitle: 'Lütfen bu yanıtla ilgili neyin yanlış gittiğini bize bildirin', title: 'Geri Bildirim Sağla', - placeholder: 'Lütfen neyin yanlış gittiğini veya nasıl iyileşebileceğimizı açıklayın...', + placeholder: 'Lütfen neyin 
yanlış gittiğini veya nasıl iyileşebileceğimizi açıklayın...', }, } diff --git a/web/i18n/uk-UA/common.ts b/web/i18n/uk-UA/common.ts index bff6634aff..f8e54f8427 100644 --- a/web/i18n/uk-UA/common.ts +++ b/web/i18n/uk-UA/common.ts @@ -747,7 +747,7 @@ const translation = { title: 'Надати відгук', content: 'Зміст відгуку', placeholder: 'Будь ласка, опишіть, що пішло не так або як ми можемо покращити...', - subtitle: 'Будь ласка, скажіть нам, що пішло не так з цим відповіді', + subtitle: 'Будь ласка, скажіть нам, що пішло не так із цією відповіддю', }, } From a7627882a787d0172821f7919df88c3fe23276ac Mon Sep 17 00:00:00 2001 From: 17hz <0x149527@gmail.com> Date: Wed, 3 Sep 2025 15:39:09 +0800 Subject: [PATCH 03/46] fix: Boolean type control is not displayed (#25031) Co-authored-by: WTW0313 --- .../account-setting/model-provider-page/declarations.ts | 2 +- .../workflow/nodes/_base/components/form-input-item.tsx | 8 ++++---- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/web/app/components/header/account-setting/model-provider-page/declarations.ts b/web/app/components/header/account-setting/model-provider-page/declarations.ts index 9fac34b21b..62cb1a96e9 100644 --- a/web/app/components/header/account-setting/model-provider-page/declarations.ts +++ b/web/app/components/header/account-setting/model-provider-page/declarations.ts @@ -14,7 +14,7 @@ export enum FormTypeEnum { secretInput = 'secret-input', select = 'select', radio = 'radio', - boolean = 'boolean', + boolean = 'checkbox', files = 'files', file = 'file', modelSelector = 'model-selector', diff --git a/web/app/components/workflow/nodes/_base/components/form-input-item.tsx b/web/app/components/workflow/nodes/_base/components/form-input-item.tsx index d624130317..a7825145b4 100644 --- a/web/app/components/workflow/nodes/_base/components/form-input-item.tsx +++ b/web/app/components/workflow/nodes/_base/components/form-input-item.tsx @@ -90,8 +90,8 @@ const FormInputItem: FC = ({ // return VarType.appSelector // else if (isModelSelector) // return VarType.modelSelector - // else if (isBoolean) - // return VarType.boolean + else if (isBoolean) + return VarType.boolean else if (isObject) return VarType.object else if (isArray) @@ -183,7 +183,7 @@ const FormInputItem: FC = ({ return (
{showTypeSwitch && ( - + )} {isString && ( = ({ placeholder={placeholder?.[language] || placeholder?.en_US} /> )} - {isBoolean && ( + {isBoolean && isConstant && ( Date: Wed, 3 Sep 2025 16:22:13 +0800 Subject: [PATCH 04/46] Fix advanced chat workflow event handler signature mismatch (#25078) Co-authored-by: autofix-ci[bot] <114827586+autofix-ci[bot]@users.noreply.github.com> --- Makefile | 2 +- api/core/app/apps/advanced_chat/generate_task_pipeline.py | 2 +- api/extensions/storage/clickzetta_volume/file_lifecycle.py | 5 +++-- .../storage/clickzetta_volume/volume_permissions.py | 3 ++- 4 files changed, 7 insertions(+), 5 deletions(-) diff --git a/Makefile b/Makefile index 388c367fdf..d82f6f24ad 100644 --- a/Makefile +++ b/Makefile @@ -30,7 +30,7 @@ prepare-web: prepare-api: @echo "🔧 Setting up API environment..." @cp -n api/.env.example api/.env 2>/dev/null || echo "API .env already exists" - @cd api && uv sync --dev --extra all + @cd api && uv sync --dev @cd api && uv run flask db upgrade @echo "✅ API environment prepared (not started)" diff --git a/api/core/app/apps/advanced_chat/generate_task_pipeline.py b/api/core/app/apps/advanced_chat/generate_task_pipeline.py index 2f0e4ef319..750e13c502 100644 --- a/api/core/app/apps/advanced_chat/generate_task_pipeline.py +++ b/api/core/app/apps/advanced_chat/generate_task_pipeline.py @@ -310,7 +310,7 @@ class AdvancedChatAppGenerateTaskPipeline: err = self._base_task_pipeline._handle_error(event=event, session=session, message_id=self._message_id) yield self._base_task_pipeline._error_to_stream_response(err) - def _handle_workflow_started_event(self, **kwargs) -> Generator[StreamResponse, None, None]: + def _handle_workflow_started_event(self, *args, **kwargs) -> Generator[StreamResponse, None, None]: """Handle workflow started events.""" with self._database_session() as session: workflow_execution = self._workflow_cycle_manager.handle_workflow_run_start() diff --git a/api/extensions/storage/clickzetta_volume/file_lifecycle.py b/api/extensions/storage/clickzetta_volume/file_lifecycle.py index 29210dd0f0..f5d6fd6f22 100644 --- a/api/extensions/storage/clickzetta_volume/file_lifecycle.py +++ b/api/extensions/storage/clickzetta_volume/file_lifecycle.py @@ -1,7 +1,8 @@ """ClickZetta Volume file lifecycle management -This module provides file lifecycle management features including version control, automatic cleanup, backup and restore. -Supports complete lifecycle management for knowledge base files. +This module provides file lifecycle management features including version control, +automatic cleanup, backup and restore. Supports complete lifecycle management for +knowledge base files. 
""" import json diff --git a/api/extensions/storage/clickzetta_volume/volume_permissions.py b/api/extensions/storage/clickzetta_volume/volume_permissions.py index e9503595af..d216790f17 100644 --- a/api/extensions/storage/clickzetta_volume/volume_permissions.py +++ b/api/extensions/storage/clickzetta_volume/volume_permissions.py @@ -121,7 +121,8 @@ class VolumePermissionManager: except Exception: logger.exception("User Volume permission check failed") - # For User Volume, if permission check fails, it might be a configuration issue, provide friendlier error message + # For User Volume, if permission check fails, it might be a configuration issue, + # provide friendlier error message logger.info("User Volume permission check failed, but permission checking is disabled in this version") return False From 67cc70ad6146ae84f78fb96bfc1e8c3f3d1d981e Mon Sep 17 00:00:00 2001 From: zxhlyh Date: Wed, 3 Sep 2025 18:23:57 +0800 Subject: [PATCH 05/46] fix: model credential name (#25081) Co-authored-by: hjlarry Co-authored-by: autofix-ci[bot] <114827586+autofix-ci[bot]@users.noreply.github.com> --- api/core/entities/provider_configuration.py | 16 ++++++++-------- .../model-provider-page/model-modal/index.tsx | 4 ++-- 2 files changed, 10 insertions(+), 10 deletions(-) diff --git a/api/core/entities/provider_configuration.py b/api/core/entities/provider_configuration.py index b74e081dd4..9119462aca 100644 --- a/api/core/entities/provider_configuration.py +++ b/api/core/entities/provider_configuration.py @@ -410,10 +410,9 @@ class ProviderConfiguration(BaseModel): :return: """ with Session(db.engine) as session: - if credential_name and self._check_provider_credential_name_exists( - credential_name=credential_name, session=session - ): - raise ValueError(f"Credential with name '{credential_name}' already exists.") + if credential_name: + if self._check_provider_credential_name_exists(credential_name=credential_name, session=session): + raise ValueError(f"Credential with name '{credential_name}' already exists.") else: credential_name = self._generate_provider_credential_name(session) @@ -890,10 +889,11 @@ class ProviderConfiguration(BaseModel): :return: """ with Session(db.engine) as session: - if credential_name and self._check_custom_model_credential_name_exists( - model=model, model_type=model_type, credential_name=credential_name, session=session - ): - raise ValueError(f"Model credential with name '{credential_name}' already exists for {model}.") + if credential_name: + if self._check_custom_model_credential_name_exists( + model=model, model_type=model_type, credential_name=credential_name, session=session + ): + raise ValueError(f"Model credential with name '{credential_name}' already exists for {model}.") else: credential_name = self._generate_custom_model_credential_name( model=model, model_type=model_type, session=session diff --git a/web/app/components/header/account-setting/model-provider-page/model-modal/index.tsx b/web/app/components/header/account-setting/model-provider-page/model-modal/index.tsx index adf633831b..4ffbc8f191 100644 --- a/web/app/components/header/account-setting/model-provider-page/model-modal/index.tsx +++ b/web/app/components/header/account-setting/model-provider-page/model-modal/index.tsx @@ -115,7 +115,7 @@ const ModelModal: FC = ({ const [selectedCredential, setSelectedCredential] = useState() const formRef2 = useRef(null) const isEditMode = !!Object.keys(formValues).filter((key) => { - return key !== '__model_name' && key !== '__model_type' + return key !== 
'__model_name' && key !== '__model_type' && !!formValues[key] }).length && isCurrentWorkspaceManager const handleSave = useCallback(async () => { @@ -167,7 +167,7 @@ const ModelModal: FC = ({ __authorization_name__, ...rest } = values - if (__model_name && __model_type && __authorization_name__) { + if (__model_name && __model_type) { await handleSaveCredential({ credential_id: credential?.credential_id, credentials: rest, From d011ddfc643a49369e5f1021d7a45e56b97eeb33 Mon Sep 17 00:00:00 2001 From: Stream Date: Wed, 3 Sep 2025 18:54:07 +0800 Subject: [PATCH 06/46] chore(version): bump version to 1.8.1 (#25060) --- api/pyproject.toml | 2 +- api/uv.lock | 2 +- docker/docker-compose-template.yaml | 2 +- docker/docker-compose.yaml | 2 +- web/package.json | 2 +- 5 files changed, 5 insertions(+), 5 deletions(-) diff --git a/api/pyproject.toml b/api/pyproject.toml index d6f74fc510..a0c108cd2c 100644 --- a/api/pyproject.toml +++ b/api/pyproject.toml @@ -1,6 +1,6 @@ [project] name = "dify-api" -version = "1.8.0" +version = "1.8.1" requires-python = ">=3.11,<3.13" dependencies = [ diff --git a/api/uv.lock b/api/uv.lock index 32254faa8e..7e67a84ce2 100644 --- a/api/uv.lock +++ b/api/uv.lock @@ -1260,7 +1260,7 @@ wheels = [ [[package]] name = "dify-api" -version = "1.8.0" +version = "1.8.1" source = { virtual = "." } dependencies = [ { name = "arize-phoenix-otel" }, diff --git a/docker/docker-compose-template.yaml b/docker/docker-compose-template.yaml index a779999983..e6f76e1fb4 100644 --- a/docker/docker-compose-template.yaml +++ b/docker/docker-compose-template.yaml @@ -2,7 +2,7 @@ x-shared-env: &shared-api-worker-env services: # API service api: - image: langgenius/dify-api:1.8.0 + image: langgenius/dify-api:1.8.1 restart: always environment: # Use the shared environment variables. diff --git a/docker/docker-compose.yaml b/docker/docker-compose.yaml index bd668be17f..a451a114bc 100644 --- a/docker/docker-compose.yaml +++ b/docker/docker-compose.yaml @@ -582,7 +582,7 @@ x-shared-env: &shared-api-worker-env services: # API service api: - image: langgenius/dify-api:1.8.0 + image: langgenius/dify-api:1.8.1 restart: always environment: # Use the shared environment variables. diff --git a/web/package.json b/web/package.json index e82b41636c..e64d548824 100644 --- a/web/package.json +++ b/web/package.json @@ -1,6 +1,6 @@ { "name": "dify-web", - "version": "1.8.0", + "version": "1.8.1", "private": true, "packageManager": "pnpm@10.15.0", "engines": { From c7700ac1762a4feccf60211d3dca3e39ec65a83c Mon Sep 17 00:00:00 2001 From: -LAN- Date: Wed, 3 Sep 2025 20:25:44 +0800 Subject: [PATCH 07/46] chore(docker): bump version (#25092) Signed-off-by: -LAN- --- docker/docker-compose-template.yaml | 107 ++++++++++++++++++---------- docker/docker-compose.yaml | 107 ++++++++++++++++++---------- 2 files changed, 140 insertions(+), 74 deletions(-) diff --git a/docker/docker-compose-template.yaml b/docker/docker-compose-template.yaml index e6f76e1fb4..b479795c93 100644 --- a/docker/docker-compose-template.yaml +++ b/docker/docker-compose-template.yaml @@ -31,7 +31,7 @@ services: # worker service # The Celery worker for processing the queue. worker: - image: langgenius/dify-api:1.8.0 + image: langgenius/dify-api:1.8.1 restart: always environment: # Use the shared environment variables. @@ -58,7 +58,7 @@ services: # worker_beat service # Celery beat for scheduling periodic tasks. 
worker_beat: - image: langgenius/dify-api:1.8.0 + image: langgenius/dify-api:1.8.1 restart: always environment: # Use the shared environment variables. @@ -76,7 +76,7 @@ services: # Frontend web application. web: - image: langgenius/dify-web:1.8.0 + image: langgenius/dify-web:1.8.1 restart: always environment: CONSOLE_API_URL: ${CONSOLE_API_URL:-} @@ -118,7 +118,17 @@ services: volumes: - ./volumes/db/data:/var/lib/postgresql/data healthcheck: - test: [ 'CMD', 'pg_isready', '-h', 'db', '-U', '${PGUSER:-postgres}', '-d', '${POSTGRES_DB:-dify}' ] + test: + [ + "CMD", + "pg_isready", + "-h", + "db", + "-U", + "${PGUSER:-postgres}", + "-d", + "${POSTGRES_DB:-dify}", + ] interval: 1s timeout: 3s retries: 60 @@ -135,7 +145,11 @@ services: # Set the redis password when startup redis server. command: redis-server --requirepass ${REDIS_PASSWORD:-difyai123456} healthcheck: - test: [ 'CMD-SHELL', 'redis-cli -a ${REDIS_PASSWORD:-difyai123456} ping | grep -q PONG' ] + test: + [ + "CMD-SHELL", + "redis-cli -a ${REDIS_PASSWORD:-difyai123456} ping | grep -q PONG", + ] # The DifySandbox sandbox: @@ -157,7 +171,7 @@ services: - ./volumes/sandbox/dependencies:/dependencies - ./volumes/sandbox/conf:/conf healthcheck: - test: [ 'CMD', 'curl', '-f', 'http://localhost:8194/health' ] + test: ["CMD", "curl", "-f", "http://localhost:8194/health"] networks: - ssrf_proxy_network @@ -231,7 +245,12 @@ services: volumes: - ./ssrf_proxy/squid.conf.template:/etc/squid/squid.conf.template - ./ssrf_proxy/docker-entrypoint.sh:/docker-entrypoint-mount.sh - entrypoint: [ 'sh', '-c', "cp /docker-entrypoint-mount.sh /docker-entrypoint.sh && sed -i 's/\r$$//' /docker-entrypoint.sh && chmod +x /docker-entrypoint.sh && /docker-entrypoint.sh" ] + entrypoint: + [ + "sh", + "-c", + "cp /docker-entrypoint-mount.sh /docker-entrypoint.sh && sed -i 's/\r$$//' /docker-entrypoint.sh && chmod +x /docker-entrypoint.sh && /docker-entrypoint.sh", + ] environment: # pls clearly modify the squid env vars to fit your network environment. HTTP_PORT: ${SSRF_HTTP_PORT:-3128} @@ -260,8 +279,8 @@ services: - CERTBOT_EMAIL=${CERTBOT_EMAIL} - CERTBOT_DOMAIN=${CERTBOT_DOMAIN} - CERTBOT_OPTIONS=${CERTBOT_OPTIONS:-} - entrypoint: [ '/docker-entrypoint.sh' ] - command: [ 'tail', '-f', '/dev/null' ] + entrypoint: ["/docker-entrypoint.sh"] + command: ["tail", "-f", "/dev/null"] # The nginx reverse proxy. # used for reverse proxying the API service and Web service. @@ -278,7 +297,12 @@ services: - ./volumes/certbot/conf/live:/etc/letsencrypt/live # cert dir (with certbot container) - ./volumes/certbot/conf:/etc/letsencrypt - ./volumes/certbot/www:/var/www/html - entrypoint: [ 'sh', '-c', "cp /docker-entrypoint-mount.sh /docker-entrypoint.sh && sed -i 's/\r$$//' /docker-entrypoint.sh && chmod +x /docker-entrypoint.sh && /docker-entrypoint.sh" ] + entrypoint: + [ + "sh", + "-c", + "cp /docker-entrypoint-mount.sh /docker-entrypoint.sh && sed -i 's/\r$$//' /docker-entrypoint.sh && chmod +x /docker-entrypoint.sh && /docker-entrypoint.sh", + ] environment: NGINX_SERVER_NAME: ${NGINX_SERVER_NAME:-_} NGINX_HTTPS_ENABLED: ${NGINX_HTTPS_ENABLED:-false} @@ -300,14 +324,14 @@ services: - api - web ports: - - '${EXPOSE_NGINX_PORT:-80}:${NGINX_PORT:-80}' - - '${EXPOSE_NGINX_SSL_PORT:-443}:${NGINX_SSL_PORT:-443}' + - "${EXPOSE_NGINX_PORT:-80}:${NGINX_PORT:-80}" + - "${EXPOSE_NGINX_SSL_PORT:-443}:${NGINX_SSL_PORT:-443}" # The Weaviate vector store. 
weaviate: image: semitechnologies/weaviate:1.19.0 profiles: - - '' + - "" - weaviate restart: always volumes: @@ -360,13 +384,17 @@ services: working_dir: /opt/couchbase stdin_open: true tty: true - entrypoint: [ "" ] + entrypoint: [""] command: sh -c "/opt/couchbase/init/init-cbserver.sh" volumes: - ./volumes/couchbase/data:/opt/couchbase/var/lib/couchbase/data healthcheck: # ensure bucket was created before proceeding - test: [ "CMD-SHELL", "curl -s -f -u Administrator:password http://localhost:8091/pools/default/buckets | grep -q '\\[{' || exit 1" ] + test: + [ + "CMD-SHELL", + "curl -s -f -u Administrator:password http://localhost:8091/pools/default/buckets | grep -q '\\[{' || exit 1", + ] interval: 10s retries: 10 start_period: 30s @@ -392,9 +420,9 @@ services: volumes: - ./volumes/pgvector/data:/var/lib/postgresql/data - ./pgvector/docker-entrypoint.sh:/docker-entrypoint.sh - entrypoint: [ '/docker-entrypoint.sh' ] + entrypoint: ["/docker-entrypoint.sh"] healthcheck: - test: [ 'CMD', 'pg_isready' ] + test: ["CMD", "pg_isready"] interval: 1s timeout: 3s retries: 30 @@ -411,14 +439,14 @@ services: - VB_USERNAME=dify - VB_PASSWORD=Difyai123456 ports: - - '5434:5432' + - "5434:5432" volumes: - ./vastbase/lic:/home/vastbase/vastbase/lic - ./vastbase/data:/home/vastbase/data - ./vastbase/backup:/home/vastbase/backup - ./vastbase/backup_log:/home/vastbase/backup_log healthcheck: - test: [ 'CMD', 'pg_isready' ] + test: ["CMD", "pg_isready"] interval: 1s timeout: 3s retries: 30 @@ -440,7 +468,7 @@ services: volumes: - ./volumes/pgvecto_rs/data:/var/lib/postgresql/data healthcheck: - test: [ 'CMD', 'pg_isready' ] + test: ["CMD", "pg_isready"] interval: 1s timeout: 3s retries: 30 @@ -479,7 +507,11 @@ services: ports: - "${OCEANBASE_VECTOR_PORT:-2881}:2881" healthcheck: - test: [ 'CMD-SHELL', 'obclient -h127.0.0.1 -P2881 -uroot@test -p$${OB_TENANT_PASSWORD} -e "SELECT 1;"' ] + test: + [ + "CMD-SHELL", + 'obclient -h127.0.0.1 -P2881 -uroot@test -p$${OB_TENANT_PASSWORD} -e "SELECT 1;"', + ] interval: 10s retries: 30 start_period: 30s @@ -515,7 +547,7 @@ services: - ./volumes/milvus/etcd:/etcd command: etcd -advertise-client-urls=http://127.0.0.1:2379 -listen-client-urls http://0.0.0.0:2379 --data-dir /etcd healthcheck: - test: [ 'CMD', 'etcdctl', 'endpoint', 'health' ] + test: ["CMD", "etcdctl", "endpoint", "health"] interval: 30s timeout: 20s retries: 3 @@ -534,7 +566,7 @@ services: - ./volumes/milvus/minio:/minio_data command: minio server /minio_data --console-address ":9001" healthcheck: - test: [ 'CMD', 'curl', '-f', 'http://localhost:9000/minio/health/live' ] + test: ["CMD", "curl", "-f", "http://localhost:9000/minio/health/live"] interval: 30s timeout: 20s retries: 3 @@ -546,7 +578,7 @@ services: image: milvusdb/milvus:v2.5.15 profiles: - milvus - command: [ 'milvus', 'run', 'standalone' ] + command: ["milvus", "run", "standalone"] environment: ETCD_ENDPOINTS: ${ETCD_ENDPOINTS:-etcd:2379} MINIO_ADDRESS: ${MINIO_ADDRESS:-minio:9000} @@ -554,7 +586,7 @@ services: volumes: - ./volumes/milvus/milvus:/var/lib/milvus healthcheck: - test: [ 'CMD', 'curl', '-f', 'http://localhost:9091/healthz' ] + test: ["CMD", "curl", "-f", "http://localhost:9091/healthz"] interval: 30s start_period: 90s timeout: 20s @@ -620,7 +652,7 @@ services: volumes: - ./volumes/opengauss/data:/var/lib/opengauss/data healthcheck: - test: [ "CMD-SHELL", "netstat -lntp | grep tcp6 > /dev/null 2>&1" ] + test: ["CMD-SHELL", "netstat -lntp | grep tcp6 > /dev/null 2>&1"] interval: 10s timeout: 10s retries: 10 @@ -673,18 +705,19 @@ 
services: node.name: dify-es0 discovery.type: single-node xpack.license.self_generated.type: basic - xpack.security.enabled: 'true' - xpack.security.enrollment.enabled: 'false' - xpack.security.http.ssl.enabled: 'false' + xpack.security.enabled: "true" + xpack.security.enrollment.enabled: "false" + xpack.security.http.ssl.enabled: "false" ports: - ${ELASTICSEARCH_PORT:-9200}:9200 deploy: resources: limits: memory: 2g - entrypoint: [ 'sh', '-c', "sh /docker-entrypoint-mount.sh" ] + entrypoint: ["sh", "-c", "sh /docker-entrypoint-mount.sh"] healthcheck: - test: [ 'CMD', 'curl', '-s', 'http://localhost:9200/_cluster/health?pretty' ] + test: + ["CMD", "curl", "-s", "http://localhost:9200/_cluster/health?pretty"] interval: 30s timeout: 10s retries: 50 @@ -702,17 +735,17 @@ services: environment: XPACK_ENCRYPTEDSAVEDOBJECTS_ENCRYPTIONKEY: d1a66dfd-c4d3-4a0a-8290-2abcb83ab3aa NO_PROXY: localhost,127.0.0.1,elasticsearch,kibana - XPACK_SECURITY_ENABLED: 'true' - XPACK_SECURITY_ENROLLMENT_ENABLED: 'false' - XPACK_SECURITY_HTTP_SSL_ENABLED: 'false' - XPACK_FLEET_ISAIRGAPPED: 'true' + XPACK_SECURITY_ENABLED: "true" + XPACK_SECURITY_ENROLLMENT_ENABLED: "false" + XPACK_SECURITY_HTTP_SSL_ENABLED: "false" + XPACK_FLEET_ISAIRGAPPED: "true" I18N_LOCALE: zh-CN - SERVER_PORT: '5601' + SERVER_PORT: "5601" ELASTICSEARCH_HOSTS: http://elasticsearch:9200 ports: - ${KIBANA_PORT:-5601}:5601 healthcheck: - test: [ 'CMD-SHELL', 'curl -s http://localhost:5601 >/dev/null || exit 1' ] + test: ["CMD-SHELL", "curl -s http://localhost:5601 >/dev/null || exit 1"] interval: 30s timeout: 10s retries: 3 diff --git a/docker/docker-compose.yaml b/docker/docker-compose.yaml index a451a114bc..9774df3df5 100644 --- a/docker/docker-compose.yaml +++ b/docker/docker-compose.yaml @@ -611,7 +611,7 @@ services: # worker service # The Celery worker for processing the queue. worker: - image: langgenius/dify-api:1.8.0 + image: langgenius/dify-api:1.8.1 restart: always environment: # Use the shared environment variables. @@ -638,7 +638,7 @@ services: # worker_beat service # Celery beat for scheduling periodic tasks. worker_beat: - image: langgenius/dify-api:1.8.0 + image: langgenius/dify-api:1.8.1 restart: always environment: # Use the shared environment variables. @@ -656,7 +656,7 @@ services: # Frontend web application. web: - image: langgenius/dify-web:1.8.0 + image: langgenius/dify-web:1.8.1 restart: always environment: CONSOLE_API_URL: ${CONSOLE_API_URL:-} @@ -698,7 +698,17 @@ services: volumes: - ./volumes/db/data:/var/lib/postgresql/data healthcheck: - test: [ 'CMD', 'pg_isready', '-h', 'db', '-U', '${PGUSER:-postgres}', '-d', '${POSTGRES_DB:-dify}' ] + test: + [ + "CMD", + "pg_isready", + "-h", + "db", + "-U", + "${PGUSER:-postgres}", + "-d", + "${POSTGRES_DB:-dify}", + ] interval: 1s timeout: 3s retries: 60 @@ -715,7 +725,11 @@ services: # Set the redis password when startup redis server. 
command: redis-server --requirepass ${REDIS_PASSWORD:-difyai123456} healthcheck: - test: [ 'CMD-SHELL', 'redis-cli -a ${REDIS_PASSWORD:-difyai123456} ping | grep -q PONG' ] + test: + [ + "CMD-SHELL", + "redis-cli -a ${REDIS_PASSWORD:-difyai123456} ping | grep -q PONG", + ] # The DifySandbox sandbox: @@ -737,7 +751,7 @@ services: - ./volumes/sandbox/dependencies:/dependencies - ./volumes/sandbox/conf:/conf healthcheck: - test: [ 'CMD', 'curl', '-f', 'http://localhost:8194/health' ] + test: ["CMD", "curl", "-f", "http://localhost:8194/health"] networks: - ssrf_proxy_network @@ -811,7 +825,12 @@ services: volumes: - ./ssrf_proxy/squid.conf.template:/etc/squid/squid.conf.template - ./ssrf_proxy/docker-entrypoint.sh:/docker-entrypoint-mount.sh - entrypoint: [ 'sh', '-c', "cp /docker-entrypoint-mount.sh /docker-entrypoint.sh && sed -i 's/\r$$//' /docker-entrypoint.sh && chmod +x /docker-entrypoint.sh && /docker-entrypoint.sh" ] + entrypoint: + [ + "sh", + "-c", + "cp /docker-entrypoint-mount.sh /docker-entrypoint.sh && sed -i 's/\r$$//' /docker-entrypoint.sh && chmod +x /docker-entrypoint.sh && /docker-entrypoint.sh", + ] environment: # pls clearly modify the squid env vars to fit your network environment. HTTP_PORT: ${SSRF_HTTP_PORT:-3128} @@ -840,8 +859,8 @@ services: - CERTBOT_EMAIL=${CERTBOT_EMAIL} - CERTBOT_DOMAIN=${CERTBOT_DOMAIN} - CERTBOT_OPTIONS=${CERTBOT_OPTIONS:-} - entrypoint: [ '/docker-entrypoint.sh' ] - command: [ 'tail', '-f', '/dev/null' ] + entrypoint: ["/docker-entrypoint.sh"] + command: ["tail", "-f", "/dev/null"] # The nginx reverse proxy. # used for reverse proxying the API service and Web service. @@ -858,7 +877,12 @@ services: - ./volumes/certbot/conf/live:/etc/letsencrypt/live # cert dir (with certbot container) - ./volumes/certbot/conf:/etc/letsencrypt - ./volumes/certbot/www:/var/www/html - entrypoint: [ 'sh', '-c', "cp /docker-entrypoint-mount.sh /docker-entrypoint.sh && sed -i 's/\r$$//' /docker-entrypoint.sh && chmod +x /docker-entrypoint.sh && /docker-entrypoint.sh" ] + entrypoint: + [ + "sh", + "-c", + "cp /docker-entrypoint-mount.sh /docker-entrypoint.sh && sed -i 's/\r$$//' /docker-entrypoint.sh && chmod +x /docker-entrypoint.sh && /docker-entrypoint.sh", + ] environment: NGINX_SERVER_NAME: ${NGINX_SERVER_NAME:-_} NGINX_HTTPS_ENABLED: ${NGINX_HTTPS_ENABLED:-false} @@ -880,14 +904,14 @@ services: - api - web ports: - - '${EXPOSE_NGINX_PORT:-80}:${NGINX_PORT:-80}' - - '${EXPOSE_NGINX_SSL_PORT:-443}:${NGINX_SSL_PORT:-443}' + - "${EXPOSE_NGINX_PORT:-80}:${NGINX_PORT:-80}" + - "${EXPOSE_NGINX_SSL_PORT:-443}:${NGINX_SSL_PORT:-443}" # The Weaviate vector store. 
weaviate: image: semitechnologies/weaviate:1.19.0 profiles: - - '' + - "" - weaviate restart: always volumes: @@ -940,13 +964,17 @@ services: working_dir: /opt/couchbase stdin_open: true tty: true - entrypoint: [ "" ] + entrypoint: [""] command: sh -c "/opt/couchbase/init/init-cbserver.sh" volumes: - ./volumes/couchbase/data:/opt/couchbase/var/lib/couchbase/data healthcheck: # ensure bucket was created before proceeding - test: [ "CMD-SHELL", "curl -s -f -u Administrator:password http://localhost:8091/pools/default/buckets | grep -q '\\[{' || exit 1" ] + test: + [ + "CMD-SHELL", + "curl -s -f -u Administrator:password http://localhost:8091/pools/default/buckets | grep -q '\\[{' || exit 1", + ] interval: 10s retries: 10 start_period: 30s @@ -972,9 +1000,9 @@ services: volumes: - ./volumes/pgvector/data:/var/lib/postgresql/data - ./pgvector/docker-entrypoint.sh:/docker-entrypoint.sh - entrypoint: [ '/docker-entrypoint.sh' ] + entrypoint: ["/docker-entrypoint.sh"] healthcheck: - test: [ 'CMD', 'pg_isready' ] + test: ["CMD", "pg_isready"] interval: 1s timeout: 3s retries: 30 @@ -991,14 +1019,14 @@ services: - VB_USERNAME=dify - VB_PASSWORD=Difyai123456 ports: - - '5434:5432' + - "5434:5432" volumes: - ./vastbase/lic:/home/vastbase/vastbase/lic - ./vastbase/data:/home/vastbase/data - ./vastbase/backup:/home/vastbase/backup - ./vastbase/backup_log:/home/vastbase/backup_log healthcheck: - test: [ 'CMD', 'pg_isready' ] + test: ["CMD", "pg_isready"] interval: 1s timeout: 3s retries: 30 @@ -1020,7 +1048,7 @@ services: volumes: - ./volumes/pgvecto_rs/data:/var/lib/postgresql/data healthcheck: - test: [ 'CMD', 'pg_isready' ] + test: ["CMD", "pg_isready"] interval: 1s timeout: 3s retries: 30 @@ -1059,7 +1087,11 @@ services: ports: - "${OCEANBASE_VECTOR_PORT:-2881}:2881" healthcheck: - test: [ 'CMD-SHELL', 'obclient -h127.0.0.1 -P2881 -uroot@test -p$${OB_TENANT_PASSWORD} -e "SELECT 1;"' ] + test: + [ + "CMD-SHELL", + 'obclient -h127.0.0.1 -P2881 -uroot@test -p$${OB_TENANT_PASSWORD} -e "SELECT 1;"', + ] interval: 10s retries: 30 start_period: 30s @@ -1095,7 +1127,7 @@ services: - ./volumes/milvus/etcd:/etcd command: etcd -advertise-client-urls=http://127.0.0.1:2379 -listen-client-urls http://0.0.0.0:2379 --data-dir /etcd healthcheck: - test: [ 'CMD', 'etcdctl', 'endpoint', 'health' ] + test: ["CMD", "etcdctl", "endpoint", "health"] interval: 30s timeout: 20s retries: 3 @@ -1114,7 +1146,7 @@ services: - ./volumes/milvus/minio:/minio_data command: minio server /minio_data --console-address ":9001" healthcheck: - test: [ 'CMD', 'curl', '-f', 'http://localhost:9000/minio/health/live' ] + test: ["CMD", "curl", "-f", "http://localhost:9000/minio/health/live"] interval: 30s timeout: 20s retries: 3 @@ -1126,7 +1158,7 @@ services: image: milvusdb/milvus:v2.5.15 profiles: - milvus - command: [ 'milvus', 'run', 'standalone' ] + command: ["milvus", "run", "standalone"] environment: ETCD_ENDPOINTS: ${ETCD_ENDPOINTS:-etcd:2379} MINIO_ADDRESS: ${MINIO_ADDRESS:-minio:9000} @@ -1134,7 +1166,7 @@ services: volumes: - ./volumes/milvus/milvus:/var/lib/milvus healthcheck: - test: [ 'CMD', 'curl', '-f', 'http://localhost:9091/healthz' ] + test: ["CMD", "curl", "-f", "http://localhost:9091/healthz"] interval: 30s start_period: 90s timeout: 20s @@ -1200,7 +1232,7 @@ services: volumes: - ./volumes/opengauss/data:/var/lib/opengauss/data healthcheck: - test: [ "CMD-SHELL", "netstat -lntp | grep tcp6 > /dev/null 2>&1" ] + test: ["CMD-SHELL", "netstat -lntp | grep tcp6 > /dev/null 2>&1"] interval: 10s timeout: 10s retries: 10 @@ 
-1253,18 +1285,19 @@ services: node.name: dify-es0 discovery.type: single-node xpack.license.self_generated.type: basic - xpack.security.enabled: 'true' - xpack.security.enrollment.enabled: 'false' - xpack.security.http.ssl.enabled: 'false' + xpack.security.enabled: "true" + xpack.security.enrollment.enabled: "false" + xpack.security.http.ssl.enabled: "false" ports: - ${ELASTICSEARCH_PORT:-9200}:9200 deploy: resources: limits: memory: 2g - entrypoint: [ 'sh', '-c', "sh /docker-entrypoint-mount.sh" ] + entrypoint: ["sh", "-c", "sh /docker-entrypoint-mount.sh"] healthcheck: - test: [ 'CMD', 'curl', '-s', 'http://localhost:9200/_cluster/health?pretty' ] + test: + ["CMD", "curl", "-s", "http://localhost:9200/_cluster/health?pretty"] interval: 30s timeout: 10s retries: 50 @@ -1282,17 +1315,17 @@ services: environment: XPACK_ENCRYPTEDSAVEDOBJECTS_ENCRYPTIONKEY: d1a66dfd-c4d3-4a0a-8290-2abcb83ab3aa NO_PROXY: localhost,127.0.0.1,elasticsearch,kibana - XPACK_SECURITY_ENABLED: 'true' - XPACK_SECURITY_ENROLLMENT_ENABLED: 'false' - XPACK_SECURITY_HTTP_SSL_ENABLED: 'false' - XPACK_FLEET_ISAIRGAPPED: 'true' + XPACK_SECURITY_ENABLED: "true" + XPACK_SECURITY_ENROLLMENT_ENABLED: "false" + XPACK_SECURITY_HTTP_SSL_ENABLED: "false" + XPACK_FLEET_ISAIRGAPPED: "true" I18N_LOCALE: zh-CN - SERVER_PORT: '5601' + SERVER_PORT: "5601" ELASTICSEARCH_HOSTS: http://elasticsearch:9200 ports: - ${KIBANA_PORT:-5601}:5601 healthcheck: - test: [ 'CMD-SHELL', 'curl -s http://localhost:5601 >/dev/null || exit 1' ] + test: ["CMD-SHELL", "curl -s http://localhost:5601 >/dev/null || exit 1"] interval: 30s timeout: 10s retries: 3 From ff7a0e3170492222954843f6ffa31731d538eb7b Mon Sep 17 00:00:00 2001 From: GuanMu Date: Wed, 3 Sep 2025 22:24:45 +0800 Subject: [PATCH 08/46] fix: improve error logging for vector search operation in MyScale (#25087) --- api/core/rag/datasource/vdb/myscale/myscale_vector.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/api/core/rag/datasource/vdb/myscale/myscale_vector.py b/api/core/rag/datasource/vdb/myscale/myscale_vector.py index 99f766a88a..d048f3b34e 100644 --- a/api/core/rag/datasource/vdb/myscale/myscale_vector.py +++ b/api/core/rag/datasource/vdb/myscale/myscale_vector.py @@ -152,8 +152,8 @@ class MyScaleVector(BaseVector): ) for r in self._client.query(sql).named_results() ] - except Exception as e: - logger.exception("\033[91m\033[1m%s\033[0m \033[95m%s\033[0m", type(e), str(e)) # noqa:TRY401 + except Exception: + logger.exception("Vector search operation failed") return [] def delete(self) -> None: From db53656a45fdb13447a410b9b4609d991013d89d Mon Sep 17 00:00:00 2001 From: Yongtao Huang Date: Wed, 3 Sep 2025 22:27:41 +0800 Subject: [PATCH 09/46] Fix jsonschema compliance: use number instead of float (#25049) Signed-off-by: Yongtao Huang Co-authored-by: autofix-ci[bot] <114827586+autofix-ci[bot]@users.noreply.github.com> --- api/core/mcp/server/streamable_http.py | 2 +- .../core/mcp/server/test_streamable_http.py | 65 ++++++++++++++++++- 2 files changed, 65 insertions(+), 2 deletions(-) diff --git a/api/core/mcp/server/streamable_http.py b/api/core/mcp/server/streamable_http.py index 5851c6d406..3d51ac2333 100644 --- a/api/core/mcp/server/streamable_http.py +++ b/api/core/mcp/server/streamable_http.py @@ -258,5 +258,5 @@ def convert_input_form_to_parameters( parameters[item.variable]["type"] = "string" parameters[item.variable]["enum"] = item.options elif item.type == VariableEntityType.NUMBER: - parameters[item.variable]["type"] = "float" + 
parameters[item.variable]["type"] = "number" return parameters, required diff --git a/api/tests/unit_tests/core/mcp/server/test_streamable_http.py b/api/tests/unit_tests/core/mcp/server/test_streamable_http.py index ccc5d42bcf..f1d741602a 100644 --- a/api/tests/unit_tests/core/mcp/server/test_streamable_http.py +++ b/api/tests/unit_tests/core/mcp/server/test_streamable_http.py @@ -1,6 +1,7 @@ import json from unittest.mock import Mock, patch +import jsonschema import pytest from core.app.app_config.entities import VariableEntity, VariableEntityType @@ -434,7 +435,7 @@ class TestUtilityFunctions: assert parameters["category"]["enum"] == ["A", "B", "C"] assert "count" in parameters - assert parameters["count"]["type"] == "float" + assert parameters["count"]["type"] == "number" # FILE type should be skipped - it creates empty dict but gets filtered later # Check that it doesn't have any meaningful content @@ -447,3 +448,65 @@ class TestUtilityFunctions: assert "category" not in required # Note: _get_request_id function has been removed as request_id is now passed as parameter + + def test_convert_input_form_to_parameters_jsonschema_validation_ok(self): + """Current schema uses 'number' for numeric fields; it should be a valid JSON Schema.""" + user_input_form = [ + VariableEntity( + type=VariableEntityType.NUMBER, + variable="count", + description="Count", + label="Count", + required=True, + ), + VariableEntity( + type=VariableEntityType.TEXT_INPUT, + variable="name", + description="User name", + label="Name", + required=False, + ), + ] + + parameters_dict = { + "count": "Enter count", + "name": "Enter your name", + } + + parameters, required = convert_input_form_to_parameters(user_input_form, parameters_dict) + + # Build a complete JSON Schema + schema = { + "type": "object", + "properties": parameters, + "required": required, + } + + # 1) The schema itself must be valid + jsonschema.Draft202012Validator.check_schema(schema) + + # 2) Both float and integer instances should pass validation + jsonschema.validate(instance={"count": 3.14, "name": "alice"}, schema=schema) + jsonschema.validate(instance={"count": 2, "name": "bob"}, schema=schema) + + def test_legacy_float_type_schema_is_invalid(self): + """Legacy/buggy behavior: using 'float' should produce an invalid JSON Schema.""" + # Manually construct a legacy/incorrect schema (simulating old behavior) + bad_schema = { + "type": "object", + "properties": { + "count": { + "type": "float", # Invalid type: JSON Schema does not support 'float' + "description": "Enter count", + } + }, + "required": ["count"], + } + + # The schema itself should raise a SchemaError + with pytest.raises(jsonschema.exceptions.SchemaError): + jsonschema.Draft202012Validator.check_schema(bad_schema) + + # Or validation should also raise SchemaError + with pytest.raises(jsonschema.exceptions.SchemaError): + jsonschema.validate(instance={"count": 1.23}, schema=bad_schema) From aae792a9dd043f1d50a390816302efb61fb4cd3f Mon Sep 17 00:00:00 2001 From: 17hz <0x149527@gmail.com> Date: Wed, 3 Sep 2025 22:28:03 +0800 Subject: [PATCH 10/46] chore: Updated pnpm version to 10.15.1 (#25065) --- web/package.json | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/web/package.json b/web/package.json index e64d548824..c736a37281 100644 --- a/web/package.json +++ b/web/package.json @@ -2,7 +2,7 @@ "name": "dify-web", "version": "1.8.1", "private": true, - "packageManager": "pnpm@10.15.0", + "packageManager": "pnpm@10.15.1", "engines": { "node": ">=v22.11.0" }, From 
a9c7669c16b02df2617e0477e3e1f4e5552c61ab Mon Sep 17 00:00:00 2001 From: NeatGuyCoding <15627489+NeatGuyCoding@users.noreply.github.com> Date: Wed, 3 Sep 2025 22:29:08 +0800 Subject: [PATCH 11/46] chore: comply to RFC 6750 and improve bearer token split (#24955) --- api/controllers/console/auth/oauth_server.py | 29 +++++++++++++++----- 1 file changed, 22 insertions(+), 7 deletions(-) diff --git a/api/controllers/console/auth/oauth_server.py b/api/controllers/console/auth/oauth_server.py index f730cfa3fe..a8ba417847 100644 --- a/api/controllers/console/auth/oauth_server.py +++ b/api/controllers/console/auth/oauth_server.py @@ -2,7 +2,7 @@ from functools import wraps from typing import cast import flask_login -from flask import request +from flask import jsonify, request from flask_restx import Resource, reqparse from werkzeug.exceptions import BadRequest, NotFound @@ -46,23 +46,38 @@ def oauth_server_access_token_required(view): authorization_header = request.headers.get("Authorization") if not authorization_header: - raise BadRequest("Authorization header is required") + response = jsonify({"error": "Authorization header is required"}) + response.status_code = 401 + response.headers["WWW-Authenticate"] = "Bearer" + return response - parts = authorization_header.strip().split(" ") + parts = authorization_header.strip().split(None, 1) if len(parts) != 2: - raise BadRequest("Invalid Authorization header format") + response = jsonify({"error": "Invalid Authorization header format"}) + response.status_code = 401 + response.headers["WWW-Authenticate"] = "Bearer" + return response token_type = parts[0].strip() if token_type.lower() != "bearer": - raise BadRequest("token_type is invalid") + response = jsonify({"error": "token_type is invalid"}) + response.status_code = 401 + response.headers["WWW-Authenticate"] = "Bearer" + return response access_token = parts[1].strip() if not access_token: - raise BadRequest("access_token is required") + response = jsonify({"error": "access_token is required"}) + response.status_code = 401 + response.headers["WWW-Authenticate"] = "Bearer" + return response account = OAuthServerService.validate_oauth_access_token(oauth_provider_app.client_id, access_token) if not account: - raise BadRequest("access_token or client_id is invalid") + response = jsonify({"error": "access_token or client_id is invalid"}) + response.status_code = 401 + response.headers["WWW-Authenticate"] = "Bearer" + return response kwargs["account"] = account From 56afb3fd64401a2ff30832f37104b89e52d4aeaf Mon Sep 17 00:00:00 2001 From: zz_xu <591933870@qq.com> Date: Wed, 3 Sep 2025 22:44:22 +0800 Subject: [PATCH 12/46] db internal server error (#24947) Co-authored-by: crazywoola <100913391+crazywoola@users.noreply.github.com> --- api/.env.example | 1 + 1 file changed, 1 insertion(+) diff --git a/api/.env.example b/api/.env.example index e947c5584b..eb88c114e6 100644 --- a/api/.env.example +++ b/api/.env.example @@ -75,6 +75,7 @@ DB_PASSWORD=difyai123456 DB_HOST=localhost DB_PORT=5432 DB_DATABASE=dify +SQLALCHEMY_POOL_PRE_PING=true # Storage configuration # use for store upload files, private keys... 
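A note on the one-line fix above: SQLALCHEMY_POOL_PRE_PING mirrors SQLAlchemy's pool_pre_ping engine option, the standard remedy for intermittent internal server errors caused by stale pooled connections (for example, after the database drops idle sessions). Assuming the API wires this env var into its engine options — the variable name suggests exactly that — the behavior it enables is sketched below; the DSN is illustrative only.

    # Minimal sketch of pool pre-ping, independent of Dify's config plumbing.
    # Before handing out a pooled connection, SQLAlchemy issues a cheap
    # liveness check and transparently replaces the connection if it has
    # gone stale, instead of surfacing an internal server error.
    from sqlalchemy import create_engine, text

    engine = create_engine(
        "postgresql://postgres:difyai123456@localhost:5432/dify",  # example DSN
        pool_pre_ping=True,  # test connections on checkout, recycle dead ones
    )

    with engine.connect() as conn:
        conn.execute(text("SELECT 1"))  # succeeds even if the pooled conn died
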
From fe3f03e50af8c0f3127a79bb1596c66509f225ef Mon Sep 17 00:00:00 2001 From: -LAN- Date: Thu, 4 Sep 2025 02:08:58 +0800 Subject: [PATCH 13/46] feat: add property-based access control to GraphRuntimeState - Replace direct field access with private attributes and property decorators - Implement deep copy protection for mutable objects (dict, LLMUsage) - Add helper methods: set_output(), get_output(), update_outputs() - Add increment_node_run_steps() and add_tokens() convenience methods - Update loop_node and event_handlers to use new accessor methods - Add comprehensive unit tests for immutability and validation - Ensure backward compatibility with existing property access patterns --- .../workflow/entities/graph_runtime_state.py | 144 ++++++++++++++++-- .../event_management/event_handlers.py | 8 +- api/core/workflow/nodes/loop/loop_node.py | 8 +- .../entities/test_graph_runtime_state.py | 114 ++++++++++++++ 4 files changed, 250 insertions(+), 24 deletions(-) create mode 100644 api/tests/unit_tests/core/workflow/entities/test_graph_runtime_state.py diff --git a/api/core/workflow/entities/graph_runtime_state.py b/api/core/workflow/entities/graph_runtime_state.py index 19aa0d27e6..e9bf2ea26c 100644 --- a/api/core/workflow/entities/graph_runtime_state.py +++ b/api/core/workflow/entities/graph_runtime_state.py @@ -1,6 +1,7 @@ +from copy import deepcopy from typing import Any -from pydantic import BaseModel, Field +from pydantic import BaseModel, PrivateAttr from core.model_runtime.entities.llm_entities import LLMUsage @@ -8,21 +9,132 @@ from .variable_pool import VariablePool class GraphRuntimeState(BaseModel): - variable_pool: VariablePool = Field(..., description="variable pool") - """variable pool""" + # Private attributes to prevent direct modification + _variable_pool: VariablePool = PrivateAttr() + _start_at: float = PrivateAttr() + _total_tokens: int = PrivateAttr(default=0) + _llm_usage: LLMUsage = PrivateAttr(default_factory=LLMUsage.empty_usage) + _outputs: dict[str, Any] = PrivateAttr(default_factory=dict) + _node_run_steps: int = PrivateAttr(default=0) - start_at: float = Field(..., description="start time") - """start time""" - total_tokens: int = 0 - """total tokens""" - llm_usage: LLMUsage = LLMUsage.empty_usage() - """llm usage info""" + def __init__( + self, + variable_pool: VariablePool, + start_at: float, + total_tokens: int = 0, + llm_usage: LLMUsage | None = None, + outputs: dict[str, Any] | None = None, + node_run_steps: int = 0, + **kwargs, + ): + """Initialize the GraphRuntimeState with validation.""" + super().__init__(**kwargs) - # The `outputs` field stores the final output values generated by executing workflows or chatflows. - # - # Note: Since the type of this field is `dict[str, Any]`, its values may not remain consistent - # after a serialization and deserialization round trip. 
- outputs: dict[str, Any] = Field(default_factory=dict) + # Initialize private attributes with validation + self._variable_pool = variable_pool - node_run_steps: int = 0 - """node run steps""" + self._start_at = start_at + + if total_tokens < 0: + raise ValueError("total_tokens must be non-negative") + self._total_tokens = total_tokens + + if llm_usage is None: + llm_usage = LLMUsage.empty_usage() + self._llm_usage = llm_usage + + if outputs is None: + outputs = {} + self._outputs = deepcopy(outputs) + + if node_run_steps < 0: + raise ValueError("node_run_steps must be non-negative") + self._node_run_steps = node_run_steps + + @property + def variable_pool(self) -> VariablePool: + """Get the variable pool.""" + return self._variable_pool + + @variable_pool.setter + def variable_pool(self, value: VariablePool) -> None: + """Set the variable pool.""" + self._variable_pool = value + + @property + def start_at(self) -> float: + """Get the start time.""" + return self._start_at + + @start_at.setter + def start_at(self, value: float) -> None: + """Set the start time.""" + self._start_at = value + + @property + def total_tokens(self) -> int: + """Get the total tokens count.""" + return self._total_tokens + + @total_tokens.setter + def total_tokens(self, value: int): + """Set the total tokens count.""" + if value < 0: + raise ValueError("total_tokens must be non-negative") + self._total_tokens = value + + @property + def llm_usage(self) -> LLMUsage: + """Get the LLM usage info.""" + # Return a copy to prevent external modification + return self._llm_usage.model_copy() + + @llm_usage.setter + def llm_usage(self, value: LLMUsage): + """Set the LLM usage info.""" + self._llm_usage = value.model_copy() + + @property + def outputs(self) -> dict[str, Any]: + """Get a copy of the outputs dictionary.""" + return deepcopy(self._outputs) + + @outputs.setter + def outputs(self, value: dict[str, Any]) -> None: + """Set the outputs dictionary.""" + self._outputs = deepcopy(value) + + def set_output(self, key: str, value: Any) -> None: + """Set a single output value.""" + self._outputs[key] = deepcopy(value) + + def get_output(self, key: str, default: Any = None) -> Any: + """Get a single output value.""" + return deepcopy(self._outputs.get(key, default)) + + def update_outputs(self, updates: dict[str, Any]) -> None: + """Update multiple output values.""" + for key, value in updates.items(): + self._outputs[key] = deepcopy(value) + + @property + def node_run_steps(self) -> int: + """Get the node run steps count.""" + return self._node_run_steps + + @node_run_steps.setter + def node_run_steps(self, value: int) -> None: + """Set the node run steps count.""" + if value < 0: + raise ValueError("node_run_steps must be non-negative") + self._node_run_steps = value + + def increment_node_run_steps(self) -> None: + """Increment the node run steps by 1.""" + self._node_run_steps += 1 + + def add_tokens(self, tokens: int) -> None: + """Add tokens to the total count.""" + if tokens < 0: + raise ValueError("tokens must be non-negative") + self._total_tokens += tokens diff --git a/api/core/workflow/graph_engine/event_management/event_handlers.py b/api/core/workflow/graph_engine/event_management/event_handlers.py index 4efb5991ec..3ab69776a4 100644 --- a/api/core/workflow/graph_engine/event_management/event_handlers.py +++ b/api/core/workflow/graph_engine/event_management/event_handlers.py @@ -267,10 +267,10 @@ class EventHandler: # in runtime state, rather than allowing nodes to directly access runtime state. 
for key, value in event.node_run_result.outputs.items(): if key == "answer": - existing = self._graph_runtime_state.outputs.get("answer", "") + existing = self._graph_runtime_state.get_output("answer", "") if existing: - self._graph_runtime_state.outputs["answer"] = f"{existing}{value}" + self._graph_runtime_state.set_output("answer", f"{existing}{value}") else: - self._graph_runtime_state.outputs["answer"] = value + self._graph_runtime_state.set_output("answer", value) else: - self._graph_runtime_state.outputs[key] = value + self._graph_runtime_state.set_output(key, value) diff --git a/api/core/workflow/nodes/loop/loop_node.py b/api/core/workflow/nodes/loop/loop_node.py index dffeee66f5..11d46fe998 100644 --- a/api/core/workflow/nodes/loop/loop_node.py +++ b/api/core/workflow/nodes/loop/loop_node.py @@ -147,14 +147,14 @@ class LoopNode(Node): for key, value in graph_engine.graph_runtime_state.outputs.items(): if key == "answer": # Concatenate answer outputs with newline - existing_answer = self.graph_runtime_state.outputs.get("answer", "") + existing_answer = self.graph_runtime_state.get_output("answer", "") if existing_answer: - self.graph_runtime_state.outputs["answer"] = f"{existing_answer}{value}" + self.graph_runtime_state.set_output("answer", f"{existing_answer}{value}") else: - self.graph_runtime_state.outputs["answer"] = value + self.graph_runtime_state.set_output("answer", value) else: # For other outputs, just update - self.graph_runtime_state.outputs[key] = value + self.graph_runtime_state.set_output(key, value) # Update the total tokens from this iteration cost_tokens += graph_engine.graph_runtime_state.total_tokens diff --git a/api/tests/unit_tests/core/workflow/entities/test_graph_runtime_state.py b/api/tests/unit_tests/core/workflow/entities/test_graph_runtime_state.py new file mode 100644 index 0000000000..61c1fd3181 --- /dev/null +++ b/api/tests/unit_tests/core/workflow/entities/test_graph_runtime_state.py @@ -0,0 +1,114 @@ +from time import time + +import pytest + +from core.workflow.entities.graph_runtime_state import GraphRuntimeState +from core.workflow.entities.variable_pool import VariablePool + + +class TestGraphRuntimeState: + def test_property_getters_and_setters(self): + # FIXME(-LAN-): Mock VariablePool if needed + variable_pool = VariablePool() + start_time = time() + + state = GraphRuntimeState(variable_pool=variable_pool, start_at=start_time) + + # Test variable_pool property + assert state.variable_pool == variable_pool + new_pool = VariablePool() + state.variable_pool = new_pool + assert state.variable_pool == new_pool + + # Test start_at property + assert state.start_at == start_time + new_time = time() + 100 + state.start_at = new_time + assert state.start_at == new_time + + # Test total_tokens property + assert state.total_tokens == 0 + state.total_tokens = 100 + assert state.total_tokens == 100 + + # Test node_run_steps property + assert state.node_run_steps == 0 + state.node_run_steps = 5 + assert state.node_run_steps == 5 + + def test_outputs_immutability(self): + variable_pool = VariablePool() + state = GraphRuntimeState(variable_pool=variable_pool, start_at=time()) + + # Test that getting outputs returns a copy + outputs1 = state.outputs + outputs2 = state.outputs + assert outputs1 == outputs2 + assert outputs1 is not outputs2 # Different objects + + # Test that modifying retrieved outputs doesn't affect internal state + outputs = state.outputs + outputs["test"] = "value" + assert "test" not in state.outputs + + # Test set_output method + 
state.set_output("key1", "value1") + assert state.get_output("key1") == "value1" + + # Test update_outputs method + state.update_outputs({"key2": "value2", "key3": "value3"}) + assert state.get_output("key2") == "value2" + assert state.get_output("key3") == "value3" + + def test_llm_usage_immutability(self): + variable_pool = VariablePool() + state = GraphRuntimeState(variable_pool=variable_pool, start_at=time()) + + # Test that getting llm_usage returns a copy + usage1 = state.llm_usage + usage2 = state.llm_usage + assert usage1 is not usage2 # Different objects + + def test_type_validation(self): + variable_pool = VariablePool() + state = GraphRuntimeState(variable_pool=variable_pool, start_at=time()) + + # Test total_tokens validation + with pytest.raises(ValueError): + state.total_tokens = -1 + + # Test node_run_steps validation + with pytest.raises(ValueError): + state.node_run_steps = -1 + + def test_helper_methods(self): + variable_pool = VariablePool() + state = GraphRuntimeState(variable_pool=variable_pool, start_at=time()) + + # Test increment_node_run_steps + initial_steps = state.node_run_steps + state.increment_node_run_steps() + assert state.node_run_steps == initial_steps + 1 + + # Test add_tokens + initial_tokens = state.total_tokens + state.add_tokens(50) + assert state.total_tokens == initial_tokens + 50 + + # Test add_tokens validation + with pytest.raises(ValueError): + state.add_tokens(-1) + + def test_deep_copy_for_nested_objects(self): + variable_pool = VariablePool() + state = GraphRuntimeState(variable_pool=variable_pool, start_at=time()) + + # Test deep copy for nested dict + nested_data = {"level1": {"level2": {"value": "test"}}} + state.set_output("nested", nested_data) + + retrieved = state.get_output("nested") + retrieved["level1"]["level2"]["value"] = "modified" + + # Original should remain unchanged + assert state.get_output("nested")["level1"]["level2"]["value"] == "test" From 8332472944e6af039a4de4ca711b3ab6e4df1f14 Mon Sep 17 00:00:00 2001 From: -LAN- Date: Thu, 4 Sep 2025 02:11:31 +0800 Subject: [PATCH 14/46] refactor(graph_engine): rename Layer to GraphEngineLayer Signed-off-by: -LAN- --- .../workflow/graph_engine/event_management/event_manager.py | 6 +++--- api/core/workflow/graph_engine/graph_engine.py | 6 +++--- api/core/workflow/graph_engine/layers/__init__.py | 4 ++-- api/core/workflow/graph_engine/layers/base.py | 2 +- api/core/workflow/graph_engine/layers/debug_logging.py | 4 ++-- api/core/workflow/graph_engine/layers/execution_limits.py | 4 ++-- 6 files changed, 13 insertions(+), 13 deletions(-) diff --git a/api/core/workflow/graph_engine/event_management/event_manager.py b/api/core/workflow/graph_engine/event_management/event_manager.py index d34f4e032b..6f37193070 100644 --- a/api/core/workflow/graph_engine/event_management/event_manager.py +++ b/api/core/workflow/graph_engine/event_management/event_manager.py @@ -9,7 +9,7 @@ from typing import final from core.workflow.graph_events import GraphEngineEvent -from ..layers.base import Layer +from ..layers.base import GraphEngineLayer @final @@ -104,10 +104,10 @@ class EventManager: """Initialize the event manager.""" self._events: list[GraphEngineEvent] = [] self._lock = ReadWriteLock() - self._layers: list[Layer] = [] + self._layers: list[GraphEngineLayer] = [] self._execution_complete = threading.Event() - def set_layers(self, layers: list[Layer]) -> None: + def set_layers(self, layers: list[GraphEngineLayer]) -> None: """ Set the layers to notify on event collection. 
diff --git a/api/core/workflow/graph_engine/graph_engine.py b/api/core/workflow/graph_engine/graph_engine.py index 833cee0ffe..7fd2825020 100644 --- a/api/core/workflow/graph_engine/graph_engine.py +++ b/api/core/workflow/graph_engine/graph_engine.py @@ -33,7 +33,7 @@ from .entities.commands import AbortCommand from .error_handling import ErrorHandler from .event_management import EventHandler, EventManager from .graph_traversal import EdgeProcessor, SkipPropagator -from .layers.base import Layer +from .layers.base import GraphEngineLayer from .orchestration import Dispatcher, ExecutionCoordinator from .protocols.command_channel import CommandChannel from .response_coordinator import ResponseStreamCoordinator @@ -221,7 +221,7 @@ class GraphEngine: # === Extensibility === # Layers allow plugins to extend engine functionality - self._layers: list[Layer] = [] + self._layers: list[GraphEngineLayer] = [] # === Validation === # Ensure all nodes share the same GraphRuntimeState instance @@ -234,7 +234,7 @@ class GraphEngine: if id(node.graph_runtime_state) != expected_state_id: raise ValueError(f"GraphRuntimeState consistency violation: Node '{node.id}' has a different instance") - def layer(self, layer: Layer) -> "GraphEngine": + def layer(self, layer: GraphEngineLayer) -> "GraphEngine": """Add a layer for extending functionality.""" self._layers.append(layer) return self diff --git a/api/core/workflow/graph_engine/layers/__init__.py b/api/core/workflow/graph_engine/layers/__init__.py index 4749c74044..0a29a52993 100644 --- a/api/core/workflow/graph_engine/layers/__init__.py +++ b/api/core/workflow/graph_engine/layers/__init__.py @@ -5,12 +5,12 @@ This module provides the layer infrastructure for extending GraphEngine function with middleware-like components that can observe events and interact with execution. """ -from .base import Layer +from .base import GraphEngineLayer from .debug_logging import DebugLoggingLayer from .execution_limits import ExecutionLimitsLayer __all__ = [ "DebugLoggingLayer", "ExecutionLimitsLayer", - "Layer", + "GraphEngineLayer", ] diff --git a/api/core/workflow/graph_engine/layers/base.py b/api/core/workflow/graph_engine/layers/base.py index febdc3de6d..9899d46016 100644 --- a/api/core/workflow/graph_engine/layers/base.py +++ b/api/core/workflow/graph_engine/layers/base.py @@ -12,7 +12,7 @@ from core.workflow.graph_engine.protocols.command_channel import CommandChannel from core.workflow.graph_events import GraphEngineEvent -class Layer(ABC): +class GraphEngineLayer(ABC): """ Abstract base class for GraphEngine layers. diff --git a/api/core/workflow/graph_engine/layers/debug_logging.py b/api/core/workflow/graph_engine/layers/debug_logging.py index 42bacfa474..ddfdfa0edd 100644 --- a/api/core/workflow/graph_engine/layers/debug_logging.py +++ b/api/core/workflow/graph_engine/layers/debug_logging.py @@ -33,11 +33,11 @@ from core.workflow.graph_events import ( NodeRunSucceededEvent, ) -from .base import Layer +from .base import GraphEngineLayer @final -class DebugLoggingLayer(Layer): +class DebugLoggingLayer(GraphEngineLayer): """ A layer that provides comprehensive logging of GraphEngine execution. 
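Since the base class now carries a more explicit name, here is what a user-defined layer looks like against it. This is a minimal sketch: the hook names follow the TestLayer/FaultyLayer in the unit tests of PATCH 15 below, so treat any signature not shown in those tests as an assumption.

    # A minimal observer layer on the renamed base class. initialize() is
    # inherited from GraphEngineLayer; the on_* hooks are the extension
    # points (hook names as exercised by the tests in PATCH 15 below).
    from core.workflow.graph_engine.layers import GraphEngineLayer
    from core.workflow.graph_events import GraphEngineEvent


    class EventCountingLayer(GraphEngineLayer):
        def __init__(self) -> None:
            super().__init__()
            self.seen = 0

        def on_graph_start(self) -> None:
            self.seen = 0

        def on_event(self, event: GraphEngineEvent) -> None:
            self.seen += 1

        def on_graph_end(self, error: Exception | None) -> None:
            print(f"graph finished after {self.seen} events")

    # Layers attach fluently; layer() returns the engine itself:
    # engine.layer(EventCountingLayer()).layer(DebugLoggingLayer())
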
diff --git a/api/core/workflow/graph_engine/layers/execution_limits.py b/api/core/workflow/graph_engine/layers/execution_limits.py
index 6cc0c1305a..d74dc9b082 100644
--- a/api/core/workflow/graph_engine/layers/execution_limits.py
+++ b/api/core/workflow/graph_engine/layers/execution_limits.py
@@ -16,7 +16,7 @@ from typing import final
 from typing_extensions import override
 
 from core.workflow.graph_engine.entities.commands import AbortCommand, CommandType
-from core.workflow.graph_engine.layers import Layer
+from core.workflow.graph_engine.layers import GraphEngineLayer
 from core.workflow.graph_events import (
     GraphEngineEvent,
     NodeRunStartedEvent,
@@ -32,7 +32,7 @@ class LimitType(Enum):
 
 
 @final
-class ExecutionLimitsLayer(Layer):
+class ExecutionLimitsLayer(GraphEngineLayer):
     """
     Layer that enforces execution limits for workflows.
 

From 61c79b0013ad3ac8fe8e825a225b6fabd08fb0ef Mon Sep 17 00:00:00 2001
From: -LAN-
Date: Thu, 4 Sep 2025 02:15:46 +0800
Subject: [PATCH 15/46] test: correct imported name

---
 .../core/workflow/graph_engine/test_graph_engine.py | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)

diff --git a/api/tests/unit_tests/core/workflow/graph_engine/test_graph_engine.py b/api/tests/unit_tests/core/workflow/graph_engine/test_graph_engine.py
index f0774f7a29..11eecb6d77 100644
--- a/api/tests/unit_tests/core/workflow/graph_engine/test_graph_engine.py
+++ b/api/tests/unit_tests/core/workflow/graph_engine/test_graph_engine.py
@@ -498,10 +498,10 @@ def test_layer_system_basic():
 
 def test_layer_chaining():
     """Test chaining multiple layers."""
-    from core.workflow.graph_engine.layers import DebugLoggingLayer, Layer
+    from core.workflow.graph_engine.layers import DebugLoggingLayer, GraphEngineLayer
 
     # Create a custom test layer
-    class TestLayer(Layer):
+    class TestLayer(GraphEngineLayer):
         def __init__(self):
             super().__init__()
             self.events_received = []
@@ -560,10 +560,10 @@ def test_layer_chaining():
 
 def test_layer_error_handling():
     """Test that layer errors don't crash the engine."""
-    from core.workflow.graph_engine.layers import Layer
+    from core.workflow.graph_engine.layers import GraphEngineLayer
 
     # Create a layer that throws errors
-    class FaultyLayer(Layer):
+    class FaultyLayer(GraphEngineLayer):
         def on_graph_start(self):
             raise RuntimeError("Intentional error in on_graph_start")
 

From 16e9cd5ac55474469c77c9408ec8039514da8d79 Mon Sep 17 00:00:00 2001
From: -LAN-
Date: Thu, 4 Sep 2025 02:20:19 +0800
Subject: [PATCH 16/46] feat(graph_runtime_state): prevent setting the variable pool after initialization.
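
With the setter gone, the variable pool can only be supplied through the constructor; later rebinding fails instead of silently swapping state under a running engine. A quick sketch of the new contract (reads still work, writes do not):

    from time import time

    from core.workflow.entities.graph_runtime_state import GraphRuntimeState
    from core.workflow.entities.variable_pool import VariablePool

    state = GraphRuntimeState(variable_pool=VariablePool(), start_at=time())
    pool = state.variable_pool            # read access is unchanged

    # state.variable_pool = VariablePool()  # now raises: property has no setter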
--- api/core/workflow/entities/graph_runtime_state.py | 5 ----- 1 file changed, 5 deletions(-) diff --git a/api/core/workflow/entities/graph_runtime_state.py b/api/core/workflow/entities/graph_runtime_state.py index e9bf2ea26c..36662a065e 100644 --- a/api/core/workflow/entities/graph_runtime_state.py +++ b/api/core/workflow/entities/graph_runtime_state.py @@ -56,11 +56,6 @@ class GraphRuntimeState(BaseModel): """Get the variable pool.""" return self._variable_pool - @variable_pool.setter - def variable_pool(self, value: VariablePool) -> None: - """Set the variable pool.""" - self._variable_pool = value - @property def start_at(self) -> float: """Get the start time.""" From 657c27ec754606ff24fcd552c7f4af7da903f01e Mon Sep 17 00:00:00 2001 From: -LAN- Date: Thu, 4 Sep 2025 02:30:40 +0800 Subject: [PATCH 17/46] feat(graph_engine): make runtime state read-only in layer Signed-off-by: -LAN- --- api/core/workflow/graph/__init__.py | 13 +++- .../graph/graph_runtime_state_protocol.py | 59 ++++++++++++++ .../workflow/graph/read_only_state_wrapper.py | 76 +++++++++++++++++++ .../workflow/graph_engine/graph_engine.py | 5 +- api/core/workflow/graph_engine/layers/base.py | 14 ++-- 5 files changed, 158 insertions(+), 9 deletions(-) create mode 100644 api/core/workflow/graph/graph_runtime_state_protocol.py create mode 100644 api/core/workflow/graph/read_only_state_wrapper.py diff --git a/api/core/workflow/graph/__init__.py b/api/core/workflow/graph/__init__.py index 6bfed26c44..900e704b65 100644 --- a/api/core/workflow/graph/__init__.py +++ b/api/core/workflow/graph/__init__.py @@ -1,5 +1,16 @@ from .edge import Edge from .graph import Graph, NodeFactory +from .graph_runtime_state_protocol import ReadOnlyGraphRuntimeState, ReadOnlyVariablePool from .graph_template import GraphTemplate +from .read_only_state_wrapper import ReadOnlyGraphRuntimeStateWrapper, ReadOnlyVariablePoolWrapper -__all__ = ["Edge", "Graph", "GraphTemplate", "NodeFactory"] +__all__ = [ + "Edge", + "Graph", + "GraphTemplate", + "NodeFactory", + "ReadOnlyGraphRuntimeState", + "ReadOnlyVariablePool", + "ReadOnlyGraphRuntimeStateWrapper", + "ReadOnlyVariablePoolWrapper", +] diff --git a/api/core/workflow/graph/graph_runtime_state_protocol.py b/api/core/workflow/graph/graph_runtime_state_protocol.py new file mode 100644 index 0000000000..173076eb19 --- /dev/null +++ b/api/core/workflow/graph/graph_runtime_state_protocol.py @@ -0,0 +1,59 @@ +from typing import Any, Protocol + +from core.model_runtime.entities.llm_entities import LLMUsage + + +class ReadOnlyVariablePool(Protocol): + """Read-only interface for VariablePool.""" + + def get(self, node_id: str, variable_key: str) -> Any: + """Get a variable value (read-only).""" + ... + + def get_all_by_node(self, node_id: str) -> dict[str, Any]: + """Get all variables for a node (read-only).""" + ... + + +class ReadOnlyGraphRuntimeState(Protocol): + """ + Read-only view of GraphRuntimeState for layers. + + This protocol defines a read-only interface that prevents layers from + modifying the graph runtime state while still allowing observation. + All methods return defensive copies to ensure immutability. + """ + + @property + def variable_pool(self) -> ReadOnlyVariablePool: + """Get read-only access to the variable pool.""" + ... + + @property + def start_at(self) -> float: + """Get the start time (read-only).""" + ... + + @property + def total_tokens(self) -> int: + """Get the total tokens count (read-only).""" + ... 
+ + @property + def llm_usage(self) -> LLMUsage: + """Get a copy of LLM usage info (read-only).""" + ... + + @property + def outputs(self) -> dict[str, Any]: + """Get a defensive copy of outputs (read-only).""" + ... + + @property + def node_run_steps(self) -> int: + """Get the node run steps count (read-only).""" + ... + + def get_output(self, key: str, default: Any = None) -> Any: + """Get a single output value (returns a copy).""" + ... \ No newline at end of file diff --git a/api/core/workflow/graph/read_only_state_wrapper.py b/api/core/workflow/graph/read_only_state_wrapper.py new file mode 100644 index 0000000000..f643baf5fc --- /dev/null +++ b/api/core/workflow/graph/read_only_state_wrapper.py @@ -0,0 +1,76 @@ +from copy import deepcopy +from typing import Any + +from core.model_runtime.entities.llm_entities import LLMUsage +from core.workflow.entities.graph_runtime_state import GraphRuntimeState +from core.workflow.entities.variable_pool import VariablePool + + +class ReadOnlyVariablePoolWrapper: + """Wrapper that provides read-only access to VariablePool.""" + + def __init__(self, variable_pool: VariablePool): + self._variable_pool = variable_pool + + def get(self, node_id: str, variable_key: str) -> Any: + """Get a variable value (returns a defensive copy).""" + value = self._variable_pool.get(node_id, variable_key) + return deepcopy(value) if value is not None else None + + def get_all_by_node(self, node_id: str) -> dict[str, Any]: + """Get all variables for a node (returns defensive copies).""" + variables = {} + if node_id in self._variable_pool.variable_dictionary: + for key, var in self._variable_pool.variable_dictionary[node_id].items(): + # FIXME(-LAN-): Handle the actual Variable object structure + value = var.value if hasattr(var, "value") else var + variables[key] = deepcopy(value) + return variables + + +class ReadOnlyGraphRuntimeStateWrapper: + """ + Wrapper that provides read-only access to GraphRuntimeState. + + This wrapper ensures that layers can observe the state without + modifying it. All returned values are defensive copies. 
+ """ + + def __init__(self, state: GraphRuntimeState): + self._state = state + self._variable_pool_wrapper = ReadOnlyVariablePoolWrapper(state.variable_pool) + + @property + def variable_pool(self) -> ReadOnlyVariablePoolWrapper: + """Get read-only access to the variable pool.""" + return self._variable_pool_wrapper + + @property + def start_at(self) -> float: + """Get the start time (read-only).""" + return self._state.start_at + + @property + def total_tokens(self) -> int: + """Get the total tokens count (read-only).""" + return self._state.total_tokens + + @property + def llm_usage(self) -> LLMUsage: + """Get a copy of LLM usage info (read-only).""" + # Return a copy to prevent modification + return self._state.llm_usage.model_copy() + + @property + def outputs(self) -> dict[str, Any]: + """Get a defensive copy of outputs (read-only).""" + return deepcopy(self._state.outputs) + + @property + def node_run_steps(self) -> int: + """Get the node run steps count (read-only).""" + return self._state.node_run_steps + + def get_output(self, key: str, default: Any = None) -> Any: + """Get a single output value (returns a copy).""" + return self._state.get_output(key, default) \ No newline at end of file diff --git a/api/core/workflow/graph_engine/graph_engine.py b/api/core/workflow/graph_engine/graph_engine.py index 7fd2825020..fbb63dff8b 100644 --- a/api/core/workflow/graph_engine/graph_engine.py +++ b/api/core/workflow/graph_engine/graph_engine.py @@ -16,6 +16,7 @@ from flask import Flask, current_app from core.app.entities.app_invoke_entities import InvokeFrom from core.workflow.entities import GraphRuntimeState from core.workflow.enums import NodeExecutionType +from core.workflow.graph.read_only_state_wrapper import ReadOnlyGraphRuntimeStateWrapper from core.workflow.graph import Graph from core.workflow.graph_events import ( GraphEngineEvent, @@ -288,9 +289,11 @@ class GraphEngine: def _initialize_layers(self) -> None: """Initialize layers with context.""" self._event_manager.set_layers(self._layers) + # Create a read-only wrapper for the runtime state + read_only_state = ReadOnlyGraphRuntimeStateWrapper(self._graph_runtime_state) for layer in self._layers: try: - layer.initialize(self._graph_runtime_state, self._command_channel) + layer.initialize(read_only_state, self._command_channel) except Exception as e: logger.warning("Failed to initialize layer %s: %s", layer.__class__.__name__, e) diff --git a/api/core/workflow/graph_engine/layers/base.py b/api/core/workflow/graph_engine/layers/base.py index 9899d46016..dfac49e11a 100644 --- a/api/core/workflow/graph_engine/layers/base.py +++ b/api/core/workflow/graph_engine/layers/base.py @@ -7,7 +7,7 @@ intercept and respond to GraphEngine events. from abc import ABC, abstractmethod -from core.workflow.entities import GraphRuntimeState +from core.workflow.graph.graph_runtime_state_protocol import ReadOnlyGraphRuntimeState from core.workflow.graph_engine.protocols.command_channel import CommandChannel from core.workflow.graph_events import GraphEngineEvent @@ -27,19 +27,19 @@ class GraphEngineLayer(ABC): def __init__(self) -> None: """Initialize the layer. 
Subclasses can override with custom parameters.""" - self.graph_runtime_state: GraphRuntimeState | None = None + self.graph_runtime_state: ReadOnlyGraphRuntimeState | None = None self.command_channel: CommandChannel | None = None - def initialize(self, graph_runtime_state: GraphRuntimeState, command_channel: CommandChannel) -> None: + def initialize(self, graph_runtime_state: ReadOnlyGraphRuntimeState, command_channel: CommandChannel) -> None: """ Initialize the layer with engine dependencies. - Called by GraphEngine before execution starts to inject the runtime state - and command channel. This allows layers to access engine context and send - commands. + Called by GraphEngine before execution starts to inject the read-only runtime state + and command channel. This allows layers to observe engine context and send + commands, but prevents direct state modification. Args: - graph_runtime_state: The runtime state of the graph execution + graph_runtime_state: Read-only view of the runtime state command_channel: Channel for sending commands to the engine """ self.graph_runtime_state = graph_runtime_state From 04bbf540d9a55c3ea001b6b5d14634b73ca837a6 Mon Sep 17 00:00:00 2001 From: -LAN- Date: Thu, 4 Sep 2025 02:33:53 +0800 Subject: [PATCH 18/46] chore: code format Signed-off-by: -LAN- --- api/core/workflow/graph/__init__.py | 2 +- api/core/workflow/graph/graph_runtime_state_protocol.py | 4 ++-- api/core/workflow/graph/read_only_state_wrapper.py | 4 ++-- api/core/workflow/graph_engine/graph_engine.py | 2 +- 4 files changed, 6 insertions(+), 6 deletions(-) diff --git a/api/core/workflow/graph/__init__.py b/api/core/workflow/graph/__init__.py index 900e704b65..31a81d494e 100644 --- a/api/core/workflow/graph/__init__.py +++ b/api/core/workflow/graph/__init__.py @@ -10,7 +10,7 @@ __all__ = [ "GraphTemplate", "NodeFactory", "ReadOnlyGraphRuntimeState", - "ReadOnlyVariablePool", "ReadOnlyGraphRuntimeStateWrapper", + "ReadOnlyVariablePool", "ReadOnlyVariablePoolWrapper", ] diff --git a/api/core/workflow/graph/graph_runtime_state_protocol.py b/api/core/workflow/graph/graph_runtime_state_protocol.py index 173076eb19..a5c8db333a 100644 --- a/api/core/workflow/graph/graph_runtime_state_protocol.py +++ b/api/core/workflow/graph/graph_runtime_state_protocol.py @@ -18,7 +18,7 @@ class ReadOnlyVariablePool(Protocol): class ReadOnlyGraphRuntimeState(Protocol): """ Read-only view of GraphRuntimeState for layers. - + This protocol defines a read-only interface that prevents layers from modifying the graph runtime state while still allowing observation. All methods return defensive copies to ensure immutability. @@ -56,4 +56,4 @@ class ReadOnlyGraphRuntimeState(Protocol): def get_output(self, key: str, default: Any = None) -> Any: """Get a single output value (returns a copy).""" - ... \ No newline at end of file + ... diff --git a/api/core/workflow/graph/read_only_state_wrapper.py b/api/core/workflow/graph/read_only_state_wrapper.py index f643baf5fc..3562106a4c 100644 --- a/api/core/workflow/graph/read_only_state_wrapper.py +++ b/api/core/workflow/graph/read_only_state_wrapper.py @@ -31,7 +31,7 @@ class ReadOnlyVariablePoolWrapper: class ReadOnlyGraphRuntimeStateWrapper: """ Wrapper that provides read-only access to GraphRuntimeState. - + This wrapper ensures that layers can observe the state without modifying it. All returned values are defensive copies. 
""" @@ -73,4 +73,4 @@ class ReadOnlyGraphRuntimeStateWrapper: def get_output(self, key: str, default: Any = None) -> Any: """Get a single output value (returns a copy).""" - return self._state.get_output(key, default) \ No newline at end of file + return self._state.get_output(key, default) diff --git a/api/core/workflow/graph_engine/graph_engine.py b/api/core/workflow/graph_engine/graph_engine.py index fbb63dff8b..fb3c0aadf6 100644 --- a/api/core/workflow/graph_engine/graph_engine.py +++ b/api/core/workflow/graph_engine/graph_engine.py @@ -16,8 +16,8 @@ from flask import Flask, current_app from core.app.entities.app_invoke_entities import InvokeFrom from core.workflow.entities import GraphRuntimeState from core.workflow.enums import NodeExecutionType -from core.workflow.graph.read_only_state_wrapper import ReadOnlyGraphRuntimeStateWrapper from core.workflow.graph import Graph +from core.workflow.graph.read_only_state_wrapper import ReadOnlyGraphRuntimeStateWrapper from core.workflow.graph_events import ( GraphEngineEvent, GraphNodeEventBase, From ed22d04ea0356b6896f033efca2e80b5822a03ed Mon Sep 17 00:00:00 2001 From: -LAN- Date: Thu, 4 Sep 2025 02:42:36 +0800 Subject: [PATCH 19/46] test: remove outdated test case --- .../core/workflow/entities/test_graph_runtime_state.py | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/api/tests/unit_tests/core/workflow/entities/test_graph_runtime_state.py b/api/tests/unit_tests/core/workflow/entities/test_graph_runtime_state.py index 61c1fd3181..4d8483ce0d 100644 --- a/api/tests/unit_tests/core/workflow/entities/test_graph_runtime_state.py +++ b/api/tests/unit_tests/core/workflow/entities/test_graph_runtime_state.py @@ -14,11 +14,8 @@ class TestGraphRuntimeState: state = GraphRuntimeState(variable_pool=variable_pool, start_at=start_time) - # Test variable_pool property + # Test variable_pool property (read-only) assert state.variable_pool == variable_pool - new_pool = VariablePool() - state.variable_pool = new_pool - assert state.variable_pool == new_pool # Test start_at property assert state.start_at == start_time From 017a75aa444623f42b5516d211c3d7b9dcf2e1a8 Mon Sep 17 00:00:00 2001 From: -LAN- Date: Thu, 4 Sep 2025 09:34:50 +0800 Subject: [PATCH 20/46] chore: enhance basedpyright-check script to support path arguments (#25108) --- dev/basedpyright-check | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) diff --git a/dev/basedpyright-check b/dev/basedpyright-check index 267ef2a522..ef58ed1f57 100755 --- a/dev/basedpyright-check +++ b/dev/basedpyright-check @@ -5,5 +5,12 @@ set -x SCRIPT_DIR="$(dirname "$(realpath "$0")")" cd "$SCRIPT_DIR/.." 
+# Get the path argument if provided +PATH_TO_CHECK="$1" + # run basedpyright checks -uv run --directory api --dev basedpyright +if [ -n "$PATH_TO_CHECK" ]; then + uv run --directory api --dev basedpyright "$PATH_TO_CHECK" +else + uv run --directory api --dev basedpyright +fi From 53c4a8787f13b5e2d53664a4fc982ba816de877b Mon Sep 17 00:00:00 2001 From: -LAN- Date: Thu, 4 Sep 2025 09:35:32 +0800 Subject: [PATCH 21/46] [Chore/Refactor] Improve type safety and resolve type checking issues (#25104) --- .../rag/datasource/vdb/tablestore/tablestore_vector.py | 10 ++++++++-- .../storage/clickzetta_volume/file_lifecycle.py | 4 ++-- api/pyrightconfig.json | 1 - api/services/dataset_service.py | 2 +- 4 files changed, 11 insertions(+), 6 deletions(-) diff --git a/api/core/rag/datasource/vdb/tablestore/tablestore_vector.py b/api/core/rag/datasource/vdb/tablestore/tablestore_vector.py index dbb25d289d..9c55351522 100644 --- a/api/core/rag/datasource/vdb/tablestore/tablestore_vector.py +++ b/api/core/rag/datasource/vdb/tablestore/tablestore_vector.py @@ -1,6 +1,7 @@ import json import logging import math +from collections.abc import Iterable from typing import Any, Optional import tablestore # type: ignore @@ -102,9 +103,12 @@ class TableStoreVector(BaseVector): return uuids def text_exists(self, id: str) -> bool: - _, return_row, _ = self._tablestore_client.get_row( + result = self._tablestore_client.get_row( table_name=self._table_name, primary_key=[("id", id)], columns_to_get=["id"] ) + assert isinstance(result, tuple | list) + # Unpack the tuple result + _, return_row, _ = result return return_row is not None @@ -169,6 +173,7 @@ class TableStoreVector(BaseVector): def _create_search_index_if_not_exist(self, dimension: int) -> None: search_index_list = self._tablestore_client.list_search_index(table_name=self._table_name) + assert isinstance(search_index_list, Iterable) if self._index_name in [t[1] for t in search_index_list]: logger.info("Tablestore system index[%s] already exists", self._index_name) return None @@ -212,6 +217,7 @@ class TableStoreVector(BaseVector): def _delete_table_if_exist(self): search_index_list = self._tablestore_client.list_search_index(table_name=self._table_name) + assert isinstance(search_index_list, Iterable) for resp_tuple in search_index_list: self._tablestore_client.delete_search_index(resp_tuple[0], resp_tuple[1]) logger.info("Tablestore delete index[%s] successfully.", self._index_name) @@ -269,7 +275,7 @@ class TableStoreVector(BaseVector): ) if search_response is not None: - rows.extend([row[0][0][1] for row in search_response.rows]) + rows.extend([row[0][0][1] for row in list(search_response.rows)]) if search_response is None or search_response.next_token == b"": break diff --git a/api/extensions/storage/clickzetta_volume/file_lifecycle.py b/api/extensions/storage/clickzetta_volume/file_lifecycle.py index f5d6fd6f22..c41344774f 100644 --- a/api/extensions/storage/clickzetta_volume/file_lifecycle.py +++ b/api/extensions/storage/clickzetta_volume/file_lifecycle.py @@ -1,8 +1,8 @@ """ClickZetta Volume file lifecycle management This module provides file lifecycle management features including version control, -automatic cleanup, backup and restore. Supports complete lifecycle management for -knowledge base files. +automatic cleanup, backup and restore. +Supports complete lifecycle management for knowledge base files. 
""" import json diff --git a/api/pyrightconfig.json b/api/pyrightconfig.json index 28ccbafd0b..80fd10558e 100644 --- a/api/pyrightconfig.json +++ b/api/pyrightconfig.json @@ -5,7 +5,6 @@ "pythonVersion": "3.11", "pythonPlatform": "All", "reportMissingTypeStubs": false, - "reportGeneralTypeIssues": "none", "reportOptionalMemberAccess": "none", "reportOptionalIterable": "none", "reportOptionalOperand": "none", diff --git a/api/services/dataset_service.py b/api/services/dataset_service.py index 2e057b81c2..a5550c7012 100644 --- a/api/services/dataset_service.py +++ b/api/services/dataset_service.py @@ -1093,7 +1093,7 @@ class DocumentService: account: Account | Any, dataset_process_rule: Optional[DatasetProcessRule] = None, created_from: str = "web", - ): + ) -> tuple[list[Document], str]: # check doc_form DatasetService.check_doc_form(dataset, knowledge_config.doc_form) # check document limit From 8effbaf101fe09c89f36a9902aeac31e83586f3c Mon Sep 17 00:00:00 2001 From: znn Date: Thu, 4 Sep 2025 07:33:13 +0530 Subject: [PATCH 22/46] make icon consistent in dropdown (#25109) --- web/app/components/header/nav/nav-selector/index.tsx | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/web/app/components/header/nav/nav-selector/index.tsx b/web/app/components/header/nav/nav-selector/index.tsx index 77cf348da2..3858758195 100644 --- a/web/app/components/header/nav/nav-selector/index.tsx +++ b/web/app/components/header/nav/nav-selector/index.tsx @@ -11,7 +11,7 @@ import { useRouter } from 'next/navigation' import { debounce } from 'lodash-es' import cn from '@/utils/classnames' import AppIcon from '@/app/components/base/app-icon' -import { AiText, ChatBot, CuteRobot } from '@/app/components/base/icons/src/vender/solid/communication' +import { AiText, BubbleTextMod, ChatBot, CuteRobot } from '@/app/components/base/icons/src/vender/solid/communication' import { Route } from '@/app/components/base/icons/src/vender/solid/mapsAndTravel' import { useAppContext } from '@/context/app-context' import { useStore as useAppStore } from '@/app/components/app/store' @@ -90,7 +90,7 @@ const NavSelector = ({ curNav, navs, createText, isApp, onCreate, onLoadmore }: 'absolute -bottom-0.5 -right-0.5 h-3.5 w-3.5 rounded border-[0.5px] border-[rgba(0,0,0,0.02)] bg-white p-0.5 shadow-sm', )}> {nav.mode === 'advanced-chat' && ( - + )} {nav.mode === 'agent-chat' && ( From 3427f19a01d7fa8f77e46f3aff3bff498753fc63 Mon Sep 17 00:00:00 2001 From: Will Date: Thu, 4 Sep 2025 10:29:12 +0800 Subject: [PATCH 23/46] chore: improved trace info for generating conversation name (#25118) --- api/core/app/task_pipeline/message_cycle_manager.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/api/core/app/task_pipeline/message_cycle_manager.py b/api/core/app/task_pipeline/message_cycle_manager.py index bd4d218ce0..8ea4a4ec38 100644 --- a/api/core/app/task_pipeline/message_cycle_manager.py +++ b/api/core/app/task_pipeline/message_cycle_manager.py @@ -99,12 +99,13 @@ class MessageCycleManager: # generate conversation name try: - name = LLMGenerator.generate_conversation_name(app_model.tenant_id, query) + name = LLMGenerator.generate_conversation_name( + app_model.tenant_id, query, conversation_id, conversation.app_id + ) conversation.name = name except Exception: if dify_config.DEBUG: logger.exception("generate conversation name failed, conversation_id: %s", conversation_id) - pass db.session.merge(conversation) db.session.commit() From ac057a2d4019293d544e5d0a081ccca0850fc7c1 Mon Sep 17 00:00:00 2001 
From: Yongtao Huang Date: Thu, 4 Sep 2025 10:30:04 +0800 Subject: [PATCH 24/46] Chore: remove dead code in class Graph (#22791) Co-authored-by: Yongtao Huang <99629139+hyongtao-db@users.noreply.github.com> --- api/core/model_runtime/README.md | 2 +- api/core/model_runtime/README_CN.md | 2 +- .../workflow/graph_engine/entities/graph.py | 49 ++----------------- .../core/workflow/graph_engine/test_graph.py | 11 ----- 4 files changed, 5 insertions(+), 59 deletions(-) diff --git a/api/core/model_runtime/README.md b/api/core/model_runtime/README.md index 3abb3f63ac..a6caa7eb1e 100644 --- a/api/core/model_runtime/README.md +++ b/api/core/model_runtime/README.md @@ -7,7 +7,7 @@ This module provides the interface for invoking and authenticating various model ## Features -- Supports capability invocation for 5 types of models +- Supports capability invocation for 6 types of models - `LLM` - LLM text completion, dialogue, pre-computed tokens capability - `Text Embedding Model` - Text Embedding, pre-computed tokens capability diff --git a/api/core/model_runtime/README_CN.md b/api/core/model_runtime/README_CN.md index 19846481e0..dfe614347a 100644 --- a/api/core/model_runtime/README_CN.md +++ b/api/core/model_runtime/README_CN.md @@ -7,7 +7,7 @@ ## 功能介绍 -- 支持 5 种模型类型的能力调用 +- 支持 6 种模型类型的能力调用 - `LLM` - LLM 文本补全、对话,预计算 tokens 能力 - `Text Embedding Model` - 文本 Embedding,预计算 tokens 能力 diff --git a/api/core/workflow/graph_engine/entities/graph.py b/api/core/workflow/graph_engine/entities/graph.py index 362777a199..49984806c9 100644 --- a/api/core/workflow/graph_engine/entities/graph.py +++ b/api/core/workflow/graph_engine/entities/graph.py @@ -204,47 +204,6 @@ class Graph(BaseModel): return graph - def add_extra_edge( - self, source_node_id: str, target_node_id: str, run_condition: Optional[RunCondition] = None - ) -> None: - """ - Add extra edge to the graph - - :param source_node_id: source node id - :param target_node_id: target node id - :param run_condition: run condition - """ - if source_node_id not in self.node_ids or target_node_id not in self.node_ids: - return - - if source_node_id not in self.edge_mapping: - self.edge_mapping[source_node_id] = [] - - if target_node_id in [graph_edge.target_node_id for graph_edge in self.edge_mapping[source_node_id]]: - return - - graph_edge = GraphEdge( - source_node_id=source_node_id, target_node_id=target_node_id, run_condition=run_condition - ) - - self.edge_mapping[source_node_id].append(graph_edge) - - def get_leaf_node_ids(self) -> list[str]: - """ - Get leaf node ids of the graph - - :return: leaf node ids - """ - leaf_node_ids = [] - for node_id in self.node_ids: - if node_id not in self.edge_mapping or ( - len(self.edge_mapping[node_id]) == 1 - and self.edge_mapping[node_id][0].target_node_id == self.root_node_id - ): - leaf_node_ids.append(node_id) - - return leaf_node_ids - @classmethod def _recursively_add_node_ids( cls, node_ids: list[str], edge_mapping: dict[str, list[GraphEdge]], node_id: str @@ -681,11 +640,8 @@ class Graph(BaseModel): if start_node_id not in reverse_edge_mapping: return False - all_routes_node_ids = set() parallel_start_node_ids: dict[str, list[str]] = {} - for branch_node_id, node_ids in routes_node_ids.items(): - all_routes_node_ids.update(node_ids) - + for branch_node_id in routes_node_ids: if branch_node_id in reverse_edge_mapping: for graph_edge in reverse_edge_mapping[branch_node_id]: if graph_edge.source_node_id not in parallel_start_node_ids: @@ -693,8 +649,9 @@ class Graph(BaseModel): 
parallel_start_node_ids[graph_edge.source_node_id].append(branch_node_id) + expected_branch_set = set(routes_node_ids.keys()) for _, branch_node_ids in parallel_start_node_ids.items(): - if set(branch_node_ids) == set(routes_node_ids.keys()): + if set(branch_node_ids) == expected_branch_set: return True return False diff --git a/api/tests/unit_tests/core/workflow/graph_engine/test_graph.py b/api/tests/unit_tests/core/workflow/graph_engine/test_graph.py index 13ba11016a..7660cd6ea0 100644 --- a/api/tests/unit_tests/core/workflow/graph_engine/test_graph.py +++ b/api/tests/unit_tests/core/workflow/graph_engine/test_graph.py @@ -1,6 +1,4 @@ from core.workflow.graph_engine.entities.graph import Graph -from core.workflow.graph_engine.entities.run_condition import RunCondition -from core.workflow.utils.condition.entities import Condition def test_init(): @@ -162,14 +160,6 @@ def test__init_iteration_graph(): } graph = Graph.init(graph_config=graph_config, root_node_id="template-transform-in-iteration") - graph.add_extra_edge( - source_node_id="answer-in-iteration", - target_node_id="template-transform-in-iteration", - run_condition=RunCondition( - type="condition", - conditions=[Condition(variable_selector=["iteration", "index"], comparison_operator="≤", value="5")], - ), - ) # iteration: # [template-transform-in-iteration -> llm-in-iteration -> answer-in-iteration] @@ -177,7 +167,6 @@ def test__init_iteration_graph(): assert graph.root_node_id == "template-transform-in-iteration" assert graph.edge_mapping.get("template-transform-in-iteration")[0].target_node_id == "llm-in-iteration" assert graph.edge_mapping.get("llm-in-iteration")[0].target_node_id == "answer-in-iteration" - assert graph.edge_mapping.get("answer-in-iteration")[0].target_node_id == "template-transform-in-iteration" def test_parallels_graph(): From c0d82a412db2deac989f7e11396f1b27559aa14d Mon Sep 17 00:00:00 2001 From: NeatGuyCoding <15627489+NeatGuyCoding@users.noreply.github.com> Date: Thu, 4 Sep 2025 10:30:24 +0800 Subject: [PATCH 25/46] feat: add test containers based tests for workflow converter (#25115) Co-authored-by: autofix-ci[bot] <114827586+autofix-ci[bot]@users.noreply.github.com> --- .../services/workflow/__init__.py | 0 .../workflow/test_workflow_converter.py | 553 ++++++++++++++++++ 2 files changed, 553 insertions(+) create mode 100644 api/tests/test_containers_integration_tests/services/workflow/__init__.py create mode 100644 api/tests/test_containers_integration_tests/services/workflow/test_workflow_converter.py diff --git a/api/tests/test_containers_integration_tests/services/workflow/__init__.py b/api/tests/test_containers_integration_tests/services/workflow/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/api/tests/test_containers_integration_tests/services/workflow/test_workflow_converter.py b/api/tests/test_containers_integration_tests/services/workflow/test_workflow_converter.py new file mode 100644 index 0000000000..8b3db27525 --- /dev/null +++ b/api/tests/test_containers_integration_tests/services/workflow/test_workflow_converter.py @@ -0,0 +1,553 @@ +import json +from unittest.mock import patch + +import pytest +from faker import Faker + +from core.app.app_config.entities import ( + DatasetEntity, + DatasetRetrieveConfigEntity, + ExternalDataVariableEntity, + ModelConfigEntity, + PromptTemplateEntity, + VariableEntity, + VariableEntityType, +) +from core.model_runtime.entities.llm_entities import LLMMode +from models.account import Account, Tenant +from 
models.api_based_extension import APIBasedExtension +from models.model import App, AppMode, AppModelConfig +from models.workflow import Workflow +from services.workflow.workflow_converter import WorkflowConverter + + +class TestWorkflowConverter: + """Integration tests for WorkflowConverter using testcontainers.""" + + @pytest.fixture + def mock_external_service_dependencies(self): + """Mock setup for external service dependencies.""" + with ( + patch("services.workflow.workflow_converter.encrypter") as mock_encrypter, + patch("services.workflow.workflow_converter.SimplePromptTransform") as mock_prompt_transform, + patch("services.workflow.workflow_converter.AgentChatAppConfigManager") as mock_agent_chat_config_manager, + patch("services.workflow.workflow_converter.ChatAppConfigManager") as mock_chat_config_manager, + patch("services.workflow.workflow_converter.CompletionAppConfigManager") as mock_completion_config_manager, + ): + # Setup default mock returns + mock_encrypter.decrypt_token.return_value = "decrypted_api_key" + mock_prompt_transform.return_value.get_prompt_template.return_value = { + "prompt_template": type("obj", (object,), {"template": "You are a helpful assistant {{text_input}}"})(), + "prompt_rules": {"human_prefix": "Human", "assistant_prefix": "Assistant"}, + } + mock_agent_chat_config_manager.get_app_config.return_value = self._create_mock_app_config() + mock_chat_config_manager.get_app_config.return_value = self._create_mock_app_config() + mock_completion_config_manager.get_app_config.return_value = self._create_mock_app_config() + + yield { + "encrypter": mock_encrypter, + "prompt_transform": mock_prompt_transform, + "agent_chat_config_manager": mock_agent_chat_config_manager, + "chat_config_manager": mock_chat_config_manager, + "completion_config_manager": mock_completion_config_manager, + } + + def _create_mock_app_config(self): + """Helper method to create a mock app config.""" + mock_config = type("obj", (object,), {})() + mock_config.variables = [ + VariableEntity( + variable="text_input", + label="Text Input", + type=VariableEntityType.TEXT_INPUT, + ) + ] + mock_config.model = ModelConfigEntity( + provider="openai", + model="gpt-4", + mode=LLMMode.CHAT.value, + parameters={}, + stop=[], + ) + mock_config.prompt_template = PromptTemplateEntity( + prompt_type=PromptTemplateEntity.PromptType.SIMPLE, + simple_prompt_template="You are a helpful assistant {{text_input}}", + ) + mock_config.dataset = None + mock_config.external_data_variables = [] + mock_config.additional_features = type("obj", (object,), {"file_upload": None})() + mock_config.app_model_config_dict = {} + return mock_config + + def _create_test_account_and_tenant(self, db_session_with_containers, mock_external_service_dependencies): + """ + Helper method to create a test account and tenant for testing. 
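+
+        The tenant is linked to the account through a TenantAccountJoin with
+        the OWNER role and is set as the account's current tenant.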
+ + Args: + db_session_with_containers: Database session from testcontainers infrastructure + mock_external_service_dependencies: Mock dependencies + + Returns: + tuple: (account, tenant) - Created account and tenant instances + """ + fake = Faker() + + # Create account + account = Account( + email=fake.email(), + name=fake.name(), + interface_language="en-US", + status="active", + ) + + from extensions.ext_database import db + + db.session.add(account) + db.session.commit() + + # Create tenant for the account + tenant = Tenant( + name=fake.company(), + status="normal", + ) + db.session.add(tenant) + db.session.commit() + + # Create tenant-account join + from models.account import TenantAccountJoin, TenantAccountRole + + join = TenantAccountJoin( + tenant_id=tenant.id, + account_id=account.id, + role=TenantAccountRole.OWNER.value, + current=True, + ) + db.session.add(join) + db.session.commit() + + # Set current tenant for account + account.current_tenant = tenant + + return account, tenant + + def _create_test_app(self, db_session_with_containers, mock_external_service_dependencies, tenant, account): + """ + Helper method to create a test app for testing. + + Args: + db_session_with_containers: Database session from testcontainers infrastructure + mock_external_service_dependencies: Mock dependencies + tenant: Tenant instance + account: Account instance + + Returns: + App: Created app instance + """ + fake = Faker() + + # Create app + app = App( + tenant_id=tenant.id, + name=fake.company(), + mode=AppMode.CHAT.value, + icon_type="emoji", + icon="🤖", + icon_background="#FF6B6B", + enable_site=True, + enable_api=True, + api_rpm=100, + api_rph=10, + is_demo=False, + is_public=False, + created_by=account.id, + updated_by=account.id, + ) + + from extensions.ext_database import db + + db.session.add(app) + db.session.commit() + + # Create app model config + app_model_config = AppModelConfig( + app_id=app.id, + provider="openai", + model="gpt-4", + configs={}, + created_by=account.id, + updated_by=account.id, + ) + db.session.add(app_model_config) + db.session.commit() + + # Link app model config to app + app.app_model_config_id = app_model_config.id + db.session.commit() + + return app + + def test_convert_to_workflow_success(self, db_session_with_containers, mock_external_service_dependencies): + """ + Test successful conversion of app to workflow. 
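+
+        The conversion should hand back a brand-new app in advanced-chat mode
+        backed by its own workflow row.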
+ + This test verifies: + - Proper app to workflow conversion + - Correct database state after conversion + - Proper relationship establishment + - Workflow creation with correct configuration + """ + # Arrange: Create test data + fake = Faker() + account, tenant = self._create_test_account_and_tenant( + db_session_with_containers, mock_external_service_dependencies + ) + app = self._create_test_app(db_session_with_containers, mock_external_service_dependencies, tenant, account) + + # Act: Execute the conversion + workflow_converter = WorkflowConverter() + new_app = workflow_converter.convert_to_workflow( + app_model=app, + account=account, + name="Test Workflow App", + icon_type="emoji", + icon="🚀", + icon_background="#4CAF50", + ) + + # Assert: Verify the expected outcomes + assert new_app is not None + assert new_app.name == "Test Workflow App" + assert new_app.mode == AppMode.ADVANCED_CHAT.value + assert new_app.icon_type == "emoji" + assert new_app.icon == "🚀" + assert new_app.icon_background == "#4CAF50" + assert new_app.tenant_id == app.tenant_id + assert new_app.created_by == account.id + + # Verify database state + from extensions.ext_database import db + + db.session.refresh(new_app) + assert new_app.id is not None + + # Verify workflow was created + workflow = db.session.query(Workflow).where(Workflow.app_id == new_app.id).first() + assert workflow is not None + assert workflow.tenant_id == app.tenant_id + assert workflow.type == "chat" + + def test_convert_to_workflow_without_app_model_config_error( + self, db_session_with_containers, mock_external_service_dependencies + ): + """ + Test error handling when app model config is missing. + + This test verifies: + - Proper error handling for missing app model config + - Correct exception type and message + - Database state remains unchanged + """ + # Arrange: Create test data without app model config + fake = Faker() + account, tenant = self._create_test_account_and_tenant( + db_session_with_containers, mock_external_service_dependencies + ) + + app = App( + tenant_id=tenant.id, + name=fake.company(), + mode=AppMode.CHAT.value, + icon_type="emoji", + icon="🤖", + icon_background="#FF6B6B", + enable_site=True, + enable_api=True, + api_rpm=100, + api_rph=10, + is_demo=False, + is_public=False, + created_by=account.id, + updated_by=account.id, + ) + + from extensions.ext_database import db + + db.session.add(app) + db.session.commit() + + # Act & Assert: Verify proper error handling + workflow_converter = WorkflowConverter() + + # Check initial state + initial_workflow_count = db.session.query(Workflow).count() + + with pytest.raises(ValueError, match="App model config is required"): + workflow_converter.convert_to_workflow( + app_model=app, + account=account, + name="Test Workflow App", + icon_type="emoji", + icon="🚀", + icon_background="#4CAF50", + ) + + # Verify database state remains unchanged + # The workflow creation happens in convert_app_model_config_to_workflow + # which is called before the app_model_config check, so we need to clean up + db.session.rollback() + final_workflow_count = db.session.query(Workflow).count() + assert final_workflow_count == initial_workflow_count + + def test_convert_app_model_config_to_workflow_success( + self, db_session_with_containers, mock_external_service_dependencies + ): + """ + Test successful conversion of app model config to workflow. 
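+
+        For a chat app the generated draft graph is expected to look roughly
+        like start -> llm -> answer, matching the node assertions below.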
+ + This test verifies: + - Proper app model config to workflow conversion + - Correct workflow graph structure + - Proper node creation and configuration + - Database state management + """ + # Arrange: Create test data + fake = Faker() + account, tenant = self._create_test_account_and_tenant( + db_session_with_containers, mock_external_service_dependencies + ) + app = self._create_test_app(db_session_with_containers, mock_external_service_dependencies, tenant, account) + + # Act: Execute the conversion + workflow_converter = WorkflowConverter() + workflow = workflow_converter.convert_app_model_config_to_workflow( + app_model=app, + app_model_config=app.app_model_config, + account_id=account.id, + ) + + # Assert: Verify the expected outcomes + assert workflow is not None + assert workflow.tenant_id == app.tenant_id + assert workflow.app_id == app.id + assert workflow.type == "chat" + assert workflow.version == Workflow.VERSION_DRAFT + assert workflow.created_by == account.id + + # Verify workflow graph structure + graph = json.loads(workflow.graph) + assert "nodes" in graph + assert "edges" in graph + assert len(graph["nodes"]) > 0 + assert len(graph["edges"]) > 0 + + # Verify start node exists + start_node = next((node for node in graph["nodes"] if node["data"]["type"] == "start"), None) + assert start_node is not None + assert start_node["id"] == "start" + + # Verify LLM node exists + llm_node = next((node for node in graph["nodes"] if node["data"]["type"] == "llm"), None) + assert llm_node is not None + assert llm_node["id"] == "llm" + + # Verify answer node exists for chat mode + answer_node = next((node for node in graph["nodes"] if node["data"]["type"] == "answer"), None) + assert answer_node is not None + assert answer_node["id"] == "answer" + + # Verify database state + from extensions.ext_database import db + + db.session.refresh(workflow) + assert workflow.id is not None + + # Verify features were set + features = json.loads(workflow._features) if workflow._features else {} + assert isinstance(features, dict) + + def test_convert_to_start_node_success(self, db_session_with_containers, mock_external_service_dependencies): + """ + Test successful conversion to start node. 
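+
+        A rough sketch of the expected node shape (values illustrative):
+
+            {
+                "id": "start",
+                "data": {"title": "START", "type": "start", "variables": [...]},
+            }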
+ + This test verifies: + - Proper start node creation with variables + - Correct node structure and data + - Variable encoding and formatting + """ + # Arrange: Create test variables + variables = [ + VariableEntity( + variable="text_input", + label="Text Input", + type=VariableEntityType.TEXT_INPUT, + ), + VariableEntity( + variable="number_input", + label="Number Input", + type=VariableEntityType.NUMBER, + ), + ] + + # Act: Execute the conversion + workflow_converter = WorkflowConverter() + start_node = workflow_converter._convert_to_start_node(variables=variables) + + # Assert: Verify the expected outcomes + assert start_node is not None + assert start_node["id"] == "start" + assert start_node["data"]["title"] == "START" + assert start_node["data"]["type"] == "start" + assert len(start_node["data"]["variables"]) == 2 + + # Verify variable encoding + first_variable = start_node["data"]["variables"][0] + assert first_variable["variable"] == "text_input" + assert first_variable["label"] == "Text Input" + assert first_variable["type"] == "text-input" + + second_variable = start_node["data"]["variables"][1] + assert second_variable["variable"] == "number_input" + assert second_variable["label"] == "Number Input" + assert second_variable["type"] == "number" + + def test_convert_to_http_request_node_success(self, db_session_with_containers, mock_external_service_dependencies): + """ + Test successful conversion to HTTP request node. + + This test verifies: + - Proper HTTP request node creation + - Correct API configuration and authorization + - Code node creation for response parsing + - External data variable mapping + """ + # Arrange: Create test data + fake = Faker() + account, tenant = self._create_test_account_and_tenant( + db_session_with_containers, mock_external_service_dependencies + ) + + app = self._create_test_app(db_session_with_containers, mock_external_service_dependencies, tenant, account) + + # Create API based extension + api_based_extension = APIBasedExtension( + tenant_id=tenant.id, + name="Test API Extension", + api_key="encrypted_api_key", + api_endpoint="https://api.example.com/test", + ) + + from extensions.ext_database import db + + db.session.add(api_based_extension) + db.session.commit() + + # Mock encrypter + mock_external_service_dependencies["encrypter"].decrypt_token.return_value = "decrypted_api_key" + + variables = [ + VariableEntity( + variable="user_input", + label="User Input", + type=VariableEntityType.TEXT_INPUT, + ) + ] + + external_data_variables = [ + ExternalDataVariableEntity( + variable="external_data", type="api", config={"api_based_extension_id": api_based_extension.id} + ) + ] + + # Act: Execute the conversion + workflow_converter = WorkflowConverter() + nodes, external_data_variable_node_mapping = workflow_converter._convert_to_http_request_node( + app_model=app, + variables=variables, + external_data_variables=external_data_variables, + ) + + # Assert: Verify the expected outcomes + assert len(nodes) == 2 # HTTP request node + code node + assert len(external_data_variable_node_mapping) == 1 + + # Verify HTTP request node + http_request_node = nodes[0] + assert http_request_node["data"]["type"] == "http-request" + assert http_request_node["data"]["method"] == "post" + assert http_request_node["data"]["url"] == api_based_extension.api_endpoint + assert http_request_node["data"]["authorization"]["type"] == "api-key" + assert http_request_node["data"]["authorization"]["config"]["type"] == "bearer" + assert 
http_request_node["data"]["authorization"]["config"]["api_key"] == "decrypted_api_key" + + # Verify code node + code_node = nodes[1] + assert code_node["data"]["type"] == "code" + assert code_node["data"]["code_language"] == "python3" + assert "response_json" in code_node["data"]["variables"][0]["variable"] + + # Verify mapping + assert external_data_variable_node_mapping["external_data"] == code_node["id"] + + def test_convert_to_knowledge_retrieval_node_success( + self, db_session_with_containers, mock_external_service_dependencies + ): + """ + Test successful conversion to knowledge retrieval node. + + This test verifies: + - Proper knowledge retrieval node creation + - Correct dataset configuration + - Model configuration integration + - Query variable selector setup + """ + # Arrange: Create test data + fake = Faker() + account, tenant = self._create_test_account_and_tenant( + db_session_with_containers, mock_external_service_dependencies + ) + + # Create dataset config + dataset_config = DatasetEntity( + dataset_ids=["dataset_1", "dataset_2"], + retrieve_config=DatasetRetrieveConfigEntity( + retrieve_strategy=DatasetRetrieveConfigEntity.RetrieveStrategy.MULTIPLE, + top_k=10, + score_threshold=0.8, + reranking_model={"provider": "cohere", "model": "rerank-v2"}, + reranking_enabled=True, + ), + ) + + model_config = ModelConfigEntity( + provider="openai", + model="gpt-4", + mode=LLMMode.CHAT.value, + parameters={"temperature": 0.7}, + stop=[], + ) + + # Act: Execute the conversion for advanced chat mode + workflow_converter = WorkflowConverter() + node = workflow_converter._convert_to_knowledge_retrieval_node( + new_app_mode=AppMode.ADVANCED_CHAT, + dataset_config=dataset_config, + model_config=model_config, + ) + + # Assert: Verify the expected outcomes + assert node is not None + assert node["data"]["type"] == "knowledge-retrieval" + assert node["data"]["title"] == "KNOWLEDGE RETRIEVAL" + assert node["data"]["dataset_ids"] == ["dataset_1", "dataset_2"] + assert node["data"]["retrieval_mode"] == "multiple" + assert node["data"]["query_variable_selector"] == ["sys", "query"] + + # Verify multiple retrieval config + multiple_config = node["data"]["multiple_retrieval_config"] + assert multiple_config["top_k"] == 10 + assert multiple_config["score_threshold"] == 0.8 + assert multiple_config["reranking_model"]["provider"] == "cohere" + assert multiple_config["reranking_model"]["model"] == "rerank-v2" + + # Verify single retrieval config is None for multiple strategy + assert node["data"]["single_retrieval_config"] is None From c22b325c31c54f09ecddd9d086177a13b4d5b018 Mon Sep 17 00:00:00 2001 From: fenglin Date: Thu, 4 Sep 2025 10:45:30 +0800 Subject: [PATCH 26/46] fix: align text color in dark mode for config var type selector (#25121) --- .../app/configuration/config-var/config-modal/type-select.tsx | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/web/app/components/app/configuration/config-var/config-modal/type-select.tsx b/web/app/components/app/configuration/config-var/config-modal/type-select.tsx index beb7b03e37..2b52991d4a 100644 --- a/web/app/components/app/configuration/config-var/config-modal/type-select.tsx +++ b/web/app/components/app/configuration/config-var/config-modal/type-select.tsx @@ -54,7 +54,7 @@ const TypeSelector: FC = ({ {selectedItem?.name} From 0a0ae16bd64339f4fcd11b251647d281b01cafa9 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E9=9D=9E=E6=B3=95=E6=93=8D=E4=BD=9C?= Date: Thu, 4 Sep 2025 10:46:10 +0800 Subject: [PATCH 27/46] fix: old custom model not 
display credential name (#25112) --- api/core/provider_manager.py | 56 +++++++++++++++++++++--------------- 1 file changed, 33 insertions(+), 23 deletions(-) diff --git a/api/core/provider_manager.py b/api/core/provider_manager.py index f8ef0c1846..4a3b8c9dde 100644 --- a/api/core/provider_manager.py +++ b/api/core/provider_manager.py @@ -150,6 +150,9 @@ class ProviderManager: tenant_id ) + # Get All provider model credentials + provider_name_to_provider_model_credentials_dict = self._get_all_provider_model_credentials(tenant_id) + provider_configurations = ProviderConfigurations(tenant_id=tenant_id) # Construct ProviderConfiguration objects for each provider @@ -171,10 +174,18 @@ class ProviderManager: provider_model_records.extend( provider_name_to_provider_model_records_dict.get(provider_id_entity.provider_name, []) ) + provider_model_credentials = provider_name_to_provider_model_credentials_dict.get( + provider_entity.provider, [] + ) + provider_id_entity = ModelProviderID(provider_name) + if provider_id_entity.is_langgenius(): + provider_model_credentials.extend( + provider_name_to_provider_model_credentials_dict.get(provider_id_entity.provider_name, []) + ) # Convert to custom configuration custom_configuration = self._to_custom_configuration( - tenant_id, provider_entity, provider_records, provider_model_records + tenant_id, provider_entity, provider_records, provider_model_records, provider_model_credentials ) # Convert to system configuration @@ -453,6 +464,24 @@ class ProviderManager: ) return provider_name_to_provider_model_settings_dict + @staticmethod + def _get_all_provider_model_credentials(tenant_id: str) -> dict[str, list[ProviderModelCredential]]: + """ + Get All provider model credentials of the workspace. + + :param tenant_id: workspace id + :return: + """ + provider_name_to_provider_model_credentials_dict = defaultdict(list) + with Session(db.engine, expire_on_commit=False) as session: + stmt = select(ProviderModelCredential).where(ProviderModelCredential.tenant_id == tenant_id) + provider_model_credentials = session.scalars(stmt) + for provider_model_credential in provider_model_credentials: + provider_name_to_provider_model_credentials_dict[provider_model_credential.provider_name].append( + provider_model_credential + ) + return provider_name_to_provider_model_credentials_dict + @staticmethod def _get_all_provider_load_balancing_configs(tenant_id: str) -> dict[str, list[LoadBalancingModelConfig]]: """ @@ -539,23 +568,6 @@ class ProviderManager: for credential in available_credentials ] - @staticmethod - def get_credentials_from_provider_model(tenant_id: str, provider_name: str) -> Sequence[ProviderModelCredential]: - """ - Get all the credentials records from ProviderModelCredential by provider_name - - :param tenant_id: workspace id - :param provider_name: provider name - - """ - with Session(db.engine, expire_on_commit=False) as session: - stmt = select(ProviderModelCredential).where( - ProviderModelCredential.tenant_id == tenant_id, ProviderModelCredential.provider_name == provider_name - ) - - all_credentials = session.scalars(stmt).all() - return all_credentials - @staticmethod def _init_trial_provider_records( tenant_id: str, provider_name_to_provider_records_dict: dict[str, list[Provider]] @@ -632,6 +644,7 @@ class ProviderManager: provider_entity: ProviderEntity, provider_records: list[Provider], provider_model_records: list[ProviderModel], + provider_model_credentials: list[ProviderModelCredential], ) -> CustomConfiguration: """ Convert to custom 
configuration. @@ -647,15 +660,12 @@ class ProviderManager: tenant_id, provider_entity, provider_records ) - # Get all model credentials once - all_model_credentials = self.get_credentials_from_provider_model(tenant_id, provider_entity.provider) - # Get custom models which have not been added to the model list yet - unadded_models = self._get_can_added_models(provider_model_records, all_model_credentials) + unadded_models = self._get_can_added_models(provider_model_records, provider_model_credentials) # Get custom model configurations custom_model_configurations = self._get_custom_model_configurations( - tenant_id, provider_entity, provider_model_records, unadded_models, all_model_credentials + tenant_id, provider_entity, provider_model_records, unadded_models, provider_model_credentials ) can_added_models = [ From ebbb4a5d0be2208e6e7939a580af2e32d6a99263 Mon Sep 17 00:00:00 2001 From: znn Date: Thu, 4 Sep 2025 08:35:45 +0530 Subject: [PATCH 29/46] fix png jpeg export (#25110) --- web/app/components/workflow/operator/export-image.tsx | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/web/app/components/workflow/operator/export-image.tsx b/web/app/components/workflow/operator/export-image.tsx index d14014ed1e..5aac049862 100644 --- a/web/app/components/workflow/operator/export-image.tsx +++ b/web/app/components/workflow/operator/export-image.tsx @@ -97,7 +97,8 @@ const ExportImage: FC = () => { style: { width: `${contentWidth}px`, height: `${contentHeight}px`, - transform: `translate(${padding - nodesBounds.x}px, ${padding - nodesBounds.y}px) scale(${zoom})`, + transform: `translate(${padding - nodesBounds.x}px, ${padding - nodesBounds.y}px)`, + transformOrigin: 'top left', }, } From 865ba8bb4fb73414cbffd1fce8e735f76ab82c5e Mon Sep 17 00:00:00 2001 From: Yongtao Huang Date: Thu, 4 Sep 2025 11:08:31 +0800 Subject: [PATCH 30/46] Minor fix: correct get_app_model mode for delete() (#25082) Signed-off-by: Yongtao Huang --- api/controllers/console/app/conversation.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/api/controllers/console/app/conversation.py b/api/controllers/console/app/conversation.py index 06f0218771..bc825effad 100644 --- a/api/controllers/console/app/conversation.py +++ b/api/controllers/console/app/conversation.py @@ -117,7 +117,7 @@ class CompletionConversationDetailApi(Resource): @setup_required @login_required @account_initialization_required - @get_app_model(mode=[AppMode.CHAT, AppMode.AGENT_CHAT, AppMode.ADVANCED_CHAT]) + @get_app_model(mode=AppMode.COMPLETION) def delete(self, app_model, conversation_id): if not current_user.is_editor: raise Forbidden() From d5aaee614f18b31588201c66f016be6f4435e438 Mon Sep 17 00:00:00 2001 From: Tonlo <123lzs123@gmail.com> Date: Thu, 4 Sep 2025 11:14:37 +0800 Subject: [PATCH 31/46] fix recommended apps reading from db logic (#25071) --- api/services/recommended_app_service.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/api/services/recommended_app_service.py b/api/services/recommended_app_service.py index 54c5845515..2aebe6b6b9 100644 --- a/api/services/recommended_app_service.py +++ b/api/services/recommended_app_service.py @@ -15,7 +15,7 @@ class RecommendedAppService: mode = dify_config.HOSTED_FETCH_APP_TEMPLATES_MODE retrieval_instance = RecommendAppRetrievalFactory.get_recommend_app_factory(mode)() result = retrieval_instance.get_recommended_apps_and_categories(language) - if not result.get("recommended_apps") and language != "en-US": + if not result.get("recommended_apps"): 
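+            # fall back to the built-in en-US templates whenever retrieval
+            # returns nothing, even when en-US itself was the requested language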
result = ( RecommendAppRetrievalFactory.get_buildin_recommend_app_retrieval().fetch_recommended_apps_from_builtin( "en-US" From cdf9b674dc45b3efc062793204dafe6563ea8a9b Mon Sep 17 00:00:00 2001 From: Davide Delbianco Date: Thu, 4 Sep 2025 05:15:36 +0200 Subject: [PATCH 32/46] chore: Bump weaviate-client to latest v3 version (#25096) --- api/core/rag/datasource/vdb/weaviate/weaviate_vector.py | 7 ------- api/pyproject.toml | 2 +- api/uv.lock | 8 ++++---- 3 files changed, 5 insertions(+), 12 deletions(-) diff --git a/api/core/rag/datasource/vdb/weaviate/weaviate_vector.py b/api/core/rag/datasource/vdb/weaviate/weaviate_vector.py index b3fe013e70..bc237b591a 100644 --- a/api/core/rag/datasource/vdb/weaviate/weaviate_vector.py +++ b/api/core/rag/datasource/vdb/weaviate/weaviate_vector.py @@ -41,13 +41,6 @@ class WeaviateVector(BaseVector): weaviate.connect.connection.has_grpc = False # ty: ignore [unresolved-attribute] - # Fix to minimize the performance impact of the deprecation check in weaviate-client 3.24.0, - # by changing the connection timeout to pypi.org from 1 second to 0.001 seconds. - # TODO: This can be removed once weaviate-client is updated to 3.26.7 or higher, - # which does not contain the deprecation check. - if hasattr(weaviate.connect.connection, "PYPI_TIMEOUT"): # ty: ignore [unresolved-attribute] - weaviate.connect.connection.PYPI_TIMEOUT = 0.001 # ty: ignore [unresolved-attribute] - try: client = weaviate.Client( url=config.endpoint, auth_client_secret=auth_config, timeout_config=(5, 60), startup_period=None diff --git a/api/pyproject.toml b/api/pyproject.toml index a0c108cd2c..7416380fdb 100644 --- a/api/pyproject.toml +++ b/api/pyproject.toml @@ -214,7 +214,7 @@ vdb = [ "tidb-vector==0.0.9", "upstash-vector==0.6.0", "volcengine-compat~=1.0.0", - "weaviate-client~=3.24.0", + "weaviate-client~=3.26.7", "xinference-client~=1.2.2", "mo-vector~=0.1.13", ] diff --git a/api/uv.lock b/api/uv.lock index 7e67a84ce2..987dc7243d 100644 --- a/api/uv.lock +++ b/api/uv.lock @@ -1637,7 +1637,7 @@ vdb = [ { name = "tidb-vector", specifier = "==0.0.9" }, { name = "upstash-vector", specifier = "==0.6.0" }, { name = "volcengine-compat", specifier = "~=1.0.0" }, - { name = "weaviate-client", specifier = "~=3.24.0" }, + { name = "weaviate-client", specifier = "~=3.26.7" }, { name = "xinference-client", specifier = "~=1.2.2" }, ] @@ -6642,16 +6642,16 @@ wheels = [ [[package]] name = "weaviate-client" -version = "3.24.2" +version = "3.26.7" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "authlib" }, { name = "requests" }, { name = "validators" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/1f/c1/3285a21d8885f2b09aabb65edb9a8e062a35c2d7175e1bb024fa096582ab/weaviate-client-3.24.2.tar.gz", hash = "sha256:6914c48c9a7e5ad0be9399271f9cb85d6f59ab77476c6d4e56a3925bf149edaa", size = 199332, upload-time = "2023-10-04T08:37:54.26Z" } +sdist = { url = "https://files.pythonhosted.org/packages/f8/2e/9588bae34c1d67d05ccc07d74a4f5d73cce342b916f79ab3a9114c6607bb/weaviate_client-3.26.7.tar.gz", hash = "sha256:ea538437800abc6edba21acf213accaf8a82065584ee8b914bae4a4ad4ef6b70", size = 210480, upload-time = "2024-08-15T13:27:02.431Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/ab/98/3136d05f93e30cf29e1db280eaadf766df18d812dfe7994bcced653b2340/weaviate_client-3.24.2-py3-none-any.whl", hash = "sha256:bc50ca5fcebcd48de0d00f66700b0cf7c31a97c4cd3d29b4036d77c5d1d9479b", size = 107968, upload-time = "2023-10-04T08:37:52.511Z" }, + { url = 
"https://files.pythonhosted.org/packages/2a/95/fb326052bc1d73cb3c19fcfaf6ebb477f896af68de07eaa1337e27ee57fa/weaviate_client-3.26.7-py3-none-any.whl", hash = "sha256:48b8d4b71df881b4e5e15964d7ac339434338ccee73779e3af7eab698a92083b", size = 120051, upload-time = "2024-08-15T13:27:00.212Z" }, ] [[package]] From ead8568bfc7394bdf17cd9006068dd051457db50 Mon Sep 17 00:00:00 2001 From: -LAN- Date: Thu, 4 Sep 2025 11:58:54 +0800 Subject: [PATCH 33/46] fix: some errors reported by basedpyright Signed-off-by: -LAN- --- api/core/workflow/nodes/agent/agent_node.py | 2 +- api/core/workflow/nodes/base/entities.py | 3 --- api/core/workflow/nodes/code/code_node.py | 6 ----- .../workflow/nodes/http_request/executor.py | 22 +++++-------------- api/core/workflow/nodes/list_operator/node.py | 14 ++++-------- api/core/workflow/nodes/llm/node.py | 16 +++++--------- .../nodes/parameter_extractor/entities.py | 2 -- .../parameter_extractor_node.py | 12 ++-------- api/core/workflow/nodes/tool/tool_node.py | 2 +- .../nodes/variable_assigner/v1/node.py | 4 ---- .../nodes/variable_assigner/v2/helpers.py | 2 -- .../nodes/variable_assigner/v2/node.py | 2 -- 12 files changed, 19 insertions(+), 68 deletions(-) diff --git a/api/core/workflow/nodes/agent/agent_node.py b/api/core/workflow/nodes/agent/agent_node.py index fa912d5035..fc2ad2702d 100644 --- a/api/core/workflow/nodes/agent/agent_node.py +++ b/api/core/workflow/nodes/agent/agent_node.py @@ -579,7 +579,7 @@ class AgentNode(Node): for key, value in msg_metadata.items() if key in WorkflowNodeExecutionMetadataKey.__members__.values() } - if message.message.json_object is not None: + if message.message.json_object: json_list.append(message.message.json_object) elif message.type == ToolInvokeMessage.MessageType.LINK: assert isinstance(message.message, ToolInvokeMessage.TextMessage) diff --git a/api/core/workflow/nodes/base/entities.py b/api/core/workflow/nodes/base/entities.py index 5503ea7519..fd2eb68dc0 100644 --- a/api/core/workflow/nodes/base/entities.py +++ b/api/core/workflow/nodes/base/entities.py @@ -73,9 +73,6 @@ class DefaultValue(BaseModel): @model_validator(mode="after") def validate_value_type(self) -> "DefaultValue": - if self.type is None: - raise DefaultValueTypeError("type field is required") - # Type validation configuration type_validators = { DefaultValueType.STRING: { diff --git a/api/core/workflow/nodes/code/code_node.py b/api/core/workflow/nodes/code/code_node.py index 624b71028a..61820cc700 100644 --- a/api/core/workflow/nodes/code/code_node.py +++ b/api/core/workflow/nodes/code/code_node.py @@ -108,8 +108,6 @@ class CodeNode(Node): """ if value is None: return None - if not isinstance(value, str): - raise OutputValidationError(f"Output variable `{variable}` must be a string") if len(value) > dify_config.CODE_MAX_STRING_LENGTH: raise OutputValidationError( @@ -122,8 +120,6 @@ class CodeNode(Node): def _check_boolean(self, value: bool | None, variable: str) -> bool | None: if value is None: return None - if not isinstance(value, bool): - raise OutputValidationError(f"Output variable `{variable}` must be a boolean") return value @@ -136,8 +132,6 @@ class CodeNode(Node): """ if value is None: return None - if not isinstance(value, int | float): - raise OutputValidationError(f"Output variable `{variable}` must be a number") if value > dify_config.CODE_MAX_NUMBER or value < dify_config.CODE_MIN_NUMBER: raise OutputValidationError( diff --git a/api/core/workflow/nodes/http_request/executor.py b/api/core/workflow/nodes/http_request/executor.py index 
ed48bc6484..a36a0a9d98 100644 --- a/api/core/workflow/nodes/http_request/executor.py +++ b/api/core/workflow/nodes/http_request/executor.py @@ -263,8 +263,6 @@ class Executor: if authorization.config is None: raise AuthorizationConfigError("authorization config is required") - if self.auth.config.api_key is None: - raise AuthorizationConfigError("api_key is required") if not authorization.config.header: authorization.config.header = "Authorization" @@ -409,30 +407,22 @@ class Executor: if self.files and not all(f[0] == "__multipart_placeholder__" for f in self.files): for file_entry in self.files: # file_entry should be (key, (filename, content, mime_type)), but handle edge cases - if len(file_entry) != 2 or not isinstance(file_entry[1], tuple) or len(file_entry[1]) < 2: + if len(file_entry) != 2 or len(file_entry[1]) < 2: continue # skip malformed entries key = file_entry[0] content = file_entry[1][1] body_string += f"--{boundary}\r\n" body_string += f'Content-Disposition: form-data; name="{key}"\r\n\r\n' # decode content safely - if isinstance(content, bytes): - try: - body_string += content.decode("utf-8") - except UnicodeDecodeError: - body_string += content.decode("utf-8", errors="replace") - elif isinstance(content, str): - body_string += content - else: - body_string += f"[Unsupported content type: {type(content).__name__}]" + try: + body_string += content.decode("utf-8") + except UnicodeDecodeError: + body_string += content.decode("utf-8", errors="replace") body_string += "\r\n" body_string += f"--{boundary}--\r\n" elif self.node_data.body: if self.content: - if isinstance(self.content, str): - body_string = self.content - elif isinstance(self.content, bytes): - body_string = self.content.decode("utf-8", errors="replace") + body_string = self.content.decode("utf-8", errors="replace") elif self.data and self.node_data.body.type == "x-www-form-urlencoded": body_string = urlencode(self.data) elif self.data and self.node_data.body.type == "form-data": diff --git a/api/core/workflow/nodes/list_operator/node.py b/api/core/workflow/nodes/list_operator/node.py index 05197aafa5..96220b635b 100644 --- a/api/core/workflow/nodes/list_operator/node.py +++ b/api/core/workflow/nodes/list_operator/node.py @@ -170,27 +170,21 @@ class ListOperatorNode(Node): ) result = list(filter(filter_func, variable.value)) variable = variable.model_copy(update={"value": result}) - elif isinstance(variable, ArrayBooleanSegment): - if not isinstance(condition.value, bool): - raise InvalidFilterValueError(f"Invalid filter value: {condition.value}") + else: filter_func = _get_boolean_filter_func(condition=condition.comparison_operator, value=condition.value) result = list(filter(filter_func, variable.value)) variable = variable.model_copy(update={"value": result}) - else: - raise AssertionError("this statment should be unreachable.") return variable def _apply_order(self, variable: _SUPPORTED_TYPES_ALIAS) -> _SUPPORTED_TYPES_ALIAS: if isinstance(variable, (ArrayStringSegment, ArrayNumberSegment, ArrayBooleanSegment)): - result = sorted(variable.value, reverse=self._node_data.order_by == Order.DESC) + result = sorted(variable.value, reverse=self._node_data.order_by.value == Order.DESC) variable = variable.model_copy(update={"value": result}) - elif isinstance(variable, ArrayFileSegment): + else: result = _order_file( order=self._node_data.order_by.value, order_by=self._node_data.order_by.key, array=variable.value ) variable = variable.model_copy(update={"value": result}) - else: - raise AssertionError("this statement 
should be unreachable") return variable @@ -304,7 +298,7 @@ def _get_file_filter_func(*, key: str, condition: str, value: str | Sequence[str if key in {"name", "extension", "mime_type", "url"} and isinstance(value, str): extract_func = _get_file_extract_string_func(key=key) return lambda x: _get_string_filter_func(condition=condition, value=value)(extract_func(x)) - if key in {"type", "transfer_method"} and isinstance(value, Sequence): + if key in {"type", "transfer_method"}: extract_func = _get_file_extract_string_func(key=key) return lambda x: _get_sequence_filter_func(condition=condition, value=value)(extract_func(x)) elif key == "size" and isinstance(value, str): diff --git a/api/core/workflow/nodes/llm/node.py b/api/core/workflow/nodes/llm/node.py index abf2a36a35..d8cf33be18 100644 --- a/api/core/workflow/nodes/llm/node.py +++ b/api/core/workflow/nodes/llm/node.py @@ -195,9 +195,8 @@ class LLMNode(Node): generator = self._fetch_context(node_data=self._node_data) context = None for event in generator: - if isinstance(event, RunRetrieverResourceEvent): - context = event.context - yield event + context = event.context + yield event if context: node_inputs["#context#"] = context @@ -282,7 +281,7 @@ class LLMNode(Node): outputs = {"text": result_text, "usage": jsonable_encoder(usage), "finish_reason": finish_reason} if structured_output: outputs["structured_output"] = structured_output.structured_output - if self._file_outputs is not None: + if self._file_outputs: outputs["files"] = ArrayFileSegment(value=self._file_outputs) # Send final chunk event to indicate streaming is complete @@ -827,9 +826,7 @@ class LLMNode(Node): prompt_template = typed_node_data.prompt_template variable_selectors = [] - if isinstance(prompt_template, list) and all( - isinstance(prompt, LLMNodeChatModelMessage) for prompt in prompt_template - ): + if isinstance(prompt_template, list): for prompt in prompt_template: if prompt.edition_type != "jinja2": variable_template_parser = VariableTemplateParser(template=prompt.text) @@ -1063,7 +1060,7 @@ class LLMNode(Node): return if isinstance(contents, str): yield contents - elif isinstance(contents, list): + else: for item in contents: if isinstance(item, TextPromptMessageContent): yield item.data @@ -1077,9 +1074,6 @@ class LLMNode(Node): else: logger.warning("unknown item type encountered, type=%s", type(item)) yield str(item) - else: - logger.warning("unknown contents type encountered, type=%s", type(contents)) - yield str(contents) @property def retry(self) -> bool: diff --git a/api/core/workflow/nodes/parameter_extractor/entities.py b/api/core/workflow/nodes/parameter_extractor/entities.py index 4e93cd9688..cb221d39e6 100644 --- a/api/core/workflow/nodes/parameter_extractor/entities.py +++ b/api/core/workflow/nodes/parameter_extractor/entities.py @@ -31,8 +31,6 @@ _VALID_PARAMETER_TYPES = frozenset( def _validate_type(parameter_type: str) -> SegmentType: - if not isinstance(parameter_type, str): - raise TypeError(f"type should be str, got {type(parameter_type)}, value={parameter_type}") if parameter_type not in _VALID_PARAMETER_TYPES: raise ValueError(f"type {parameter_type} is not allowd to use in Parameter Extractor node.") diff --git a/api/core/workflow/nodes/parameter_extractor/parameter_extractor_node.py b/api/core/workflow/nodes/parameter_extractor/parameter_extractor_node.py index 648ea69936..445fe364ff 100644 --- a/api/core/workflow/nodes/parameter_extractor/parameter_extractor_node.py +++ 
b/api/core/workflow/nodes/parameter_extractor/parameter_extractor_node.py @@ -10,7 +10,7 @@ from core.file import File from core.memory.token_buffer_memory import TokenBufferMemory from core.model_manager import ModelInstance from core.model_runtime.entities import ImagePromptMessageContent -from core.model_runtime.entities.llm_entities import LLMResult, LLMUsage +from core.model_runtime.entities.llm_entities import LLMUsage from core.model_runtime.entities.message_entities import ( AssistantPromptMessage, PromptMessage, @@ -38,7 +38,6 @@ from factories.variable_factory import build_segment_with_type from .entities import ParameterExtractorNodeData from .exc import ( - InvalidInvokeResultError, InvalidModelModeError, InvalidModelTypeError, InvalidNumberOfParametersError, @@ -304,8 +303,6 @@ class ParameterExtractorNode(Node): ) # handle invoke result - if not isinstance(invoke_result, LLMResult): - raise InvalidInvokeResultError(f"Invalid invoke result: {invoke_result}") text = invoke_result.message.content or "" if not isinstance(text, str): @@ -317,9 +314,6 @@ class ParameterExtractorNode(Node): # deduct quota llm_utils.deduct_llm_quota(tenant_id=self.tenant_id, model_instance=model_instance, usage=usage) - if text is None: - text = "" - return text, usage, tool_call def _generate_function_call_prompt( @@ -583,8 +577,6 @@ class ParameterExtractorNode(Node): return int(value) elif isinstance(value, (int, float)): return value - elif not isinstance(value, str): - return None if "." in value: try: return float(value) @@ -697,7 +689,7 @@ class ParameterExtractorNode(Node): for parameter in data.parameters: if parameter.type == "number": result[parameter.name] = 0 - elif parameter.type == "bool": + elif parameter.type == "boolean": result[parameter.name] = False elif parameter.type in {"string", "select"}: result[parameter.name] = "" diff --git a/api/core/workflow/nodes/tool/tool_node.py b/api/core/workflow/nodes/tool/tool_node.py index 9708edcb38..63d7a0eea5 100644 --- a/api/core/workflow/nodes/tool/tool_node.py +++ b/api/core/workflow/nodes/tool/tool_node.py @@ -323,7 +323,7 @@ class ToolNode(Node): elif message.type == ToolInvokeMessage.MessageType.JSON: assert isinstance(message.message, ToolInvokeMessage.JsonMessage) # JSON message handling for tool node - if message.message.json_object is not None: + if message.message.json_object: json.append(message.message.json_object) elif message.type == ToolInvokeMessage.MessageType.LINK: assert isinstance(message.message, ToolInvokeMessage.TextMessage) diff --git a/api/core/workflow/nodes/variable_assigner/v1/node.py b/api/core/workflow/nodes/variable_assigner/v1/node.py index 1c7baf3a18..9b3a7c61f7 100644 --- a/api/core/workflow/nodes/variable_assigner/v1/node.py +++ b/api/core/workflow/nodes/variable_assigner/v1/node.py @@ -117,12 +117,8 @@ class VariableAssignerNode(Node): case WriteMode.CLEAR: income_value = get_zero_value(original_variable.value_type) - if income_value is None: - raise VariableOperatorNodeError("income value not found") updated_variable = original_variable.model_copy(update={"value": income_value.to_object()}) - case _: - raise VariableOperatorNodeError(f"unsupported write mode: {self._node_data.write_mode}") # Over write the variable. 
self.graph_runtime_state.variable_pool.add(assigned_variable_selector, updated_variable) diff --git a/api/core/workflow/nodes/variable_assigner/v2/helpers.py b/api/core/workflow/nodes/variable_assigner/v2/helpers.py index 324f23a900..f5490fb900 100644 --- a/api/core/workflow/nodes/variable_assigner/v2/helpers.py +++ b/api/core/workflow/nodes/variable_assigner/v2/helpers.py @@ -25,8 +25,6 @@ def is_operation_supported(*, variable_type: SegmentType, operation: Operation): # Only array variable can be appended or extended # Only array variable can have elements removed return variable_type.is_array_type() - case _: - return False def is_variable_input_supported(*, operation: Operation): diff --git a/api/core/workflow/nodes/variable_assigner/v2/node.py b/api/core/workflow/nodes/variable_assigner/v2/node.py index b863204dda..277021bc94 100644 --- a/api/core/workflow/nodes/variable_assigner/v2/node.py +++ b/api/core/workflow/nodes/variable_assigner/v2/node.py @@ -274,5 +274,3 @@ class VariableAssignerNode(Node): if not variable.value: return variable.value return variable.value[:-1] - case _: - raise OperationNotSupportedError(operation=operation, variable_type=variable.value_type) From 2b28aed4e217b3d260483136fc80ccb7539f70eb Mon Sep 17 00:00:00 2001 From: "autofix-ci[bot]" <114827586+autofix-ci[bot]@users.noreply.github.com> Date: Thu, 4 Sep 2025 04:50:21 +0000 Subject: [PATCH 34/46] [autofix.ci] apply automated fixes --- api/core/workflow/nodes/http_request/executor.py | 1 - api/core/workflow/nodes/variable_assigner/v1/node.py | 1 - 2 files changed, 2 deletions(-) diff --git a/api/core/workflow/nodes/http_request/executor.py b/api/core/workflow/nodes/http_request/executor.py index a36a0a9d98..a0319cfe5f 100644 --- a/api/core/workflow/nodes/http_request/executor.py +++ b/api/core/workflow/nodes/http_request/executor.py @@ -263,7 +263,6 @@ class Executor: if authorization.config is None: raise AuthorizationConfigError("authorization config is required") - if not authorization.config.header: authorization.config.header = "Authorization" diff --git a/api/core/workflow/nodes/variable_assigner/v1/node.py b/api/core/workflow/nodes/variable_assigner/v1/node.py index 9b3a7c61f7..97ceb33422 100644 --- a/api/core/workflow/nodes/variable_assigner/v1/node.py +++ b/api/core/workflow/nodes/variable_assigner/v1/node.py @@ -119,7 +119,6 @@ class VariableAssignerNode(Node): income_value = get_zero_value(original_variable.value_type) updated_variable = original_variable.model_copy(update={"value": income_value.to_object()}) - # Over write the variable. 
self.graph_runtime_state.variable_pool.add(assigned_variable_selector, updated_variable) From 804e599598d5636f1fab229576ff5616029554c5 Mon Sep 17 00:00:00 2001 From: Will Date: Thu, 4 Sep 2025 13:59:34 +0800 Subject: [PATCH 35/46] fix: EndUser not bound to Session when plugin invokes callback (#25132) --- api/core/plugin/backwards_invocation/app.py | 12 +++++++----- 1 file changed, 7 insertions(+), 5 deletions(-) diff --git a/api/core/plugin/backwards_invocation/app.py b/api/core/plugin/backwards_invocation/app.py index a799646444..48f44da68e 100644 --- a/api/core/plugin/backwards_invocation/app.py +++ b/api/core/plugin/backwards_invocation/app.py @@ -2,6 +2,7 @@ from collections.abc import Generator, Mapping from typing import Optional, Union from sqlalchemy import select +from sqlalchemy.orm import Session from controllers.service_api.wraps import create_or_update_end_user_for_user_id from core.app.app_config.common.parameters_mapping import get_parameters_from_feature_dict @@ -194,11 +195,12 @@ class PluginAppBackwardsInvocation(BaseBackwardsInvocation): """ get the user by user id """ - stmt = select(EndUser).where(EndUser.id == user_id) - user = db.session.scalar(stmt) - if not user: - stmt = select(Account).where(Account.id == user_id) - user = db.session.scalar(stmt) + with Session(db.engine, expire_on_commit=False) as session: + stmt = select(EndUser).where(EndUser.id == user_id) + user = session.scalar(stmt) + if not user: + stmt = select(Account).where(Account.id == user_id) + user = session.scalar(stmt) if not user: raise ValueError("user not found") From 8d5f788f2b4a8b92ee108b10974eb1833973c72a Mon Sep 17 00:00:00 2001 From: CrabSAMA <40541269+CrabSAMA@users.noreply.github.com> Date: Thu, 4 Sep 2025 15:21:43 +0800 Subject: [PATCH 36/46] feat(workflow): Allow paste node into nested block (#24234) Co-authored-by: crab.huang --- .../workflow/hooks/use-nodes-interactions.ts | 43 ++++++++++++++++++- web/app/components/workflow/utils/node.ts | 7 +++ 2 files changed, 48 insertions(+), 2 deletions(-) diff --git a/web/app/components/workflow/hooks/use-nodes-interactions.ts b/web/app/components/workflow/hooks/use-nodes-interactions.ts index fdfb25b04d..7046d1a93a 100644 --- a/web/app/components/workflow/hooks/use-nodes-interactions.ts +++ b/web/app/components/workflow/hooks/use-nodes-interactions.ts @@ -39,6 +39,7 @@ import { import { genNewNodeTitleFromOld, generateNewNode, + getNestedNodePosition, getNodeCustomTypeByNodeDataType, getNodesConnectedSourceOrTargetHandleIdsMap, getTopLeftNodePosition, @@ -1326,8 +1327,7 @@ export const useNodesInteractions = () => { }) newChildren.push(newIterationStartNode!) } - - if (nodeToPaste.data.type === BlockEnum.Loop) { + else if (nodeToPaste.data.type === BlockEnum.Loop) { newLoopStartNode!.parentId = newNode.id; (newNode.data as LoopNodeType).start_node_id = newLoopStartNode!.id @@ -1337,6 +1337,44 @@ export const useNodesInteractions = () => { }) newChildren.push(newLoopStartNode!) 
} + else { + // single node paste + const selectedNode = nodes.find(node => node.selected) + if (selectedNode) { + const commonNestedDisallowPasteNodes = [ + // end node only can be placed outermost layer + BlockEnum.End, + ] + + // handle disallow paste node + if (commonNestedDisallowPasteNodes.includes(nodeToPaste.data.type)) + return + + // handle paste to nested block + if (selectedNode.data.type === BlockEnum.Iteration) { + newNode.data.isInIteration = true + newNode.data.iteration_id = selectedNode.data.iteration_id + newNode.parentId = selectedNode.id + newNode.positionAbsolute = { + x: newNode.position.x, + y: newNode.position.y, + } + // set position base on parent node + newNode.position = getNestedNodePosition(newNode, selectedNode) + } + else if (selectedNode.data.type === BlockEnum.Loop) { + newNode.data.isInLoop = true + newNode.data.loop_id = selectedNode.data.loop_id + newNode.parentId = selectedNode.id + newNode.positionAbsolute = { + x: newNode.position.x, + y: newNode.position.y, + } + // set position base on parent node + newNode.position = getNestedNodePosition(newNode, selectedNode) + } + } + } nodesToPaste.push(newNode) @@ -1344,6 +1382,7 @@ export const useNodesInteractions = () => { nodesToPaste.push(...newChildren) }) + // only handle edge when paste nested block edges.forEach((edge) => { const sourceId = idMapping[edge.source] const targetId = idMapping[edge.target] diff --git a/web/app/components/workflow/utils/node.ts b/web/app/components/workflow/utils/node.ts index 7a9e33b2f6..726908bff1 100644 --- a/web/app/components/workflow/utils/node.ts +++ b/web/app/components/workflow/utils/node.ts @@ -135,6 +135,13 @@ export const getTopLeftNodePosition = (nodes: Node[]) => { } } +export const getNestedNodePosition = (node: Node, parentNode: Node) => { + return { + x: node.position.x - parentNode.position.x, + y: node.position.y - parentNode.position.y, + } +} + export const hasRetryNode = (nodeType?: BlockEnum) => { return nodeType === BlockEnum.LLM || nodeType === BlockEnum.Tool || nodeType === BlockEnum.HttpRequest || nodeType === BlockEnum.Code } From 226f14a20f183753d413d3d18f6eefb75cd44233 Mon Sep 17 00:00:00 2001 From: -LAN- Date: Thu, 4 Sep 2025 15:35:20 +0800 Subject: [PATCH 37/46] feat(graph_engine): implement scale down worker Signed-off-by: -LAN- --- .../worker_management/simple_worker_pool.py | 169 +++++++++++++++++- 1 file changed, 166 insertions(+), 3 deletions(-) diff --git a/api/core/workflow/graph_engine/worker_management/simple_worker_pool.py b/api/core/workflow/graph_engine/worker_management/simple_worker_pool.py index 94b8ff3ca2..367c2b36fc 100644 --- a/api/core/workflow/graph_engine/worker_management/simple_worker_pool.py +++ b/api/core/workflow/graph_engine/worker_management/simple_worker_pool.py @@ -5,8 +5,10 @@ This is a simpler implementation that merges WorkerPool, ActivityTracker, DynamicScaler, and WorkerFactory into a single class. 
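+
+Scaling policy: at most one worker is added per check while the ready queue
+depth exceeds the scale-up threshold, and at most one idle worker is removed
+per check once it has been idle longer than the scale-down idle time, never
+dropping below the configured minimum worker count.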
""" +import logging import queue import threading +import time from typing import TYPE_CHECKING, final from configs import dify_config @@ -15,6 +17,8 @@ from core.workflow.graph_events import GraphNodeEventBase from ..worker import Worker +logger = logging.getLogger(__name__) + if TYPE_CHECKING: from contextvars import Context @@ -74,6 +78,10 @@ class SimpleWorkerPool: self._lock = threading.RLock() self._running = False + # Track worker idle times for scale-down + self._worker_idle_times: dict[int, float] = {} + self._worker_active_states: dict[int, bool] = {} + def start(self, initial_count: int | None = None) -> None: """ Start the worker pool. @@ -97,6 +105,14 @@ class SimpleWorkerPool: else: initial_count = min(self._min_workers + 2, self._max_workers) + logger.debug( + "Starting worker pool: %d workers (nodes=%d, min=%d, max=%d)", + initial_count, + node_count, + self._min_workers, + self._max_workers, + ) + # Create initial workers for _ in range(initial_count): self._create_worker() @@ -105,6 +121,10 @@ class SimpleWorkerPool: """Stop all workers in the pool.""" with self._lock: self._running = False + worker_count = len(self._workers) + + if worker_count > 0: + logger.debug("Stopping worker pool: %d workers", worker_count) # Stop all workers for worker in self._workers: @@ -116,6 +136,8 @@ class SimpleWorkerPool: worker.join(timeout=10.0) self._workers.clear() + self._worker_active_states.clear() + self._worker_idle_times.clear() def _create_worker(self) -> None: """Create and start a new worker.""" @@ -129,11 +151,146 @@ class SimpleWorkerPool: worker_id=worker_id, flask_app=self._flask_app, context_vars=self._context_vars, + on_idle_callback=self._on_worker_idle, + on_active_callback=self._on_worker_active, ) worker.start() self._workers.append(worker) + # Initialize tracking + self._worker_active_states[worker_id] = True + self._worker_idle_times[worker_id] = 0.0 + + def _on_worker_idle(self, worker_id: int) -> None: + """Handle worker becoming idle.""" + with self._lock: + if worker_id not in self._worker_active_states: + return + + # Mark as idle and record time if transitioning from active + if self._worker_active_states.get(worker_id, False): + self._worker_active_states[worker_id] = False + self._worker_idle_times[worker_id] = time.time() + + def _on_worker_active(self, worker_id: int) -> None: + """Handle worker becoming active.""" + with self._lock: + if worker_id not in self._worker_active_states: + return + + # Mark as active and clear idle time + self._worker_active_states[worker_id] = True + self._worker_idle_times[worker_id] = 0.0 + + def _remove_worker(self, worker: Worker, worker_id: int) -> None: + """Remove a specific worker from the pool.""" + # Stop the worker + worker.stop() + + # Wait for it to finish + if worker.is_alive(): + worker.join(timeout=2.0) + + # Remove from list and tracking + if worker in self._workers: + self._workers.remove(worker) + + # Clean up tracking + self._worker_active_states.pop(worker_id, None) + self._worker_idle_times.pop(worker_id, None) + + def _try_scale_up(self, queue_depth: int, current_count: int) -> bool: + """ + Try to scale up workers if needed. 
+ + Args: + queue_depth: Current queue depth + current_count: Current number of workers + + Returns: + True if scaled up, False otherwise + """ + if queue_depth > self._scale_up_threshold and current_count < self._max_workers: + old_count = current_count + self._create_worker() + + logger.debug( + "Scaled up workers: %d -> %d (queue_depth=%d exceeded threshold=%d)", + old_count, + len(self._workers), + queue_depth, + self._scale_up_threshold, + ) + return True + return False + + def _try_scale_down(self, queue_depth: int, current_count: int, active_count: int, idle_count: int) -> bool: + """ + Try to scale down workers if we have excess capacity. + + Args: + queue_depth: Current queue depth + current_count: Current number of workers + active_count: Number of active workers + idle_count: Number of idle workers + + Returns: + True if scaled down, False otherwise + """ + # Skip if we're at minimum or have no idle workers + if current_count <= self._min_workers or idle_count == 0: + return False + + # Check if we have excess capacity + has_excess_capacity = ( + queue_depth <= active_count # Active workers can handle current queue + or idle_count > active_count # More idle than active workers + or (queue_depth == 0 and idle_count > 0) # No work and have idle workers + ) + + if not has_excess_capacity: + return False + + # Find and remove idle workers + current_time = time.time() + workers_to_remove = [] + + for worker in self._workers: + worker_id = worker._worker_id + + # Check if worker is idle and has exceeded idle time threshold + if not self._worker_active_states.get(worker_id, True) and self._worker_idle_times.get(worker_id, 0) > 0: + idle_duration = current_time - self._worker_idle_times[worker_id] + if idle_duration >= self._scale_down_idle_time: + # Don't remove if it would leave us unable to handle the queue + remaining_workers = current_count - len(workers_to_remove) - 1 + if remaining_workers >= self._min_workers and remaining_workers >= max(1, queue_depth // 2): + workers_to_remove.append((worker, worker_id)) + # Only remove one worker per check to avoid aggressive scaling + break + + # Remove idle workers if any found + if workers_to_remove: + old_count = current_count + for worker, worker_id in workers_to_remove: + self._remove_worker(worker, worker_id) + + logger.debug( + "Scaled down workers: %d -> %d (removed %d idle workers after %.1fs, " + "queue_depth=%d, active=%d, idle=%d)", + old_count, + len(self._workers), + len(workers_to_remove), + self._scale_down_idle_time, + queue_depth, + active_count, + idle_count - len(workers_to_remove), + ) + return True + + return False + def check_and_scale(self) -> None: """Check and perform scaling if needed.""" with self._lock: @@ -143,9 +300,15 @@ class SimpleWorkerPool: current_count = len(self._workers) queue_depth = self._ready_queue.qsize() - # Simple scaling logic - if queue_depth > self._scale_up_threshold and current_count < self._max_workers: - self._create_worker() + # Count active vs idle workers + active_count = sum(1 for state in self._worker_active_states.values() if state) + idle_count = current_count - active_count + + # Try to scale up if queue is backing up + self._try_scale_up(queue_depth, current_count) + + # Try to scale down if we have excess capacity + self._try_scale_down(queue_depth, current_count, active_count, idle_count) def get_worker_count(self) -> int: """Get current number of workers.""" From fb307ae12896e703acdfa8c58d6298825a6a8203 Mon Sep 17 00:00:00 2001 From: lyzno1 
<92089059+lyzno1@users.noreply.github.com> Date: Thu, 4 Sep 2025 17:12:48 +0800 Subject: [PATCH 38/46] feat: add TypeScript type safety for i18next with automated maintenance (#25152) --- .../translate-i18n-base-on-english.yml | 16 ++- .github/workflows/web-tests.yml | 5 + web/global.d.ts | 2 + web/i18n-config/check-i18n-sync.js | 120 ++++++++++++++++ web/i18n-config/generate-i18n-types.js | 135 ++++++++++++++++++ web/package.json | 2 + web/types/i18n.d.ts | 96 +++++++++++++ 7 files changed, 373 insertions(+), 3 deletions(-) create mode 100644 web/i18n-config/check-i18n-sync.js create mode 100644 web/i18n-config/generate-i18n-types.js create mode 100644 web/types/i18n.d.ts diff --git a/.github/workflows/translate-i18n-base-on-english.yml b/.github/workflows/translate-i18n-base-on-english.yml index c004836808..836c3e0b02 100644 --- a/.github/workflows/translate-i18n-base-on-english.yml +++ b/.github/workflows/translate-i18n-base-on-english.yml @@ -67,12 +67,22 @@ jobs: working-directory: ./web run: pnpm run auto-gen-i18n ${{ env.FILE_ARGS }} + - name: Generate i18n type definitions + if: env.FILES_CHANGED == 'true' + working-directory: ./web + run: pnpm run gen:i18n-types + - name: Create Pull Request if: env.FILES_CHANGED == 'true' uses: peter-evans/create-pull-request@v6 with: token: ${{ secrets.GITHUB_TOKEN }} - commit-message: Update i18n files based on en-US changes - title: 'chore: translate i18n files' - body: This PR was automatically created to update i18n files based on changes in en-US locale. + commit-message: Update i18n files and type definitions based on en-US changes + title: 'chore: translate i18n files and update type definitions' + body: | + This PR was automatically created to update i18n files and TypeScript type definitions based on changes in en-US locale. 
+ + **Changes included:** + - Updated translation files for all locales + - Regenerated TypeScript type definitions for type safety branch: chore/automated-i18n-updates diff --git a/.github/workflows/web-tests.yml b/.github/workflows/web-tests.yml index e25ae2302f..3313e58614 100644 --- a/.github/workflows/web-tests.yml +++ b/.github/workflows/web-tests.yml @@ -47,6 +47,11 @@ jobs: working-directory: ./web run: pnpm install --frozen-lockfile + - name: Check i18n types synchronization + if: steps.changed-files.outputs.any_changed == 'true' + working-directory: ./web + run: pnpm run check:i18n-types + - name: Run tests if: steps.changed-files.outputs.any_changed == 'true' working-directory: ./web diff --git a/web/global.d.ts b/web/global.d.ts index 7fbe20421d..eb39fe0c39 100644 --- a/web/global.d.ts +++ b/web/global.d.ts @@ -8,3 +8,5 @@ declare module '*.mdx' { let MDXComponent: (props: any) => JSX.Element export default MDXComponent } + +import './types/i18n' diff --git a/web/i18n-config/check-i18n-sync.js b/web/i18n-config/check-i18n-sync.js new file mode 100644 index 0000000000..e67c567f49 --- /dev/null +++ b/web/i18n-config/check-i18n-sync.js @@ -0,0 +1,120 @@ +#!/usr/bin/env node + +const fs = require('fs') +const path = require('path') +const { camelCase } = require('lodash') + +// Import the NAMESPACES array from i18next-config.ts +function getNamespacesFromConfig() { + const configPath = path.join(__dirname, 'i18next-config.ts') + const configContent = fs.readFileSync(configPath, 'utf8') + + // Extract NAMESPACES array using regex + const namespacesMatch = configContent.match(/const NAMESPACES = \[([\s\S]*?)\]/) + if (!namespacesMatch) { + throw new Error('Could not find NAMESPACES array in i18next-config.ts') + } + + // Parse the namespaces + const namespacesStr = namespacesMatch[1] + const namespaces = namespacesStr + .split(',') + .map(line => line.trim()) + .filter(line => line.startsWith("'") || line.startsWith('"')) + .map(line => line.slice(1, -1)) // Remove quotes + + return namespaces +} + +function getNamespacesFromTypes() { + const typesPath = path.join(__dirname, '../types/i18n.d.ts') + + if (!fs.existsSync(typesPath)) { + return null + } + + const typesContent = fs.readFileSync(typesPath, 'utf8') + + // Extract namespaces from Messages type + const messagesMatch = typesContent.match(/export type Messages = \{([\s\S]*?)\}/) + if (!messagesMatch) { + return null + } + + // Parse the properties + const propertiesStr = messagesMatch[1] + const properties = propertiesStr + .split('\n') + .map(line => line.trim()) + .filter(line => line.includes(':')) + .map(line => line.split(':')[0].trim()) + .filter(prop => prop.length > 0) + + return properties +} + +function main() { + try { + console.log('🔍 Checking i18n types synchronization...') + + // Get namespaces from config + const configNamespaces = getNamespacesFromConfig() + console.log(`📦 Found ${configNamespaces.length} namespaces in config`) + + // Convert to camelCase for comparison + const configCamelCase = configNamespaces.map(ns => camelCase(ns)).sort() + + // Get namespaces from type definitions + const typeNamespaces = getNamespacesFromTypes() + + if (!typeNamespaces) { + console.error('❌ Type definitions file not found or invalid') + console.error(' Run: pnpm run gen:i18n-types') + process.exit(1) + } + + console.log(`🔧 Found ${typeNamespaces.length} namespaces in types`) + + const typeCamelCase = typeNamespaces.sort() + + // Compare arrays + const configSet = new Set(configCamelCase) + const typeSet = new 
Set(typeCamelCase)
+
+    // Find missing in types
+    const missingInTypes = configCamelCase.filter(ns => !typeSet.has(ns))
+
+    // Find extra in types
+    const extraInTypes = typeCamelCase.filter(ns => !configSet.has(ns))
+
+    let hasErrors = false
+
+    if (missingInTypes.length > 0) {
+      hasErrors = true
+      console.error('❌ Missing in type definitions:')
+      missingInTypes.forEach(ns => console.error(`  - ${ns}`))
+    }
+
+    if (extraInTypes.length > 0) {
+      hasErrors = true
+      console.error('❌ Extra in type definitions:')
+      extraInTypes.forEach(ns => console.error(`  - ${ns}`))
+    }
+
+    if (hasErrors) {
+      console.error('\n💡 To fix synchronization issues:')
+      console.error('   Run: pnpm run gen:i18n-types')
+      process.exit(1)
+    }
+
+    console.log('✅ i18n types are synchronized')
+
+  } catch (error) {
+    console.error('❌ Error:', error.message)
+    process.exit(1)
+  }
+}
+
+if (require.main === module) {
+  main()
+}
\ No newline at end of file
diff --git a/web/i18n-config/generate-i18n-types.js b/web/i18n-config/generate-i18n-types.js
new file mode 100644
index 0000000000..ba34446962
--- /dev/null
+++ b/web/i18n-config/generate-i18n-types.js
@@ -0,0 +1,135 @@
+#!/usr/bin/env node
+
+const fs = require('fs')
+const path = require('path')
+const { camelCase } = require('lodash')
+
+// Import the NAMESPACES array from i18next-config.ts
+function getNamespacesFromConfig() {
+  const configPath = path.join(__dirname, 'i18next-config.ts')
+  const configContent = fs.readFileSync(configPath, 'utf8')
+
+  // Extract NAMESPACES array using regex
+  const namespacesMatch = configContent.match(/const NAMESPACES = \[([\s\S]*?)\]/)
+  if (!namespacesMatch) {
+    throw new Error('Could not find NAMESPACES array in i18next-config.ts')
+  }
+
+  // Parse the namespaces
+  const namespacesStr = namespacesMatch[1]
+  const namespaces = namespacesStr
+    .split(',')
+    .map(line => line.trim())
+    .filter(line => line.startsWith("'") || line.startsWith('"'))
+    .map(line => line.slice(1, -1)) // Remove quotes
+
+  return namespaces
+}
+
+function generateTypeDefinitions(namespaces) {
+  const header = `// TypeScript type definitions for Dify's i18next configuration
+// This file is auto-generated. Do not edit manually.
+// To regenerate, run: pnpm run gen:i18n-types
+import 'react-i18next'
+
+// Extract types from translation files using typeof import pattern`
+
+  // Generate individual type definitions
+  const typeDefinitions = namespaces.map(namespace => {
+    const typeName = camelCase(namespace).replace(/^\w/, c => c.toUpperCase()) + 'Messages'
+    return `type ${typeName} = typeof import('../i18n/en-US/${namespace}').default`
+  }).join('\n')
+
+  // Generate Messages interface
+  const messagesInterface = `
+// Complete type structure that matches i18next-config.ts camelCase conversion
+export type Messages = {
+${namespaces.map(namespace => {
+  const camelCased = camelCase(namespace)
+  const typeName = camelCase(namespace).replace(/^\w/, c => c.toUpperCase()) + 'Messages'
+  return `  ${camelCased}: ${typeName};`
+}).join('\n')}
+}`

+  const utilityTypes = `
+// Utility type to flatten nested object keys into dot notation
+type FlattenKeys<T> = T extends object
+  ? {
+      [K in keyof T]: T[K] extends object
+        ? 
\`\${K & string}.\${FlattenKeys<T[K]> & string}\`
+        : \`\${K & string}\`
+    }[keyof T]
+  : never
+
+export type ValidTranslationKeys = FlattenKeys<Messages>`
+
+  const moduleDeclarations = `
+// Extend react-i18next with Dify's type structure
+declare module 'react-i18next' {
+  interface CustomTypeOptions {
+    defaultNS: 'translation';
+    resources: {
+      translation: Messages;
+    };
+  }
+}
+
+// Extend i18next for complete type safety
+declare module 'i18next' {
+  interface CustomTypeOptions {
+    defaultNS: 'translation';
+    resources: {
+      translation: Messages;
+    };
+  }
+}`
+
+  return [header, typeDefinitions, messagesInterface, utilityTypes, moduleDeclarations].join('\n\n')
+}
+
+function main() {
+  const args = process.argv.slice(2)
+  const checkMode = args.includes('--check')
+
+  try {
+    console.log('📦 Generating i18n type definitions...')
+
+    // Get namespaces from config
+    const namespaces = getNamespacesFromConfig()
+    console.log(`✅ Found ${namespaces.length} namespaces`)
+
+    // Generate type definitions
+    const typeDefinitions = generateTypeDefinitions(namespaces)
+
+    const outputPath = path.join(__dirname, '../types/i18n.d.ts')
+
+    if (checkMode) {
+      // Check mode: compare with existing file
+      if (!fs.existsSync(outputPath)) {
+        console.error('❌ Type definitions file does not exist')
+        process.exit(1)
+      }
+
+      const existingContent = fs.readFileSync(outputPath, 'utf8')
+      if (existingContent.trim() !== typeDefinitions.trim()) {
+        console.error('❌ Type definitions are out of sync')
+        console.error('   Run: pnpm run gen:i18n-types')
+        process.exit(1)
+      }
+
+      console.log('✅ Type definitions are in sync')
+    } else {
+      // Generate mode: write file
+      fs.writeFileSync(outputPath, typeDefinitions)
+      console.log(`✅ Generated type definitions: ${outputPath}`)
+    }
+
+  } catch (error) {
+    console.error('❌ Error:', error.message)
+    process.exit(1)
+  }
+}
+
+if (require.main === module) {
+  main()
+}
\ No newline at end of file
diff --git a/web/package.json b/web/package.json
index c736a37281..36be23d04c 100644
--- a/web/package.json
+++ b/web/package.json
@@ -35,6 +35,8 @@
     "uglify-embed": "node ./bin/uglify-embed",
     "check-i18n": "node ./i18n-config/check-i18n.js",
     "auto-gen-i18n": "node ./i18n-config/auto-gen-i18n.js",
+    "gen:i18n-types": "node ./i18n-config/generate-i18n-types.js",
+    "check:i18n-types": "node ./i18n-config/check-i18n-sync.js",
     "test": "jest",
     "test:watch": "jest --watch",
     "storybook": "storybook dev -p 6006",
diff --git a/web/types/i18n.d.ts b/web/types/i18n.d.ts
new file mode 100644
index 0000000000..5020920bf2
--- /dev/null
+++ b/web/types/i18n.d.ts
@@ -0,0 +1,96 @@
+// TypeScript type definitions for Dify's i18next configuration
+// This file is auto-generated. Do not edit manually.
+// To regenerate, run: pnpm run gen:i18n-types
+import 'react-i18next'
+
+// Extract types from translation files using typeof import pattern
+
+type AppAnnotationMessages = typeof import('../i18n/en-US/app-annotation').default
+type AppApiMessages = typeof import('../i18n/en-US/app-api').default
+type AppDebugMessages = typeof import('../i18n/en-US/app-debug').default
+type AppLogMessages = typeof import('../i18n/en-US/app-log').default
+type AppOverviewMessages = typeof import('../i18n/en-US/app-overview').default
+type AppMessages = typeof import('../i18n/en-US/app').default
+type BillingMessages = typeof import('../i18n/en-US/billing').default
+type CommonMessages = typeof import('../i18n/en-US/common').default
+type CustomMessages = typeof import('../i18n/en-US/custom').default
+type DatasetCreationMessages = typeof import('../i18n/en-US/dataset-creation').default
+type DatasetDocumentsMessages = typeof import('../i18n/en-US/dataset-documents').default
+type DatasetHitTestingMessages = typeof import('../i18n/en-US/dataset-hit-testing').default
+type DatasetSettingsMessages = typeof import('../i18n/en-US/dataset-settings').default
+type DatasetMessages = typeof import('../i18n/en-US/dataset').default
+type EducationMessages = typeof import('../i18n/en-US/education').default
+type ExploreMessages = typeof import('../i18n/en-US/explore').default
+type LayoutMessages = typeof import('../i18n/en-US/layout').default
+type LoginMessages = typeof import('../i18n/en-US/login').default
+type OauthMessages = typeof import('../i18n/en-US/oauth').default
+type PluginTagsMessages = typeof import('../i18n/en-US/plugin-tags').default
+type PluginMessages = typeof import('../i18n/en-US/plugin').default
+type RegisterMessages = typeof import('../i18n/en-US/register').default
+type RunLogMessages = typeof import('../i18n/en-US/run-log').default
+type ShareMessages = typeof import('../i18n/en-US/share').default
+type TimeMessages = typeof import('../i18n/en-US/time').default
+type ToolsMessages = typeof import('../i18n/en-US/tools').default
+type WorkflowMessages = typeof import('../i18n/en-US/workflow').default
+
+// Complete type structure that matches i18next-config.ts camelCase conversion
+export type Messages = {
+  appAnnotation: AppAnnotationMessages;
+  appApi: AppApiMessages;
+  appDebug: AppDebugMessages;
+  appLog: AppLogMessages;
+  appOverview: AppOverviewMessages;
+  app: AppMessages;
+  billing: BillingMessages;
+  common: CommonMessages;
+  custom: CustomMessages;
+  datasetCreation: DatasetCreationMessages;
+  datasetDocuments: DatasetDocumentsMessages;
+  datasetHitTesting: DatasetHitTestingMessages;
+  datasetSettings: DatasetSettingsMessages;
+  dataset: DatasetMessages;
+  education: EducationMessages;
+  explore: ExploreMessages;
+  layout: LayoutMessages;
+  login: LoginMessages;
+  oauth: OauthMessages;
+  pluginTags: PluginTagsMessages;
+  plugin: PluginMessages;
+  register: RegisterMessages;
+  runLog: RunLogMessages;
+  share: ShareMessages;
+  time: TimeMessages;
+  tools: ToolsMessages;
+  workflow: WorkflowMessages;
+}
+
+// Utility type to flatten nested object keys into dot notation
+type FlattenKeys<T> = T extends object
+  ? {
+      [K in keyof T]: T[K] extends object
+        ? 
`${K & string}.${FlattenKeys<T[K]> & string}`
+        : `${K & string}`
+    }[keyof T]
+  : never
+
+export type ValidTranslationKeys = FlattenKeys<Messages>
+
+// Extend react-i18next with Dify's type structure
+declare module 'react-i18next' {
+  interface CustomTypeOptions {
+    defaultNS: 'translation';
+    resources: {
+      translation: Messages;
+    };
+  }
+}
+
+// Extend i18next for complete type safety
+declare module 'i18next' {
+  interface CustomTypeOptions {
+    defaultNS: 'translation';
+    resources: {
+      translation: Messages;
+    };
+  }
+}

From 4d63bd208347defe071e8fce90592534d734176b Mon Sep 17 00:00:00 2001
From: -LAN-
Date: Thu, 4 Sep 2025 17:47:13 +0800
Subject: [PATCH 39/46] refactor(graph_engine): rename SimpleWorkerPool to WorkerPool

---
 api/core/workflow/graph_engine/graph_engine.py               | 4 ++--
 .../graph_engine/orchestration/execution_coordinator.py      | 4 ++--
 api/core/workflow/graph_engine/worker_management/__init__.py | 4 ++--
 .../{simple_worker_pool.py => worker_pool.py}                | 2 +-
 4 files changed, 7 insertions(+), 7 deletions(-)
 rename api/core/workflow/graph_engine/worker_management/{simple_worker_pool.py => worker_pool.py} (99%)

diff --git a/api/core/workflow/graph_engine/graph_engine.py b/api/core/workflow/graph_engine/graph_engine.py
index fb3c0aadf6..45f3ada7f5 100644
--- a/api/core/workflow/graph_engine/graph_engine.py
+++ b/api/core/workflow/graph_engine/graph_engine.py
@@ -39,7 +39,7 @@ from .orchestration import Dispatcher, ExecutionCoordinator
 from .protocols.command_channel import CommandChannel
 from .response_coordinator import ResponseStreamCoordinator
 from .state_management import UnifiedStateManager
-from .worker_management import SimpleWorkerPool
+from .worker_management import WorkerPool

 logger = logging.getLogger(__name__)

@@ -187,7 +187,7 @@ class GraphEngine:
         context_vars = contextvars.copy_context()

         # Create worker pool for parallel node execution
-        self._worker_pool = SimpleWorkerPool(
+        self._worker_pool = WorkerPool(
             ready_queue=self._ready_queue,
             event_queue=self._event_queue,
             graph=self._graph,
diff --git a/api/core/workflow/graph_engine/orchestration/execution_coordinator.py b/api/core/workflow/graph_engine/orchestration/execution_coordinator.py
index 3dd443ddb3..234a3607c3 100644
--- a/api/core/workflow/graph_engine/orchestration/execution_coordinator.py
+++ b/api/core/workflow/graph_engine/orchestration/execution_coordinator.py
@@ -8,7 +8,7 @@ from ..command_processing import CommandProcessor
 from ..domain import GraphExecution
 from ..event_management import EventManager
 from ..state_management import UnifiedStateManager
-from ..worker_management import SimpleWorkerPool
+from ..worker_management import WorkerPool

 if TYPE_CHECKING:
     from ..event_management import EventHandler
@@ -30,7 +30,7 @@ class ExecutionCoordinator:
         event_handler: "EventHandler",
         event_collector: EventManager,
         command_processor: CommandProcessor,
-        worker_pool: SimpleWorkerPool,
+        worker_pool: WorkerPool,
     ) -> None:
         """
         Initialize the execution coordinator.
diff --git a/api/core/workflow/graph_engine/worker_management/__init__.py b/api/core/workflow/graph_engine/worker_management/__init__.py
index 5b25dbc79a..03de1f6daa 100644
--- a/api/core/workflow/graph_engine/worker_management/__init__.py
+++ b/api/core/workflow/graph_engine/worker_management/__init__.py
@@ -5,8 +5,8 @@ This package manages the worker pool, including creation, scaling,
 and activity tracking.
""" -from .simple_worker_pool import SimpleWorkerPool +from .worker_pool import WorkerPool __all__ = [ - "SimpleWorkerPool", + "WorkerPool", ] diff --git a/api/core/workflow/graph_engine/worker_management/simple_worker_pool.py b/api/core/workflow/graph_engine/worker_management/worker_pool.py similarity index 99% rename from api/core/workflow/graph_engine/worker_management/simple_worker_pool.py rename to api/core/workflow/graph_engine/worker_management/worker_pool.py index 367c2b36fc..40de0ffa08 100644 --- a/api/core/workflow/graph_engine/worker_management/simple_worker_pool.py +++ b/api/core/workflow/graph_engine/worker_management/worker_pool.py @@ -26,7 +26,7 @@ if TYPE_CHECKING: @final -class SimpleWorkerPool: +class WorkerPool: """ Simple worker pool with integrated management. From 2aeaefccece7063498e74a87bf1a7440b06b82dd Mon Sep 17 00:00:00 2001 From: -LAN- Date: Thu, 4 Sep 2025 17:47:36 +0800 Subject: [PATCH 40/46] test: fix test --- .../parameter_extractor_node.py | 21 +++++++++++-------- 1 file changed, 12 insertions(+), 9 deletions(-) diff --git a/api/core/workflow/nodes/parameter_extractor/parameter_extractor_node.py b/api/core/workflow/nodes/parameter_extractor/parameter_extractor_node.py index 445fe364ff..663b583590 100644 --- a/api/core/workflow/nodes/parameter_extractor/parameter_extractor_node.py +++ b/api/core/workflow/nodes/parameter_extractor/parameter_extractor_node.py @@ -577,16 +577,19 @@ class ParameterExtractorNode(Node): return int(value) elif isinstance(value, (int, float)): return value - if "." in value: - try: - return float(value) - except ValueError: - return None + elif isinstance(value, str): + if "." in value: + try: + return float(value) + except ValueError: + return None + else: + try: + return int(value) + except ValueError: + return None else: - try: - return int(value) - except ValueError: - return None + return None def _transform_result(self, data: ParameterExtractorNodeData, result: dict) -> dict: """ From ad9eed2551f2ec8ce59b87a1038df5b8b2273da8 Mon Sep 17 00:00:00 2001 From: -LAN- Date: Thu, 4 Sep 2025 19:11:22 +0800 Subject: [PATCH 41/46] fix: disable scale for perfermance Signed-off-by: -LAN- --- .../worker_management/worker_pool.py | 4 ++-- .../workflow/graph_engine/test_table_runner.py | 17 ++++++++++++----- 2 files changed, 14 insertions(+), 7 deletions(-) diff --git a/api/core/workflow/graph_engine/worker_management/worker_pool.py b/api/core/workflow/graph_engine/worker_management/worker_pool.py index 40de0ffa08..cfc0f5ab65 100644 --- a/api/core/workflow/graph_engine/worker_management/worker_pool.py +++ b/api/core/workflow/graph_engine/worker_management/worker_pool.py @@ -151,8 +151,8 @@ class WorkerPool: worker_id=worker_id, flask_app=self._flask_app, context_vars=self._context_vars, - on_idle_callback=self._on_worker_idle, - on_active_callback=self._on_worker_active, + # on_idle_callback=self._on_worker_idle, + # on_active_callback=self._on_worker_active, ) worker.start() diff --git a/api/tests/unit_tests/core/workflow/graph_engine/test_table_runner.py b/api/tests/unit_tests/core/workflow/graph_engine/test_table_runner.py index c6e5f72888..744e558e99 100644 --- a/api/tests/unit_tests/core/workflow/graph_engine/test_table_runner.py +++ b/api/tests/unit_tests/core/workflow/graph_engine/test_table_runner.py @@ -15,6 +15,7 @@ import time from collections.abc import Callable, Sequence from concurrent.futures import ThreadPoolExecutor, as_completed from dataclasses import dataclass, field +from functools import lru_cache from pathlib import Path 
from typing import Any, Optional


@@ -135,15 +136,12 @@ class WorkflowRunner:
             raise ValueError(f"Fixtures directory does not exist: {self.fixtures_dir}")

     def load_fixture(self, fixture_name: str) -> dict[str, Any]:
-        """Load a YAML fixture file."""
+        """Load a YAML fixture file with caching to avoid repeated parsing."""
         if not fixture_name.endswith(".yml") and not fixture_name.endswith(".yaml"):
             fixture_name = f"{fixture_name}.yml"

         fixture_path = self.fixtures_dir / fixture_name
-        if not fixture_path.exists():
-            raise FileNotFoundError(f"Fixture file not found: {fixture_path}")
-
-        return load_yaml_file(str(fixture_path), ignore_error=False)
+        return _load_fixture(fixture_path, fixture_name)

     def create_graph_from_fixture(
         self,
@@ -709,3 +707,12 @@ class TableTestRunner:
         report.append("=" * 80)

         return "\n".join(report)
+
+
+@lru_cache(maxsize=32)
+def _load_fixture(fixture_path: Path, fixture_name: str) -> dict[str, Any]:
+    """Load a YAML fixture file with caching to avoid repeated parsing."""
+    if not fixture_path.exists():
+        raise FileNotFoundError(f"Fixture file not found: {fixture_path}")
+
+    return load_yaml_file(str(fixture_path), ignore_error=False)
From aff7ca12b8b84cdf900cbe1598ae18a551856e11 Mon Sep 17 00:00:00 2001
From: -LAN-
Date: Thu, 4 Sep 2025 19:25:08 +0800
Subject: [PATCH 42/46] fix(code_node): type checking bypass

Signed-off-by: -LAN-
---
 api/core/workflow/nodes/code/code_node.py | 5 ++++-
 1 file changed, 4 insertions(+), 1 deletion(-)

diff --git a/api/core/workflow/nodes/code/code_node.py b/api/core/workflow/nodes/code/code_node.py
index 61820cc700..07cb726451 100644
--- a/api/core/workflow/nodes/code/code_node.py
+++ b/api/core/workflow/nodes/code/code_node.py
@@ -265,8 +265,11 @@ class CodeNode(Node):

         elif output_config.type == SegmentType.STRING:
             # check if string available
+            value = result.get(output_name)
+            if value is not None and not isinstance(value, str):
+                raise OutputValidationError(f"Output value `{value}` is not a string")
             transformed_result[output_name] = self._check_string(
-                value=result[output_name],
+                value=value,
                 variable=f"{prefix}{dot}{output_name}",
             )
         elif output_config.type == SegmentType.BOOLEAN:
From 36048d152603a9a46264fd3b5916e20578db7bc0 Mon Sep 17 00:00:00 2001
From: -LAN-
Date: Thu, 4 Sep 2025 19:32:07 +0800
Subject: [PATCH 43/46] feat(graph_engine): allow scale-down without lock

Signed-off-by: -LAN-
---
 api/core/workflow/graph_engine/worker.py      | 16 +++++
 .../worker_management/worker_pool.py          | 65 ++++---------------
 2 files changed, 29 insertions(+), 52 deletions(-)

diff --git a/api/core/workflow/graph_engine/worker.py b/api/core/workflow/graph_engine/worker.py
index df2fbf486e..8f2ae06795 100644
--- a/api/core/workflow/graph_engine/worker.py
+++ b/api/core/workflow/graph_engine/worker.py
@@ -74,6 +74,22 @@ class Worker(threading.Thread):
         """Signal the worker to stop processing."""
         self._stop_event.set()

+    @property
+    def is_idle(self) -> bool:
+        """Check if the worker is currently idle."""
+        # Worker is idle if it hasn't processed a task recently (within 0.2 seconds)
+        return (time.time() - self._last_task_time) > 0.2
+
+    @property
+    def idle_duration(self) -> float:
+        """Get the duration in seconds since the worker last processed a task."""
+        return time.time() - self._last_task_time
+
+    @property
+    def worker_id(self) -> int:
+        """Get the worker's ID."""
+        return self._worker_id
+
     @override
     def run(self) -> None:
         """
diff --git a/api/core/workflow/graph_engine/worker_management/worker_pool.py 
b/api/core/workflow/graph_engine/worker_management/worker_pool.py index cfc0f5ab65..d4b09219b6 100644 --- a/api/core/workflow/graph_engine/worker_management/worker_pool.py +++ b/api/core/workflow/graph_engine/worker_management/worker_pool.py @@ -8,7 +8,6 @@ DynamicScaler, and WorkerFactory into a single class. import logging import queue import threading -import time from typing import TYPE_CHECKING, final from configs import dify_config @@ -78,9 +77,7 @@ class WorkerPool: self._lock = threading.RLock() self._running = False - # Track worker idle times for scale-down - self._worker_idle_times: dict[int, float] = {} - self._worker_active_states: dict[int, bool] = {} + # No longer tracking worker states with callbacks to avoid lock contention def start(self, initial_count: int | None = None) -> None: """ @@ -136,8 +133,6 @@ class WorkerPool: worker.join(timeout=10.0) self._workers.clear() - self._worker_active_states.clear() - self._worker_idle_times.clear() def _create_worker(self) -> None: """Create and start a new worker.""" @@ -158,31 +153,6 @@ class WorkerPool: worker.start() self._workers.append(worker) - # Initialize tracking - self._worker_active_states[worker_id] = True - self._worker_idle_times[worker_id] = 0.0 - - def _on_worker_idle(self, worker_id: int) -> None: - """Handle worker becoming idle.""" - with self._lock: - if worker_id not in self._worker_active_states: - return - - # Mark as idle and record time if transitioning from active - if self._worker_active_states.get(worker_id, False): - self._worker_active_states[worker_id] = False - self._worker_idle_times[worker_id] = time.time() - - def _on_worker_active(self, worker_id: int) -> None: - """Handle worker becoming active.""" - with self._lock: - if worker_id not in self._worker_active_states: - return - - # Mark as active and clear idle time - self._worker_active_states[worker_id] = True - self._worker_idle_times[worker_id] = 0.0 - def _remove_worker(self, worker: Worker, worker_id: int) -> None: """Remove a specific worker from the pool.""" # Stop the worker @@ -192,14 +162,10 @@ class WorkerPool: if worker.is_alive(): worker.join(timeout=2.0) - # Remove from list and tracking + # Remove from list if worker in self._workers: self._workers.remove(worker) - # Clean up tracking - self._worker_active_states.pop(worker_id, None) - self._worker_idle_times.pop(worker_id, None) - def _try_scale_up(self, queue_depth: int, current_count: int) -> bool: """ Try to scale up workers if needed. 
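
The rewritten scale-down path below assumes each Worker exposes cheap, read-only idle state, as added in the worker.py hunk above. A minimal standalone sketch of that contract, with a hypothetical class name (the 0.2s threshold mirrors Worker.is_idle in this series):

    import threading
    import time

    class SamplingWorker(threading.Thread):
        """Sketch: timestamp the last dequeued task instead of firing callbacks."""

        IDLE_AFTER_SECONDS = 0.2

        def __init__(self) -> None:
            super().__init__(daemon=True)
            self._last_task_time = time.time()

        def _mark_task_dequeued(self) -> None:
            # A single float store; under CPython's GIL a monitor thread can
            # read it without a lock and at worst sees a slightly stale value.
            self._last_task_time = time.time()

        @property
        def is_idle(self) -> bool:
            return (time.time() - self._last_task_time) > self.IDLE_AFTER_SECONDS

        @property
        def idle_duration(self) -> float:
            return time.time() - self._last_task_time

The trade-off is that idleness is approximate: a worker that dequeued a long-running task reads as idle while still busy. That is tolerable for scaling heuristics, though it would not be for correctness-critical accounting.
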
@@ -252,23 +218,18 @@ class WorkerPool: if not has_excess_capacity: return False - # Find and remove idle workers - current_time = time.time() + # Find and remove idle workers that have been idle long enough workers_to_remove = [] for worker in self._workers: - worker_id = worker._worker_id - # Check if worker is idle and has exceeded idle time threshold - if not self._worker_active_states.get(worker_id, True) and self._worker_idle_times.get(worker_id, 0) > 0: - idle_duration = current_time - self._worker_idle_times[worker_id] - if idle_duration >= self._scale_down_idle_time: - # Don't remove if it would leave us unable to handle the queue - remaining_workers = current_count - len(workers_to_remove) - 1 - if remaining_workers >= self._min_workers and remaining_workers >= max(1, queue_depth // 2): - workers_to_remove.append((worker, worker_id)) - # Only remove one worker per check to avoid aggressive scaling - break + if worker.is_idle and worker.idle_duration >= self._scale_down_idle_time: + # Don't remove if it would leave us unable to handle the queue + remaining_workers = current_count - len(workers_to_remove) - 1 + if remaining_workers >= self._min_workers and remaining_workers >= max(1, queue_depth // 2): + workers_to_remove.append((worker, worker.worker_id)) + # Only remove one worker per check to avoid aggressive scaling + break # Remove idle workers if any found if workers_to_remove: @@ -300,9 +261,9 @@ class WorkerPool: current_count = len(self._workers) queue_depth = self._ready_queue.qsize() - # Count active vs idle workers - active_count = sum(1 for state in self._worker_active_states.values() if state) - idle_count = current_count - active_count + # Count active vs idle workers by querying their state directly + idle_count = sum(1 for worker in self._workers if worker.is_idle) + active_count = current_count - idle_count # Try to scale up if queue is backing up self._try_scale_up(queue_depth, current_count) From e229510e73c4191b38e84ff2e4c5145ef3181a38 Mon Sep 17 00:00:00 2001 From: -LAN- Date: Thu, 4 Sep 2025 19:37:31 +0800 Subject: [PATCH 44/46] perf: eliminate lock contention in worker pool by removing callbacks Remove worker idle/active callbacks that caused severe lock contention. Instead, use sampling-based monitoring where worker states are queried on-demand during scaling decisions. This eliminates the performance bottleneck caused by workers acquiring locks 10+ times per second. Changes: - Remove callback parameters from Worker class - Add properties to expose worker idle state directly - Update WorkerPool to query worker states without callbacks - Maintain scaling functionality with better performance --- api/core/workflow/graph_engine/worker.py | 14 -------------- .../graph_engine/worker_management/worker_pool.py | 2 -- 2 files changed, 16 deletions(-) diff --git a/api/core/workflow/graph_engine/worker.py b/api/core/workflow/graph_engine/worker.py index 8f2ae06795..e7462309c9 100644 --- a/api/core/workflow/graph_engine/worker.py +++ b/api/core/workflow/graph_engine/worker.py @@ -9,7 +9,6 @@ import contextvars import queue import threading import time -from collections.abc import Callable from datetime import datetime from typing import final from uuid import uuid4 @@ -42,8 +41,6 @@ class Worker(threading.Thread): worker_id: int = 0, flask_app: Flask | None = None, context_vars: contextvars.Context | None = None, - on_idle_callback: Callable[[int], None] | None = None, - on_active_callback: Callable[[int], None] | None = None, ) -> None: """ Initialize worker thread. 
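
With the idle/active callbacks removed here, the pool's periodic check becomes the only consumer of worker state. A condensed sketch of the resulting decision loop, with public attributes standing in for WorkerPool's private ones and the excess-capacity test simplified:

    def check_and_scale(pool) -> None:
        """Sample queue depth and worker idleness, then adjust pool size."""
        queue_depth = pool.ready_queue.qsize()
        current = len(pool.workers)
        idle = sum(1 for w in pool.workers if w.is_idle)
        active = current - idle

        # Scale up when the queue is backing up and we are under the cap.
        if queue_depth > pool.scale_up_threshold and current < pool.max_workers:
            pool.create_worker()
            return

        # Scale down at most one worker per check: it must have been idle long
        # enough, and removing it must keep the pool above the minimum and able
        # to drain roughly half of the current queue.
        if queue_depth <= active or idle > active:
            for worker in pool.workers:
                if worker.is_idle and worker.idle_duration >= pool.scale_down_idle_time:
                    if current - 1 >= max(pool.min_workers, 1, queue_depth // 2):
                        pool.remove_worker(worker)
                    break

Because workers no longer touch the pool lock on every dequeue, only this periodic check synchronizes; the sampled counts can be slightly stale, which the one-worker-per-check policy absorbs.
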
@@ -55,8 +52,6 @@ class Worker(threading.Thread): worker_id: Unique identifier for this worker flask_app: Optional Flask application for context preservation context_vars: Optional context variables to preserve in worker thread - on_idle_callback: Optional callback when worker becomes idle - on_active_callback: Optional callback when worker becomes active """ super().__init__(name=f"GraphWorker-{worker_id}", daemon=True) self._ready_queue = ready_queue @@ -66,8 +61,6 @@ class Worker(threading.Thread): self._flask_app = flask_app self._context_vars = context_vars self._stop_event = threading.Event() - self._on_idle_callback = on_idle_callback - self._on_active_callback = on_active_callback self._last_task_time = time.time() def stop(self) -> None: @@ -103,15 +96,8 @@ class Worker(threading.Thread): try: node_id = self._ready_queue.get(timeout=0.1) except queue.Empty: - # Notify that worker is idle - if self._on_idle_callback: - self._on_idle_callback(self._worker_id) continue - # Notify that worker is active - if self._on_active_callback: - self._on_active_callback(self._worker_id) - self._last_task_time = time.time() node = self._graph.nodes[node_id] try: diff --git a/api/core/workflow/graph_engine/worker_management/worker_pool.py b/api/core/workflow/graph_engine/worker_management/worker_pool.py index d4b09219b6..25671ce6ba 100644 --- a/api/core/workflow/graph_engine/worker_management/worker_pool.py +++ b/api/core/workflow/graph_engine/worker_management/worker_pool.py @@ -146,8 +146,6 @@ class WorkerPool: worker_id=worker_id, flask_app=self._flask_app, context_vars=self._context_vars, - # on_idle_callback=self._on_worker_idle, - # on_active_callback=self._on_worker_active, ) worker.start() From f6a2a09815e3f59cbb8f89686eda117413bf0d1e Mon Sep 17 00:00:00 2001 From: -LAN- Date: Thu, 4 Sep 2025 20:04:29 +0800 Subject: [PATCH 45/46] test: fix code node Signed-off-by: -LAN- --- api/core/workflow/nodes/code/code_node.py | 40 ++++++++++++++--------- 1 file changed, 25 insertions(+), 15 deletions(-) diff --git a/api/core/workflow/nodes/code/code_node.py b/api/core/workflow/nodes/code/code_node.py index 07cb726451..04c73093a2 100644 --- a/api/core/workflow/nodes/code/code_node.py +++ b/api/core/workflow/nodes/code/code_node.py @@ -279,31 +279,36 @@ class CodeNode(Node): ) elif output_config.type == SegmentType.ARRAY_NUMBER: # check if array of number available - if not isinstance(result[output_name], list): - if result[output_name] is None: + value = result[output_name] + if not isinstance(value, list): + if value is None: transformed_result[output_name] = None else: raise OutputValidationError( - f"Output {prefix}{dot}{output_name} is not an array," - f" got {type(result.get(output_name))} instead." + f"Output {prefix}{dot}{output_name} is not an array, got {type(value)} instead." ) else: - if len(result[output_name]) > dify_config.CODE_MAX_NUMBER_ARRAY_LENGTH: + if len(value) > dify_config.CODE_MAX_NUMBER_ARRAY_LENGTH: raise OutputValidationError( f"The length of output variable `{prefix}{dot}{output_name}` must be" f" less than {dify_config.CODE_MAX_NUMBER_ARRAY_LENGTH} elements." ) + for i, inner_value in enumerate(value): + if not isinstance(inner_value, (int, float)): + raise OutputValidationError( + f"The element at index {i} of output variable `{prefix}{dot}{output_name}` must be" + f" a number." 
+                            )
+                        _ = self._check_number(value=inner_value, variable=f"{prefix}{dot}{output_name}[{i}]")
                     transformed_result[output_name] = [
                         # If the element is a boolean and the output schema specifies a `array[number]` type,
                         # convert the boolean value to an integer.
                         #
                         # This ensures compatibility with existing workflows that may use
                         # `True` and `False` as values for NUMBER type outputs.
-                        self._convert_boolean_to_int(
-                            self._check_number(value=value, variable=f"{prefix}{dot}{output_name}[{i}]"),
-                        )
-                        for i, value in enumerate(result[output_name])
+                        self._convert_boolean_to_int(v)
+                        for v in value
                     ]
             elif output_config.type == SegmentType.ARRAY_STRING:
                 # check if array of string available
@@ -366,8 +371,9 @@ class CodeNode(Node):
                     ]
             elif output_config.type == SegmentType.ARRAY_BOOLEAN:
                 # check if array of boolean available
-                if not isinstance(result[output_name], list):
-                    if result[output_name] is None:
+                value = result[output_name]
+                if not isinstance(value, list):
+                    if value is None:
                         transformed_result[output_name] = None
                     else:
                         raise OutputValidationError(
@@ -375,10 +381,14 @@ class CodeNode(Node):
                             f" got {type(result.get(output_name))} instead."
                         )
                 else:
-                    transformed_result[output_name] = [
-                        self._check_boolean(value=value, variable=f"{prefix}{dot}{output_name}[{i}]")
-                        for i, value in enumerate(result[output_name])
-                    ]
+                    for i, inner_value in enumerate(value):
+                        if not isinstance(inner_value, bool | None):
+                            raise OutputValidationError(
+                                f"Output {prefix}{dot}{output_name}[{i}] is not a boolean,"
+                                f" got {type(inner_value)} instead."
+                            )
+                        _ = self._check_boolean(value=inner_value, variable=f"{prefix}{dot}{output_name}[{i}]")
+                    transformed_result[output_name] = value
             else:
                 raise OutputValidationError(f"Output type {output_config.type} is not supported.")

From 9c2943183e30b17deac5b08b19147ae48373b01b Mon Sep 17 00:00:00 2001
From: -LAN-
Date: Thu, 4 Sep 2025 20:17:28 +0800
Subject: [PATCH 46/46] test: fix code node

Signed-off-by: -LAN-
---
 api/core/workflow/nodes/code/code_node.py | 8 +++++++-
 1 file changed, 7 insertions(+), 1 deletion(-)

diff --git a/api/core/workflow/nodes/code/code_node.py b/api/core/workflow/nodes/code/code_node.py
index 04c73093a2..bd713e49ea 100644
--- a/api/core/workflow/nodes/code/code_node.py
+++ b/api/core/workflow/nodes/code/code_node.py
@@ -255,7 +255,13 @@ class CodeNode(Node):
             )
         elif output_config.type == SegmentType.NUMBER:
             # check if number available
-            checked = self._check_number(value=result[output_name], variable=f"{prefix}{dot}{output_name}")
+            value = result.get(output_name)
+            if not isinstance(value, int | float | None):
+                raise OutputValidationError(
+                    f"Output {prefix}{dot}{output_name} is not a number,"
+                    f" got {type(result.get(output_name))} instead."
+                )
+            checked = self._check_number(value=value, variable=f"{prefix}{dot}{output_name}")
             # If the output is a boolean and the output schema specifies a NUMBER type,
             # convert the boolean value to an integer.
             #
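
A note on the isinstance form used in the NUMBER branch above: since Python 3.10, isinstance accepts PEP 604 unions, and None in a union is normalized to NoneType, whereas a plain tuple containing the None object raises TypeError at call time. And because bool is a subclass of int, booleans pass the number check, which is exactly why the boolean-to-int conversion described in the trailing comment exists. A short self-contained illustration (demonstration only, not part of the patch):

    value = 3.14

    assert isinstance(value, int | float | None)   # union form: None -> NoneType
    assert isinstance(None, int | float | None)    # missing outputs pass through
    assert isinstance(True, int | float | None)    # bool subclasses int

    try:
        isinstance(value, (int, float, None))      # the None *object* in a tuple
    except TypeError as exc:
        print(f"tuple form raises: {exc}")
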