Merge remote-tracking branch 'origin/main' into feat/trigger

lyzno1 2025-09-30 18:56:21 +08:00
commit 1bfa8e6662
30 changed files with 704 additions and 1210 deletions

View File

@@ -1,9 +1,11 @@
import logging
import queue
import time
from abc import abstractmethod
from enum import IntEnum, auto
from typing import Any
from redis.exceptions import RedisError
from sqlalchemy.orm import DeclarativeMeta
from configs import dify_config
@@ -18,6 +20,8 @@ from core.app.entities.queue_entities import (
)
from extensions.ext_redis import redis_client
logger = logging.getLogger(__name__)
class PublishFrom(IntEnum):
APPLICATION_MANAGER = auto()
@@ -35,9 +39,8 @@ class AppQueueManager:
self.invoke_from = invoke_from # Public accessor for invoke_from
user_prefix = "account" if self._invoke_from in {InvokeFrom.EXPLORE, InvokeFrom.DEBUGGER} else "end-user"
redis_client.setex(
AppQueueManager._generate_task_belong_cache_key(self._task_id), 1800, f"{user_prefix}-{self._user_id}"
)
self._task_belong_cache_key = AppQueueManager._generate_task_belong_cache_key(self._task_id)
redis_client.setex(self._task_belong_cache_key, 1800, f"{user_prefix}-{self._user_id}")
q: queue.Queue[WorkflowQueueMessage | MessageQueueMessage | None] = queue.Queue()
@@ -79,9 +82,21 @@ class AppQueueManager:
Stop listen to queue
:return:
"""
self._clear_task_belong_cache()
self._q.put(None)
def publish_error(self, e, pub_from: PublishFrom):
def _clear_task_belong_cache(self) -> None:
"""
Remove the task belong cache key once listening is finished.
"""
try:
redis_client.delete(self._task_belong_cache_key)
except RedisError:
logger.exception(
"Failed to clear task belong cache for task %s (key: %s)", self._task_id, self._task_belong_cache_key
)
def publish_error(self, e, pub_from: PublishFrom) -> None:
"""
Publish error
:param e: error
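Note: a minimal sketch of the cleanup pattern this hunk introduces (the Redis key format and class shape below are illustrative, not the project's exact ones). The key is computed once in the constructor, and stop_listen deletes it best-effort so a Redis outage cannot block queue shutdown:

import logging

import redis
from redis.exceptions import RedisError

logger = logging.getLogger(__name__)
redis_client = redis.Redis()  # assumed connection for this sketch


class QueueManagerSketch:
    def __init__(self, task_id: str, owner: str) -> None:
        self._task_id = task_id
        # Compute the cache key once and keep it for later cleanup.
        self._task_belong_cache_key = f"task_belong:{task_id}"  # hypothetical key format
        redis_client.setex(self._task_belong_cache_key, 1800, owner)

    def stop_listen(self) -> None:
        # Best-effort delete: failures are logged, never raised, so the
        # queue sentinel can still be enqueued afterwards.
        try:
            redis_client.delete(self._task_belong_cache_key)
        except RedisError:
            logger.exception("Failed to clear task belong cache for task %s", self._task_id)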

View File

@@ -74,7 +74,7 @@ class TextPromptMessageContent(PromptMessageContent):
Model class for text prompt message content.
"""
type: Literal[PromptMessageContentType.TEXT] = PromptMessageContentType.TEXT
type: Literal[PromptMessageContentType.TEXT] = PromptMessageContentType.TEXT # type: ignore
data: str
@@ -95,11 +95,11 @@ class MultiModalPromptMessageContent(PromptMessageContent):
class VideoPromptMessageContent(MultiModalPromptMessageContent):
type: Literal[PromptMessageContentType.VIDEO] = PromptMessageContentType.VIDEO
type: Literal[PromptMessageContentType.VIDEO] = PromptMessageContentType.VIDEO # type: ignore
class AudioPromptMessageContent(MultiModalPromptMessageContent):
type: Literal[PromptMessageContentType.AUDIO] = PromptMessageContentType.AUDIO
type: Literal[PromptMessageContentType.AUDIO] = PromptMessageContentType.AUDIO # type: ignore
class ImagePromptMessageContent(MultiModalPromptMessageContent):
@@ -111,12 +111,12 @@ class ImagePromptMessageContent(MultiModalPromptMessageContent):
LOW = auto()
HIGH = auto()
type: Literal[PromptMessageContentType.IMAGE] = PromptMessageContentType.IMAGE
type: Literal[PromptMessageContentType.IMAGE] = PromptMessageContentType.IMAGE # type: ignore
detail: DETAIL = DETAIL.LOW
class DocumentPromptMessageContent(MultiModalPromptMessageContent):
type: Literal[PromptMessageContentType.DOCUMENT] = PromptMessageContentType.DOCUMENT
type: Literal[PromptMessageContentType.DOCUMENT] = PromptMessageContentType.DOCUMENT # type: ignore
PromptMessageContentUnionTypes = Annotated[

View File

@@ -15,7 +15,7 @@ class GPT2Tokenizer:
use gpt2 tokenizer to get num tokens
"""
_tokenizer = GPT2Tokenizer.get_encoder()
tokens = _tokenizer.encode(text)
tokens = _tokenizer.encode(text) # type: ignore
return len(tokens)
@staticmethod

View File

@@ -196,15 +196,15 @@ def jsonable_encoder(
return encoder(obj)
try:
data = dict(obj)
data = dict(obj) # type: ignore
except Exception as e:
errors: list[Exception] = []
errors.append(e)
try:
data = vars(obj)
data = vars(obj) # type: ignore
except Exception as e:
errors.append(e)
raise ValueError(errors) from e
raise ValueError(str(errors)) from e
return jsonable_encoder(
data,
by_alias=by_alias,
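Note: the switch to ValueError(str(errors)) flattens the collected exceptions into one readable message. A small self-contained demo (error texts made up):

errors: list[Exception] = [TypeError("cannot convert to dict"), TypeError("vars() argument must have __dict__")]

try:
    raise ValueError(str(errors))
except ValueError as e:
    # e.args now holds a single plain string rather than a list of
    # exception objects, which serializes and logs cleanly.
    print(e.args[0])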

View File

@@ -3,7 +3,8 @@ from dataclasses import dataclass
from typing import Any
from opentelemetry import trace as trace_api
from opentelemetry.sdk.trace import Event, Status, StatusCode
from opentelemetry.sdk.trace import Event
from opentelemetry.trace import Status, StatusCode
from pydantic import BaseModel, Field

View File

@@ -155,7 +155,10 @@ class OpsTraceManager:
if key in tracing_config:
if "*" in tracing_config[key]:
# If the key contains '*', retain the original value from the current config
new_config[key] = current_trace_config.get(key, tracing_config[key])
if current_trace_config:
new_config[key] = current_trace_config.get(key, tracing_config[key])
else:
new_config[key] = tracing_config[key]
else:
# Otherwise, encrypt the key
new_config[key] = encrypt_token(tenant_id, tracing_config[key])
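Note: the added branch guards against current_trace_config being None. A self-contained sketch of the merge rule (dict contents invented for illustration):

def merge_masked_key(key: str, tracing_config: dict, current_trace_config: dict | None) -> str:
    if "*" in tracing_config[key]:
        # Masked value from the UI: prefer the previously stored secret.
        if current_trace_config:
            return current_trace_config.get(key, tracing_config[key])
        return tracing_config[key]  # no previous config to fall back on
    return tracing_config[key]  # the real code encrypts here; omitted in this sketch

print(merge_masked_key("api_key", {"api_key": "sk-****123"}, None))                    # sk-****123
print(merge_masked_key("api_key", {"api_key": "sk-****123"}, {"api_key": "sk-real"}))  # sk-real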

View File

@@ -62,7 +62,8 @@ class WeaveDataTrace(BaseTraceInstance):
self,
):
try:
project_url = f"https://wandb.ai/{self.weave_client._project_id()}"
project_identifier = f"{self.entity}/{self.project_name}" if self.entity else self.project_name
project_url = f"https://wandb.ai/{project_identifier}"
return project_url
except Exception as e:
logger.debug("Weave get run url failed: %s", str(e))
@@ -424,7 +425,23 @@ class WeaveDataTrace(BaseTraceInstance):
raise ValueError(f"Weave API check failed: {str(e)}")
def start_call(self, run_data: WeaveTraceModel, parent_run_id: str | None = None):
call = self.weave_client.create_call(op=run_data.op, inputs=run_data.inputs, attributes=run_data.attributes)
inputs = run_data.inputs
if inputs is None:
inputs = {}
elif not isinstance(inputs, dict):
inputs = {"inputs": str(inputs)}
attributes = run_data.attributes
if attributes is None:
attributes = {}
elif not isinstance(attributes, dict):
attributes = {"attributes": str(attributes)}
call = self.weave_client.create_call(
op=run_data.op,
inputs=inputs,
attributes=attributes,
)
self.calls[run_data.id] = call
if parent_run_id:
self.calls[run_data.id].parent_id = parent_run_id
@@ -432,6 +449,7 @@ class WeaveDataTrace(BaseTraceInstance):
def finish_call(self, run_data: WeaveTraceModel):
call = self.calls.get(run_data.id)
if call:
self.weave_client.finish_call(call=call, output=run_data.outputs, exception=run_data.exception)
exception = Exception(run_data.exception) if run_data.exception else None
self.weave_client.finish_call(call=call, output=run_data.outputs, exception=exception)
else:
raise ValueError(f"Call with id {run_data.id} not found")

View File

@@ -106,7 +106,9 @@ class RetrievalService:
if exceptions:
raise ValueError(";\n".join(exceptions))
# Deduplicate documents for hybrid search to avoid duplicate chunks
if retrieval_method == RetrievalMethod.HYBRID_SEARCH.value:
all_documents = cls._deduplicate_documents(all_documents)
data_post_processor = DataPostProcessor(
str(dataset.tenant_id), reranking_mode, reranking_model, weights, False
)
@@ -143,6 +145,40 @@
)
return all_documents
@classmethod
def _deduplicate_documents(cls, documents: list[Document]) -> list[Document]:
"""Deduplicate documents based on doc_id to avoid duplicate chunks in hybrid search."""
if not documents:
return documents
unique_documents = []
seen_doc_ids = set()
for document in documents:
# For dify provider documents, use doc_id for deduplication
if document.provider == "dify" and document.metadata is not None and "doc_id" in document.metadata:
doc_id = document.metadata["doc_id"]
if doc_id not in seen_doc_ids:
seen_doc_ids.add(doc_id)
unique_documents.append(document)
# If duplicate, keep the one with higher score
elif "score" in document.metadata:
# Find existing document with same doc_id and compare scores
for i, existing_doc in enumerate(unique_documents):
if (
existing_doc.metadata
and existing_doc.metadata.get("doc_id") == doc_id
and existing_doc.metadata.get("score", 0) < document.metadata.get("score", 0)
):
unique_documents[i] = document
break
else:
# For non-dify documents, use content-based deduplication
if document not in unique_documents:
unique_documents.append(document)
return unique_documents
@classmethod
def _get_dataset(cls, dataset_id: str) -> Dataset | None:
with Session(db.engine) as session:
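Note: a simplified, self-contained rendering of the dedup rule above, keeping one chunk per doc_id and preferring the higher score (the real method also falls back to content comparison for non-"dify" providers):

def dedup(chunks: list[dict]) -> list[dict]:
    best: dict[str, dict] = {}
    order: list[str] = []
    for chunk in chunks:
        doc_id = chunk["doc_id"]
        if doc_id not in best:
            best[doc_id] = chunk
            order.append(doc_id)
        elif chunk.get("score", 0) > best[doc_id].get("score", 0):
            best[doc_id] = chunk  # duplicate: keep the higher-scored copy
    return [best[doc_id] for doc_id in order]

# Hybrid search can return the same chunk from both keyword and vector
# retrieval; after dedup only the better-scored copy survives.
merged = dedup([
    {"doc_id": "a", "score": 0.4},
    {"doc_id": "a", "score": 0.9},
    {"doc_id": "b", "score": 0.7},
])
assert merged == [{"doc_id": "a", "score": 0.9}, {"doc_id": "b", "score": 0.7}]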

View File

@@ -20,6 +20,7 @@ class ModelInvokeCompletedEvent(NodeEventBase):
usage: LLMUsage
finish_reason: str | None = None
reasoning_content: str | None = None
structured_output: dict | None = None
class RunRetryEvent(NodeEventBase):

View File

@@ -87,7 +87,7 @@ class Executor:
node_data.authorization.config.api_key
).text
self.url: str = node_data.url
self.url = node_data.url
self.method = node_data.method
self.auth = node_data.authorization
self.timeout = timeout
@@ -349,11 +349,10 @@
"timeout": (self.timeout.connect, self.timeout.read, self.timeout.write),
"ssl_verify": self.ssl_verify,
"follow_redirects": True,
"max_retries": self.max_retries,
}
# request_args = {k: v for k, v in request_args.items() if v is not None}
try:
response: httpx.Response = _METHOD_MAP[method_lc](**request_args)
response: httpx.Response = _METHOD_MAP[method_lc](**request_args, max_retries=self.max_retries)
except (ssrf_proxy.MaxRetriesExceededError, httpx.RequestError) as e:
raise HttpRequestNodeError(str(e)) from e
# FIXME: fix type ignore, this maybe httpx type issue
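Note: max_retries now travels as an explicit keyword at the call site instead of sitting in the shared request_args dict. A stand-in with assumed signatures (not the real ssrf_proxy helpers):

def _get(url, timeout=None, ssl_verify=True, follow_redirects=True, max_retries=3):
    return f"GET {url} (max_retries={max_retries})"

_METHOD_MAP = {"get": _get}
request_args = {"url": "https://example.com", "timeout": 5,
                "ssl_verify": True, "follow_redirects": True}
# The retry budget is visible at the call, not buried in the dict.
print(_METHOD_MAP["get"](**request_args, max_retries=2))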

View File

@@ -165,6 +165,8 @@ class HttpRequestNode(Node):
body_type = typed_node_data.body.type
data = typed_node_data.body.data
match body_type:
case "none":
pass
case "binary":
if len(data) != 1:
raise RequestBodyError("invalid body data, should have only one item")

View File

@@ -83,7 +83,7 @@ class IfElseNode(Node):
else:
# TODO: Update database then remove this
# Fallback to old structure if cases are not defined
input_conditions, group_result, final_result = _should_not_use_old_function( # ty: ignore [deprecated]
input_conditions, group_result, final_result = _should_not_use_old_function( # pyright: ignore [reportDeprecated]
condition_processor=condition_processor,
variable_pool=self.graph_runtime_state.variable_pool,
conditions=self._node_data.conditions or [],

View File

@@ -136,6 +136,11 @@ class KnowledgeIndexNode(Node):
document = db.session.query(Document).filter_by(id=document_id.value).first()
if not document:
raise KnowledgeIndexNodeError(f"Document {document_id.value} not found.")
doc_id_value = document.id
ds_id_value = dataset.id
dataset_name_value = dataset.name
document_name_value = document.name
created_at_value = document.created_at
# chunk nodes by chunk size
indexing_start_at = time.perf_counter()
index_processor = IndexProcessorFactory(dataset.chunk_structure).init_index_processor()
@@ -161,16 +166,16 @@
document.word_count = (
db.session.query(func.sum(DocumentSegment.word_count))
.where(
DocumentSegment.document_id == document.id,
DocumentSegment.dataset_id == dataset.id,
DocumentSegment.document_id == doc_id_value,
DocumentSegment.dataset_id == ds_id_value,
)
.scalar()
)
db.session.add(document)
# update document segment status
db.session.query(DocumentSegment).where(
DocumentSegment.document_id == document.id,
DocumentSegment.dataset_id == dataset.id,
DocumentSegment.document_id == doc_id_value,
DocumentSegment.dataset_id == ds_id_value,
).update(
{
DocumentSegment.status: "completed",
@@ -182,13 +187,13 @@
db.session.commit()
return {
"dataset_id": dataset.id,
"dataset_name": dataset.name,
"dataset_id": ds_id_value,
"dataset_name": dataset_name_value,
"batch": batch.value,
"document_id": document.id,
"document_name": document.name,
"created_at": document.created_at.timestamp(),
"display_status": document.indexing_status,
"document_id": doc_id_value,
"document_name": document_name_value,
"created_at": created_at_value.timestamp(),
"display_status": "completed",
}
def _get_preview_output(self, chunk_structure: str, chunks: Any) -> Mapping[str, Any]:
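Note on why the scalar copies are taken first: SQLAlchemy expires ORM attributes on commit by default, so reading document.id after db.session.commit() triggers a re-fetch (or fails on a detached instance). Snapshotting plain values beforehand keeps the return dict cheap and safe. Shape of the pattern, with a stand-in object:

class FakeDocument:  # stand-in for the ORM instance
    id = "doc-1"
    name = "intro.md"

document = FakeDocument()
# Snapshot primitives while the instance is live...
doc_id_value, document_name_value = document.id, document.name
# ...a commit would expire the instance here...
# ...then build the result from the snapshot, not the ORM object.
result = {"document_id": doc_id_value, "document_name": document_name_value}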

View File

@@ -107,7 +107,7 @@ class KnowledgeRetrievalNode(Node):
graph_runtime_state=graph_runtime_state,
)
# LLM file outputs, used for MultiModal outputs.
self._file_outputs: list[File] = []
self._file_outputs = []
if llm_file_saver is None:
llm_file_saver = FileSaverImpl(

View File

@@ -161,6 +161,8 @@ class ListOperatorNode(Node):
elif isinstance(variable, ArrayFileSegment):
if isinstance(condition.value, str):
value = self.graph_runtime_state.variable_pool.convert_template(condition.value).text
elif isinstance(condition.value, bool):
raise ValueError(f"File filter expects a string value, got {type(condition.value)}")
else:
value = condition.value
filter_func = _get_file_filter_func(

View File

@@ -46,7 +46,7 @@ class LLMFileSaver(tp.Protocol):
dot (`.`). For example, `.py` and `.tar.gz` are both valid values, while `py`
and `tar.gz` are not.
"""
pass
raise NotImplementedError()
def save_remote_url(self, url: str, file_type: FileType) -> File:
"""save_remote_url saves the file from a remote url returned by LLM.
@@ -56,7 +56,7 @@ class LLMFileSaver(tp.Protocol):
:param url: the url of the file.
:param file_type: the file type of the file, check `FileType` enum for reference.
"""
pass
raise NotImplementedError()
EngineFactory: tp.TypeAlias = tp.Callable[[], Engine]
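Note: replacing pass with raise NotImplementedError() in the Protocol bodies makes misuse loud. Protocols may be subclassed explicitly, and a subclass that forgets an override would otherwise silently return None:

import typing as tp

class Saver(tp.Protocol):
    def save(self, data: bytes) -> str:
        raise NotImplementedError()

class BrokenSaver(Saver):  # explicit subclassing of a Protocol is legal
    pass

try:
    BrokenSaver().save(b"x")
except NotImplementedError:
    print("missing override surfaces immediately")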

View File

@@ -23,6 +23,7 @@ from core.model_runtime.entities.llm_entities import (
LLMResult,
LLMResultChunk,
LLMResultChunkWithStructuredOutput,
LLMResultWithStructuredOutput,
LLMStructuredOutput,
LLMUsage,
)
@@ -127,7 +128,7 @@ class LLMNode(Node):
graph_runtime_state=graph_runtime_state,
)
# LLM file outputs, used for MultiModal outputs.
self._file_outputs: list[File] = []
self._file_outputs = []
if llm_file_saver is None:
llm_file_saver = FileSaverImpl(
@@ -165,6 +166,7 @@
node_inputs: dict[str, Any] = {}
process_data: dict[str, Any] = {}
result_text = ""
clean_text = ""
usage = LLMUsage.empty_usage()
finish_reason = None
reasoning_content = None
@@ -278,6 +280,13 @@
# Extract clean text from <think> tags
clean_text, _ = LLMNode._split_reasoning(result_text, self._node_data.reasoning_format)
# Process structured output if available from the event.
structured_output = (
LLMStructuredOutput(structured_output=event.structured_output)
if event.structured_output
else None
)
# deduct quota
llm_utils.deduct_llm_quota(tenant_id=self.tenant_id, model_instance=model_instance, usage=usage)
break
@@ -1048,7 +1057,7 @@
@staticmethod
def handle_blocking_result(
*,
invoke_result: LLMResult,
invoke_result: LLMResult | LLMResultWithStructuredOutput,
saver: LLMFileSaver,
file_outputs: list["File"],
reasoning_format: Literal["separated", "tagged"] = "tagged",
@@ -1079,6 +1088,8 @@
finish_reason=None,
# Reasoning content for workflow variables and downstream nodes
reasoning_content=reasoning_content,
# Pass structured output if enabled
structured_output=getattr(invoke_result, "structured_output", None),
)
@staticmethod
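Note: getattr with a default lets one code path accept both result types: plain LLMResult has no structured_output attribute, the structured variant does. Stand-in classes for illustration:

class PlainResult:  # stands in for LLMResult
    text = "hello"

class StructuredResult:  # stands in for LLMResultWithStructuredOutput
    text = '{"city": "Paris"}'
    structured_output = {"city": "Paris"}

for result in (PlainResult(), StructuredResult()):
    print(getattr(result, "structured_output", None))  # None, then the dict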

View File

@@ -179,6 +179,6 @@ CHAT_EXAMPLE = [
"required": ["food"],
},
},
"assistant": {"text": "I need to output a valid JSON object.", "json": {"result": "apple pie"}},
"assistant": {"text": "I need to output a valid JSON object.", "json": {"food": "apple pie"}},
},
]

View File

@@ -68,7 +68,7 @@ class QuestionClassifierNode(Node):
graph_runtime_state=graph_runtime_state,
)
# LLM file outputs, used for MultiModal outputs.
self._file_outputs: list[File] = []
self._file_outputs = []
if llm_file_saver is None:
llm_file_saver = FileSaverImpl(
@@ -111,9 +111,9 @@
query = variable.value if variable else None
variables = {"query": query}
# fetch model config
model_instance, model_config = LLMNode._fetch_model_config(
node_data_model=node_data.model,
model_instance, model_config = llm_utils.fetch_model_config(
tenant_id=self.tenant_id,
node_data_model=node_data.model,
)
# fetch memory
memory = llm_utils.fetch_memory(

View File

@@ -416,4 +416,8 @@ class WorkflowEntry:
# append variable and value to variable pool
if variable_node_id != ENVIRONMENT_VARIABLE_NODE_ID:
# In single run, the input_value is set as the LLM's structured output value within the variable_pool.
if len(variable_key_list) == 2 and variable_key_list[0] == "structured_output":
input_value = {variable_key_list[1]: input_value}
variable_key_list = variable_key_list[0:1]
variable_pool.add([variable_node_id] + variable_key_list, input_value)
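Note: what the single-run mapping does, in isolation: an input addressed as <node>.structured_output.<key> is reshaped into a one-key dict stored at <node>.structured_output, so downstream selectors resolve against the same layout the LLM node would produce:

variable_key_list = ["structured_output", "city"]
input_value: object = "Paris"

if len(variable_key_list) == 2 and variable_key_list[0] == "structured_output":
    input_value = {variable_key_list[1]: input_value}  # wrap the raw value
    variable_key_list = variable_key_list[:1]          # keep only the root key

assert variable_key_list == ["structured_output"]
assert input_value == {"city": "Paris"}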

View File

@@ -6,9 +6,6 @@
"migrations/",
"core/rag",
"extensions",
"core/ops",
"core/model_runtime",
"core/workflow/nodes",
"core/app/app_config/easy_ui_based_app/dataset"
],
"typeCheckingMode": "strict",
@@ -37,4 +34,4 @@
"reportAttributeAccessIssue": "hint",
"pythonVersion": "3.11",
"pythonPlatform": "All"
}
}

View File

@@ -52,7 +52,8 @@ def check_upgradable_plugin_task():
strategy.include_plugins,
)
if batch_interval_time > 0.0001: # if lower than 1ms, skip
# Only sleep if batch_interval_time > 0.0001 AND current batch is not the last one
if batch_interval_time > 0.0001 and i + MAX_CONCURRENT_CHECK_TASKS < total_strategies:
time.sleep(batch_interval_time)
end_at = time.perf_counter()
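Note: the extra condition skips the trailing sleep once the final batch has been dispatched. The loop shape, self-contained (sizes and interval invented):

import time

MAX_CONCURRENT_CHECK_TASKS = 2
strategies = ["s1", "s2", "s3", "s4", "s5"]
total_strategies = len(strategies)
batch_interval_time = 0.01

for i in range(0, total_strategies, MAX_CONCURRENT_CHECK_TASKS):
    print("checking", strategies[i:i + MAX_CONCURRENT_CHECK_TASKS])
    # Sleep only between batches; after the last batch the task returns
    # immediately instead of idling for one more interval.
    if batch_interval_time > 0.0001 and i + MAX_CONCURRENT_CHECK_TASKS < total_strategies:
        time.sleep(batch_interval_time)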

View File

@@ -93,7 +93,7 @@ logger = logging.getLogger(__name__)
class DatasetService:
@staticmethod
def get_datasets(page, per_page, tenant_id=None, user=None, search=None, tag_ids=None, include_all=False):
query = select(Dataset).where(Dataset.tenant_id == tenant_id).order_by(Dataset.created_at.desc())
query = select(Dataset).where(Dataset.tenant_id == tenant_id).order_by(Dataset.created_at.desc(), Dataset.id)
if user:
# get permitted dataset ids

View File

@@ -152,23 +152,20 @@ const Apps = ({
<div className={cn(
'mt-6 flex items-center justify-between px-12',
)}>
<>
<Category
list={categories}
value={currCategory}
onChange={setCurrCategory}
allCategoriesEn={allCategoriesEn}
/>
</>
<Category
list={categories}
value={currCategory}
onChange={setCurrCategory}
allCategoriesEn={allCategoriesEn}
/>
<Input
showLeftIcon
showClearIcon
wrapperClassName='w-[200px]'
wrapperClassName='w-[200px] self-start'
value={keywords}
onChange={e => handleKeywordsChange(e.target.value)}
onClear={() => handleKeywordsChange('')}
/>
</div>
<div className={cn(

View File

@@ -941,9 +941,9 @@ const translation = {
chunkIsRequired: 'ساختار تکه ای مورد نیاز است',
chooseChunkStructure: 'یک ساختار تکه ای را انتخاب کنید',
chunksInput: 'تکه‌ها',
chunksInputTip: 'متغیر ورودی گره پایگاه دانش چانک‌ها است. نوع متغیر یک شیء با یک طرح JSON خاص است که باید با ساختار چانک انتخاب شده سازگار باشد.',
chunksInputTip: 'متغیر ورودی گره پایگاه دانش تکه‌ها است. نوع متغیر یک شیء با یک طرح JSON خاص است که باید با ساختار تکه انتخاب شده سازگار باشد.',
embeddingModelIsRequired: 'مدل جاسازی مورد نیاز است',
chunksVariableIsRequired: 'متغیر Chunks الزامی است',
chunksVariableIsRequired: 'متغیر تکه‌ها الزامی است',
rerankingModelIsRequired: 'مدل رتبه‌بندی مجدد مورد نیاز است',
},
},

View File

@@ -961,8 +961,8 @@ const translation = {
aboutRetrieval: 'पुनर्प्राप्ति विधि के बारे में।',
chooseChunkStructure: 'एक चंक संरचना चुनें',
chunksInput: 'टुकड़े',
chunksInputTip: 'ज्ञान आधार नोड का इनपुट वेरिएबल चंक्स है। वेरिएबल प्रकार एक ऑब्जेक्ट है जिसमें एक विशेष JSON स्कीमा है जो चयनित चंक संरचना के साथ सुसंगत होना चाहिए।',
chunksVariableIsRequired: 'Chunks चर आवश्यक है',
chunksInputTip: 'ज्ञान आधार नोड का इनपुट वेरिएबल टुकड़े है। वेरिएबल प्रकार एक ऑब्जेक्ट है जिसमें एक विशेष JSON स्कीमा है जो चयनित चंक संरचना के साथ सुसंगत होना चाहिए।',
chunksVariableIsRequired: 'टुकड़े चर आवश्यक है',
embeddingModelIsRequired: 'एम्बेडिंग मॉडल आवश्यक है',
rerankingModelIsRequired: 'पुनः क्रमांकन मॉडल की आवश्यकता है',
},

View File

@@ -950,7 +950,7 @@ const translation = {
chunksInput: 'Kosi',
chunksInputTip: 'Vhodna spremenljivka vozlišča podatkovne baze je Chunks. Tip spremenljivke je objekt s specifično JSON shemo, ki mora biti skladna z izbrano strukturo kosov.',
chunksVariableIsRequired: 'Spremenljivka Chunks je obvezna',
embeddingModelIsRequired: 'Zahtuje se vgrajevalni model',
embeddingModelIsRequired: 'Zahteva se vgrajevalni model',
rerankingModelIsRequired: 'Potreben je model za ponovno razvrščanje',
},
},

View File

@@ -944,7 +944,7 @@ const translation = {
chunksInputTip: 'Вхідна змінна вузла бази знань - це Частини. Тип змінної - об\'єкт з певною JSON-схемою, яка повинна відповідати вибраній структурі частин.',
chunksVariableIsRequired: 'Змінна chunks є обов\'язковою',
embeddingModelIsRequired: 'Потрібна модель вбудовування',
rerankingModelIsRequired: 'Потрібна модель перенавчання',
rerankingModelIsRequired: 'Потрібна модель повторного ранжування',
},
},
tracing: {

View File

@@ -2,7 +2,7 @@
"name": "dify-web",
"version": "1.9.1",
"private": true,
"packageManager": "pnpm@10.16.0",
"packageManager": "pnpm@10.17.1",
"engines": {
"node": ">=v22.11.0"
},
@@ -39,13 +39,12 @@
"storybook": "storybook dev -p 6006",
"build-storybook": "storybook build",
"preinstall": "npx only-allow pnpm",
"analyze": "ANALYZE=true pnpm build"
"analyze": "ANALYZE=true pnpm build",
"knip": "knip"
},
"dependencies": {
"@babel/runtime": "^7.22.3",
"@dagrejs/dagre": "^1.1.4",
"@emoji-mart/data": "^1.2.1",
"@eslint/compat": "^1.2.4",
"@floating-ui/react": "^0.26.25",
"@formatjs/intl-localematcher": "^0.5.6",
"@headlessui/react": "2.2.1",
@@ -63,7 +62,6 @@
"@octokit/request-error": "^6.1.5",
"@remixicon/react": "^4.5.0",
"@sentry/react": "^8.54.0",
"@sentry/utils": "^8.54.0",
"@svgdotjs/svg.js": "^3.2.4",
"@tailwindcss/typography": "^0.5.15",
"@tanstack/react-form": "^1.3.3",
@@ -76,7 +74,6 @@
"cmdk": "^1.1.1",
"copy-to-clipboard": "^3.3.3",
"cron-parser": "^5.4.0",
"crypto-js": "^4.2.0",
"dayjs": "^1.11.13",
"decimal.js": "^10.4.3",
"dompurify": "^3.2.4",
@@ -92,7 +89,6 @@
"js-audio-recorder": "^1.0.7",
"js-cookie": "^3.0.5",
"jsonschema": "^1.5.0",
"jwt-decode": "^4.0.0",
"katex": "^0.16.21",
"ky": "^1.7.2",
"lamejs": "^1.2.1",
@@ -113,12 +109,9 @@
"react-18-input-autosize": "^3.0.0",
"react-dom": "19.1.1",
"react-easy-crop": "^5.1.0",
"react-error-boundary": "^4.1.2",
"react-headless-pagination": "^1.1.6",
"react-hook-form": "^7.53.1",
"react-hotkeys-hook": "^4.6.1",
"react-i18next": "^15.1.0",
"react-infinite-scroll-component": "^6.1.0",
"react-markdown": "^9.0.1",
"react-multi-email": "^1.0.25",
"react-papaparse": "^4.4.0",
@@ -127,11 +120,8 @@
"react-sortablejs": "^6.1.4",
"react-syntax-highlighter": "^15.6.1",
"react-textarea-autosize": "^8.5.8",
"react-tooltip": "5.8.3",
"react-window": "^1.8.10",
"react-window-infinite-loader": "^1.0.9",
"reactflow": "^11.11.3",
"recordrtc": "^5.6.2",
"rehype-katex": "^7.0.1",
"rehype-raw": "^7.0.0",
"remark-breaks": "^4.0.0",
@@ -139,9 +129,7 @@
"remark-math": "^6.0.0",
"scheduler": "^0.26.0",
"semver": "^7.6.3",
"server-only": "^0.0.1",
"sharp": "^0.33.2",
"shave": "^5.0.4",
"sortablejs": "^1.15.0",
"swr": "^2.3.0",
"tailwind-merge": "^2.5.4",
@@ -154,13 +142,8 @@
},
"devDependencies": {
"@antfu/eslint-config": "^5.0.0",
"@babel/core": "^7.28.3",
"@babel/preset-env": "^7.28.3",
"@chromatic-com/storybook": "^3.1.0",
"@eslint-react/eslint-plugin": "^1.15.0",
"@eslint/eslintrc": "^3.1.0",
"@eslint/js": "^9.36.0",
"@faker-js/faker": "^9.0.3",
"@happy-dom/jest-environment": "^17.4.4",
"@mdx-js/loader": "^3.1.0",
"@mdx-js/react": "^3.1.0",
@@ -173,14 +156,13 @@
"@storybook/addon-links": "8.5.0",
"@storybook/addon-onboarding": "8.5.0",
"@storybook/addon-themes": "8.5.0",
"@storybook/blocks": "8.5.0",
"@storybook/nextjs": "8.5.0",
"@storybook/react": "8.5.0",
"@storybook/test": "8.5.0",
"@testing-library/dom": "^10.4.0",
"@testing-library/jest-dom": "^6.8.0",
"@testing-library/react": "^16.0.1",
"@types/crypto-js": "^4.2.2",
"@babel/core": "^7.28.3",
"@types/dagre": "^0.7.52",
"@types/jest": "^29.5.13",
"@types/js-cookie": "^3.0.6",
@@ -193,18 +175,14 @@
"@types/react-slider": "^1.3.6",
"@types/react-syntax-highlighter": "^15.5.13",
"@types/react-window": "^1.8.8",
"@types/react-window-infinite-loader": "^1.0.9",
"@types/recordrtc": "^5.6.14",
"@types/semver": "^7.5.8",
"@types/sortablejs": "^1.15.1",
"@types/uuid": "^10.0.0",
"autoprefixer": "^10.4.20",
"babel-loader": "^10.0.0",
"bing-translate-api": "^4.0.2",
"code-inspector-plugin": "1.2.9",
"cross-env": "^7.0.3",
"eslint": "^9.35.0",
"eslint-config-next": "15.5.0",
"eslint-plugin-oxlint": "^1.6.0",
"eslint-plugin-react-hooks": "^5.1.0",
"eslint-plugin-react-refresh": "^0.4.19",
@@ -214,6 +192,7 @@
"globals": "^15.11.0",
"husky": "^9.1.6",
"jest": "^29.7.0",
"knip": "^5.64.1",
"lint-staged": "^15.2.10",
"lodash": "^4.17.21",
"magicast": "^0.3.4",
@ -221,10 +200,9 @@
"sass": "^1.92.1",
"storybook": "8.5.0",
"tailwindcss": "^3.4.14",
"ts-node": "^10.9.2",
"typescript": "^5.8.3",
"typescript-eslint": "^8.38.0",
"uglify-js": "^3.19.3"
"uglify-js": "^3.19.3",
"babel-loader": "^9.2.1"
},
"resolutions": {
"@types/react": "19.1.11",

File diff suppressed because it is too large