Merge branch 'main' into feat/enchance-warn-user-time-when-need-upgrade-plan

twwu 2025-12-11 10:33:39 +08:00
commit c49127540f
45 changed files with 1206 additions and 1075 deletions

View File

@@ -0,0 +1,21 @@
+name: Semantic Pull Request
+
+on:
+  pull_request:
+    types:
+      - opened
+      - edited
+      - reopened
+      - synchronize
+
+jobs:
+  lint:
+    name: Validate PR title
+    permissions:
+      pull-requests: read
+    runs-on: ubuntu-latest
+    steps:
+      - name: Check title
+        uses: amannn/action-semantic-pull-request@v6.1.1
+        env:
+          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}

View File

@@ -4,7 +4,7 @@ from uuid import UUID
 from flask import request
 from flask_restx import Resource
-from pydantic import BaseModel, Field
+from pydantic import BaseModel, Field, field_validator
 from werkzeug.exceptions import BadRequest, InternalServerError, NotFound

 import services
@@ -52,11 +52,23 @@ class ChatRequestPayload(BaseModel):
     query: str
     files: list[dict[str, Any]] | None = None
     response_mode: Literal["blocking", "streaming"] | None = None
-    conversation_id: UUID | None = None
+    conversation_id: str | None = Field(default=None, description="Conversation UUID")
     retriever_from: str = Field(default="dev")
     auto_generate_name: bool = Field(default=True, description="Auto generate conversation name")
     workflow_id: str | None = Field(default=None, description="Workflow ID for advanced chat")
+
+    @field_validator("conversation_id", mode="before")
+    @classmethod
+    def normalize_conversation_id(cls, value: str | UUID | None) -> str | None:
+        """Allow missing or blank conversation IDs; enforce UUID format when provided."""
+        if not value:
+            return None
+        try:
+            return helper.uuid_value(value)
+        except ValueError as exc:
+            raise ValueError("conversation_id must be a valid UUID") from exc
+
 register_schema_models(service_api_ns, CompletionRequestPayload, ChatRequestPayload)

View File

@@ -451,12 +451,21 @@ class RetrievalService:
                         "position": child_chunk.position,
                         "score": document.metadata.get("score", 0.0),
                     }
-                    segment_child_map[segment.id]["child_chunks"].append(child_chunk_detail)
-                    segment_child_map[segment.id]["max_score"] = max(
-                        segment_child_map[segment.id]["max_score"], document.metadata.get("score", 0.0)
-                    )
+                    if segment.id in segment_child_map:
+                        segment_child_map[segment.id]["child_chunks"].append(child_chunk_detail)
+                        segment_child_map[segment.id]["max_score"] = max(
+                            segment_child_map[segment.id]["max_score"], document.metadata.get("score", 0.0)
+                        )
+                    else:
+                        segment_child_map[segment.id] = {
+                            "max_score": document.metadata.get("score", 0.0),
+                            "child_chunks": [child_chunk_detail],
+                        }
                 if attachment_info:
-                    segment_file_map[segment.id].append(attachment_info)
+                    if segment.id in segment_file_map:
+                        segment_file_map[segment.id].append(attachment_info)
+                    else:
+                        segment_file_map[segment.id] = [attachment_info]
             else:
                 # Handle normal documents
                 segment = None
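The guarded first-insert pattern above can also be expressed with `collections.defaultdict`, which removes the membership checks entirely; a minimal sketch (the segment IDs and payloads here are made up):

```python
from collections import defaultdict

# Missing keys are created on first access, so no "in" checks are needed.
segment_child_map = defaultdict(lambda: {"max_score": 0.0, "child_chunks": []})
segment_file_map = defaultdict(list)

def record(segment_id: str, child_chunk_detail: dict, score: float, attachment_info: dict | None) -> None:
    entry = segment_child_map[segment_id]
    entry["child_chunks"].append(child_chunk_detail)
    entry["max_score"] = max(entry["max_score"], score)
    if attachment_info:
        segment_file_map[segment_id].append(attachment_info)

record("seg-1", {"position": 1}, 0.42, {"name": "spec.pdf"})
record("seg-1", {"position": 2}, 0.58, None)
assert segment_child_map["seg-1"]["max_score"] == 0.58
assert len(segment_file_map["seg-1"]) == 1
```

The patch keeps explicit checks instead, which leaves both maps as plain dicts for downstream consumers.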

View File

@@ -209,7 +209,7 @@ class ParagraphIndexProcessor(BaseIndexProcessor):
         if dataset.indexing_technique == "high_quality":
             vector = Vector(dataset)
             vector.create(documents)
-            if all_multimodal_documents:
+            if all_multimodal_documents and dataset.is_multimodal:
                 vector.create_multimodal(all_multimodal_documents)
         elif dataset.indexing_technique == "economy":
             keyword = Keyword(dataset)

View File

@@ -312,7 +312,7 @@ class ParentChildIndexProcessor(BaseIndexProcessor):
             vector = Vector(dataset)
             if all_child_documents:
                 vector.create(all_child_documents)
-            if all_multimodal_documents:
+            if all_multimodal_documents and dataset.is_multimodal:
                 vector.create_multimodal(all_multimodal_documents)

     def format_preview(self, chunks: Any) -> Mapping[str, Any]:

View File

@@ -13,5 +13,5 @@ def remove_leading_symbols(text: str) -> str:
     """
     # Match Unicode ranges for punctuation and symbols
     # FIXME this pattern is confused quick fix for #11868 maybe refactor it later
-    pattern = r"^[\u2000-\u206F\u2E00-\u2E7F\u3000-\u303F\"#$%&'()*+,./:;<=>?@^_`~]+"
+    pattern = r'^[\[\]\u2000-\u2025\u2027-\u206F\u2E00-\u2E7F\u3000-\u300F\u3011-\u303F"#$%&\'()*+,./:;<=>?@^_`~]+'
     return re.sub(pattern, "", text)
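A quick sanity check of what the narrowed character class changes (a sketch; the sample strings are invented):

```python
import re

# Character class copied verbatim from the patched function.
pattern = r'^[\[\]\u2000-\u2025\u2027-\u206F\u2E00-\u2E7F\u3000-\u300F\u3011-\u303F"#$%&\'()*+,./:;<=>?@^_`~]+'

assert re.sub(pattern, "", ";;hello") == "hello"  # leading ASCII punctuation is still stripped
assert re.sub(pattern, "", "【测试】") == "【测试】"  # U+3010 is no longer in the class, so CJK-bracketed text survives
assert re.sub(pattern, "", "…ellipsis") == "…ellipsis"  # U+2026 now falls in the \u2025/\u2027 gap
```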

View File

@@ -221,7 +221,7 @@ class WorkflowToolProviderController(ToolProviderController):
             session.query(WorkflowToolProvider)
             .where(
                 WorkflowToolProvider.tenant_id == tenant_id,
-                WorkflowToolProvider.app_id == self.provider_id,
+                WorkflowToolProvider.id == self.provider_id,
             )
             .first()
         )

View File

@@ -412,16 +412,20 @@ class Executor:
                     body_string += f"--{boundary}\r\n"
                     body_string += f'Content-Disposition: form-data; name="{key}"\r\n\r\n'
-                    # decode content safely
-                    try:
-                        body_string += content.decode("utf-8")
-                    except UnicodeDecodeError:
-                        body_string += content.decode("utf-8", errors="replace")
-                    body_string += "\r\n"
+                    # Do not decode binary content; use a placeholder with file metadata instead.
+                    # Includes filename, size, and MIME type for better logging context.
+                    body_string += (
+                        f"<file_content_binary: '{file_entry[1][0] or 'unknown'}', "
+                        f"type='{file_entry[1][2] if len(file_entry[1]) > 2 else 'unknown'}', "
+                        f"size={len(content)} bytes>\r\n"
+                    )
                 body_string += f"--{boundary}--\r\n"
         elif self.node_data.body:
             if self.content:
+                # If content is bytes, do not decode it; show a placeholder with size.
+                # Provides content size information for binary data without exposing the raw bytes.
                 if isinstance(self.content, bytes):
-                    body_string = self.content.decode("utf-8", errors="replace")
+                    body_string = f"<binary_content: size={len(self.content)} bytes>"
                 else:
                     body_string = self.content
         elif self.data and self.node_data.body.type == "x-www-form-urlencoded":
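The placeholder strings introduced above reduce to simple formatting; a standalone sketch (the helper name and arguments are illustrative, not from the patch):

```python
def describe_binary_part(filename: str | None, mime_type: str | None, content: bytes) -> str:
    # Log file metadata only; never attempt to decode raw bytes into the log line.
    return (
        f"<file_content_binary: '{filename or 'unknown'}', "
        f"type='{mime_type or 'unknown'}', "
        f"size={len(content)} bytes>"
    )

assert describe_binary_part("a.png", "image/png", b"\x89PNG") == (
    "<file_content_binary: 'a.png', type='image/png', size=4 bytes>"
)
```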

View File

@@ -107,7 +107,7 @@ def email(email):
 EmailStr = Annotated[str, AfterValidator(email)]


-def uuid_value(value):
+def uuid_value(value: Any) -> str:
     if value == "":
         return str(value)

View File

@@ -1,6 +1,6 @@
 [project]
 name = "dify-api"
-version = "1.10.1"
+version = "1.11.0"
 requires-python = ">=3.11,<3.13"
 dependencies = [
@@ -151,7 +151,7 @@ dev = [
     "types-pywin32~=310.0.0",
     "types-pyyaml~=6.0.12",
     "types-regex~=2024.11.6",
-    "types-shapely~=2.0.0",
+    "types-shapely~=2.1.0",
     "types-simplejson>=3.20.0",
     "types-six>=1.17.0",
     "types-tensorflow>=2.18.0",

View File

@@ -673,6 +673,8 @@ class DatasetService:
         Returns:
             str: Action to perform ('add', 'remove', 'update', or None)
         """
+        if "indexing_technique" not in data:
+            return None
         if dataset.indexing_technique != data["indexing_technique"]:
             if data["indexing_technique"] == "economy":
                 # Remove embedding model configuration for economy mode

View File

@@ -70,9 +70,28 @@ class ModelProviderService:
                 continue

             provider_config = provider_configuration.custom_configuration.provider
-            model_config = provider_configuration.custom_configuration.models
+            models = provider_configuration.custom_configuration.models
             can_added_models = provider_configuration.custom_configuration.can_added_models
+
+            # IMPORTANT: Never expose decrypted credentials in the provider list API.
+            # Sanitize custom model configurations by dropping the credentials payload.
+            sanitized_model_config = []
+            if models:
+                from core.entities.provider_entities import CustomModelConfiguration  # local import to avoid cycles
+
+                for model in models:
+                    sanitized_model_config.append(
+                        CustomModelConfiguration(
+                            model=model.model,
+                            model_type=model.model_type,
+                            credentials=None,  # strip secrets from list view
+                            current_credential_id=model.current_credential_id,
+                            current_credential_name=model.current_credential_name,
+                            available_model_credentials=model.available_model_credentials,
+                            unadded_to_model_list=model.unadded_to_model_list,
+                        )
+                    )
+
             provider_response = ProviderResponse(
                 tenant_id=tenant_id,
                 provider=provider_configuration.provider.provider,
@@ -95,7 +114,7 @@ class ModelProviderService:
                     current_credential_id=getattr(provider_config, "current_credential_id", None),
                     current_credential_name=getattr(provider_config, "current_credential_name", None),
                     available_credentials=getattr(provider_config, "available_credentials", []),
-                    custom_models=model_config,
+                    custom_models=sanitized_model_config,
                     can_added_models=can_added_models,
                 ),
                 system_configuration=SystemConfigurationResponse(
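Since `CustomModelConfiguration` is rebuilt field-by-field above, it is presumably a pydantic model; if so, the same sanitization could be written more compactly with `model_copy` (a sketch, assuming pydantic v2):

```python
# Equivalent sanitization, assuming CustomModelConfiguration is a pydantic v2 model.
sanitized_model_config = [
    model.model_copy(update={"credentials": None})  # strip secrets, keep everything else
    for model in (models or [])
]
```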

View File

@@ -0,0 +1,25 @@
+import uuid
+
+import pytest
+from pydantic import ValidationError
+
+from controllers.service_api.app.completion import ChatRequestPayload
+
+
+def test_chat_request_payload_accepts_blank_conversation_id():
+    payload = ChatRequestPayload.model_validate({"inputs": {}, "query": "hello", "conversation_id": ""})
+    assert payload.conversation_id is None
+
+
+def test_chat_request_payload_validates_uuid():
+    conversation_id = str(uuid.uuid4())
+    payload = ChatRequestPayload.model_validate({"inputs": {}, "query": "hello", "conversation_id": conversation_id})
+    assert payload.conversation_id == conversation_id
+
+
+def test_chat_request_payload_rejects_invalid_uuid():
+    with pytest.raises(ValidationError):
+        ChatRequestPayload.model_validate({"inputs": {}, "query": "hello", "conversation_id": "invalid"})

View File

@@ -0,0 +1,88 @@
+import types
+
+import pytest
+
+from core.entities.provider_entities import CredentialConfiguration, CustomModelConfiguration
+from core.model_runtime.entities.common_entities import I18nObject
+from core.model_runtime.entities.model_entities import ModelType
+from core.model_runtime.entities.provider_entities import ConfigurateMethod
+from models.provider import ProviderType
+from services.model_provider_service import ModelProviderService
+
+
+class _FakeConfigurations:
+    def __init__(self, provider_configuration: types.SimpleNamespace) -> None:
+        self._provider_configuration = provider_configuration
+
+    def values(self) -> list[types.SimpleNamespace]:
+        return [self._provider_configuration]
+
+
+@pytest.fixture
+def service_with_fake_configurations():
+    # Build a fake provider schema with minimal fields used by ProviderResponse
+    fake_provider = types.SimpleNamespace(
+        provider="langgenius/openai_api_compatible/openai_api_compatible",
+        label=I18nObject(en_US="OpenAI API Compatible", zh_Hans="OpenAI API Compatible"),
+        description=None,
+        icon_small=None,
+        icon_small_dark=None,
+        icon_large=None,
+        background=None,
+        help=None,
+        supported_model_types=[ModelType.LLM],
+        configurate_methods=[ConfigurateMethod.CUSTOMIZABLE_MODEL],
+        provider_credential_schema=None,
+        model_credential_schema=None,
+    )
+
+    # Include decrypted credentials to simulate the leak source
+    custom_model = CustomModelConfiguration(
+        model="gpt-4o-mini",
+        model_type=ModelType.LLM,
+        credentials={"api_key": "sk-plain-text", "endpoint": "https://example.com"},
+        current_credential_id="cred-1",
+        current_credential_name="API KEY 1",
+        available_model_credentials=[],
+        unadded_to_model_list=False,
+    )
+    fake_custom_provider = types.SimpleNamespace(
+        current_credential_id="cred-1",
+        current_credential_name="API KEY 1",
+        available_credentials=[CredentialConfiguration(credential_id="cred-1", credential_name="API KEY 1")],
+    )
+    fake_custom_configuration = types.SimpleNamespace(
+        provider=fake_custom_provider, models=[custom_model], can_added_models=[]
+    )
+    fake_system_configuration = types.SimpleNamespace(enabled=False, current_quota_type=None, quota_configurations=[])
+    fake_provider_configuration = types.SimpleNamespace(
+        provider=fake_provider,
+        preferred_provider_type=ProviderType.CUSTOM,
+        custom_configuration=fake_custom_configuration,
+        system_configuration=fake_system_configuration,
+        is_custom_configuration_available=lambda: True,
+    )
+
+    class _FakeProviderManager:
+        def get_configurations(self, tenant_id: str) -> _FakeConfigurations:
+            return _FakeConfigurations(fake_provider_configuration)
+
+    svc = ModelProviderService()
+    svc.provider_manager = _FakeProviderManager()
+    return svc
+
+
+def test_get_provider_list_strips_credentials(service_with_fake_configurations: ModelProviderService):
+    providers = service_with_fake_configurations.get_provider_list(tenant_id="tenant-1", model_type=None)
+
+    assert len(providers) == 1
+    custom_models = providers[0].custom_configuration.custom_models
+    assert custom_models is not None
+    assert len(custom_models) == 1
+    # The sanitizer should drop credentials in list response
+    assert custom_models[0].credentials is None

View File

@@ -14,6 +14,7 @@ from core.tools.utils.text_processing_utils import remove_leading_symbols
         ("Hello, World!", "Hello, World!"),
         ("", ""),
         (" ", " "),
+        ("【测试】", "【测试】"),
     ],
 )
 def test_remove_leading_symbols(input_text, expected_output):

View File

@@ -1337,7 +1337,7 @@ wheels = [
 [[package]]
 name = "dify-api"
-version = "1.10.1"
+version = "1.11.0"
 source = { virtual = "." }
 dependencies = [
     { name = "apscheduler" },
@@ -1681,7 +1681,7 @@ dev = [
     { name = "types-redis", specifier = ">=4.6.0.20241004" },
     { name = "types-regex", specifier = "~=2024.11.6" },
     { name = "types-setuptools", specifier = ">=80.9.0" },
-    { name = "types-shapely", specifier = "~=2.0.0" },
+    { name = "types-shapely", specifier = "~=2.1.0" },
     { name = "types-simplejson", specifier = ">=3.20.0" },
     { name = "types-six", specifier = ">=1.17.0" },
     { name = "types-tensorflow", specifier = ">=2.18.0" },
@@ -6557,14 +6557,14 @@ wheels = [
 [[package]]
 name = "types-shapely"
-version = "2.0.0.20250404"
+version = "2.1.0.20250917"
 source = { registry = "https://pypi.org/simple" }
 dependencies = [
     { name = "numpy" },
 ]
-sdist = { url = "https://files.pythonhosted.org/packages/4e/55/c71a25fd3fc9200df4d0b5fd2f6d74712a82f9a8bbdd90cefb9e6aee39dd/types_shapely-2.0.0.20250404.tar.gz", hash = "sha256:863f540b47fa626c33ae64eae06df171f9ab0347025d4458d2df496537296b4f", size = 25066, upload-time = "2025-04-04T02:54:30.592Z" }
+sdist = { url = "https://files.pythonhosted.org/packages/fa/19/7f28b10994433d43b9caa66f3b9bd6a0a9192b7ce8b5a7fc41534e54b821/types_shapely-2.1.0.20250917.tar.gz", hash = "sha256:5c56670742105aebe40c16414390d35fcaa55d6f774d328c1a18273ab0e2134a", size = 26363, upload-time = "2025-09-17T02:47:44.604Z" }
 wheels = [
-    { url = "https://files.pythonhosted.org/packages/ce/ff/7f4d414eb81534ba2476f3d54f06f1463c2ebf5d663fd10cff16ba607dd6/types_shapely-2.0.0.20250404-py3-none-any.whl", hash = "sha256:170fb92f5c168a120db39b3287697fdec5c93ef3e1ad15e52552c36b25318821", size = 36350, upload-time = "2025-04-04T02:54:29.506Z" },
+    { url = "https://files.pythonhosted.org/packages/e5/a9/554ac40810e530263b6163b30a2b623bc16aae3fb64416f5d2b3657d0729/types_shapely-2.1.0.20250917-py3-none-any.whl", hash = "sha256:9334a79339504d39b040426be4938d422cec419168414dc74972aa746a8bf3a1", size = 37813, upload-time = "2025-09-17T02:47:43.788Z" },
 ]

 [[package]]
[[package]]

View File

@@ -21,7 +21,7 @@ services:
   # API service
   api:
-    image: langgenius/dify-api:1.10.1-fix.1
+    image: langgenius/dify-api:1.11.0
     restart: always
     environment:
       # Use the shared environment variables.
@@ -62,7 +62,7 @@ services:
   # worker service
   # The Celery worker for processing all queues (dataset, workflow, mail, etc.)
   worker:
-    image: langgenius/dify-api:1.10.1-fix.1
+    image: langgenius/dify-api:1.11.0
     restart: always
     environment:
       # Use the shared environment variables.
@@ -101,7 +101,7 @@ services:
   # worker_beat service
   # Celery beat for scheduling periodic tasks.
   worker_beat:
-    image: langgenius/dify-api:1.10.1-fix.1
+    image: langgenius/dify-api:1.11.0
     restart: always
     environment:
       # Use the shared environment variables.
@@ -131,7 +131,7 @@ services:
   # Frontend web application.
   web:
-    image: langgenius/dify-web:1.10.1-fix.1
+    image: langgenius/dify-web:1.11.0
     restart: always
     environment:
       CONSOLE_API_URL: ${CONSOLE_API_URL:-}
@@ -268,7 +268,7 @@ services:
   # plugin daemon
   plugin_daemon:
-    image: langgenius/dify-plugin-daemon:0.4.1-local
+    image: langgenius/dify-plugin-daemon:0.5.1-local
     restart: always
     environment:
       # Use the shared environment variables.

View File

@@ -123,7 +123,7 @@ services:
   # plugin daemon
   plugin_daemon:
-    image: langgenius/dify-plugin-daemon:0.4.1-local
+    image: langgenius/dify-plugin-daemon:0.5.1-local
     restart: always
     env_file:
       - ./middleware.env

View File

@@ -658,7 +658,7 @@ services:
   # API service
   api:
-    image: langgenius/dify-api:1.10.1-fix.1
+    image: langgenius/dify-api:1.11.0
     restart: always
     environment:
       # Use the shared environment variables.
@@ -699,7 +699,7 @@ services:
   # worker service
   # The Celery worker for processing all queues (dataset, workflow, mail, etc.)
   worker:
-    image: langgenius/dify-api:1.10.1-fix.1
+    image: langgenius/dify-api:1.11.0
     restart: always
     environment:
       # Use the shared environment variables.
@@ -738,7 +738,7 @@ services:
   # worker_beat service
   # Celery beat for scheduling periodic tasks.
   worker_beat:
-    image: langgenius/dify-api:1.10.1-fix.1
+    image: langgenius/dify-api:1.11.0
     restart: always
     environment:
       # Use the shared environment variables.
@@ -768,7 +768,7 @@ services:
   # Frontend web application.
   web:
-    image: langgenius/dify-web:1.10.1-fix.1
+    image: langgenius/dify-web:1.11.0
     restart: always
     environment:
       CONSOLE_API_URL: ${CONSOLE_API_URL:-}
@@ -905,7 +905,7 @@ services:
   # plugin daemon
   plugin_daemon:
-    image: langgenius/dify-plugin-daemon:0.4.1-local
+    image: langgenius/dify-plugin-daemon:0.5.1-local
     restart: always
     environment:
       # Use the shared environment variables.

View File

@@ -328,6 +328,7 @@ const StepOne = ({
             crawlOptions={crawlOptions}
             onCrawlOptionsChange={onCrawlOptionsChange}
             authedDataSourceList={authedDataSourceList}
+            supportBatchUpload={supportBatchUpload}
           />
         </div>
         {isShowVectorSpaceFull && (

View File

@@ -70,10 +70,10 @@ export const isSystemVar = (valueSelector: ValueSelector) => {
 }

 export const isGlobalVar = (valueSelector: ValueSelector) => {
-  if(!isSystemVar(valueSelector)) return false
+  if (!isSystemVar(valueSelector)) return false
   const second = valueSelector[1]
-  if(['query', 'files'].includes(second))
+  if (['query', 'files'].includes(second))
     return false
   return true
 }
@@ -1296,7 +1296,7 @@ export const getNodeUsedVars = (node: Node): ValueSelector[] => {
     case BlockEnum.KnowledgeRetrieval: {
       const {
         query_variable_selector,
-        query_attachment_selector,
+        query_attachment_selector = [],
       } = data as KnowledgeRetrievalNodeType
       res = [query_variable_selector, query_attachment_selector]
       break
@@ -1638,7 +1638,7 @@ export const updateNodeVars = (
       )
       payload.query_variable_selector = newVarSelector
       if (
-        payload.query_attachment_selector.join('.') === oldVarSelector.join('.')
+        payload.query_attachment_selector?.join('.') === oldVarSelector.join('.')
       )
         payload.query_attachment_selector = newVarSelector
       break

View File

@@ -80,7 +80,7 @@ const useSingleRunFormParams = ({
     },
   ]
   if (hasMultiModalDatasets) {
-    const currentVariable = findVariableWhenOnLLMVision(payload.query_attachment_selector, availableFileVars)
+    const currentVariable = findVariableWhenOnLLMVision(payload.query_attachment_selector || [], availableFileVars)
     inputFields.push(
       {
         inputs: [{
@@ -98,13 +98,13 @@ const useSingleRunFormParams = ({
   }, [query, setQuery, t, datasetsDetail, payload.dataset_ids, payload.query_attachment_selector, availableFileVars, queryAttachment, setQueryAttachment])

   const getDependentVars = () => {
-    return [payload.query_variable_selector, payload.query_attachment_selector]
+    return [payload.query_variable_selector, payload.query_attachment_selector || []]
   }

   const getDependentVar = (variable: string) => {
     if (variable === 'query')
       return payload.query_variable_selector
     if (variable === 'queryAttachment')
-      return payload.query_attachment_selector
+      return payload.query_attachment_selector || []
   }

   return {

View File

@@ -12,8 +12,9 @@ describe('useAsyncWindowOpen', () => {
     window.open = originalOpen
   })

-  it('opens immediate url synchronously without calling async getter', async () => {
-    const openSpy = jest.fn()
+  it('opens immediate url synchronously, clears opener, without calling async getter', async () => {
+    const mockWindow: any = { opener: 'should-clear' }
+    const openSpy = jest.fn(() => mockWindow)
     window.open = openSpy
     const getUrl = jest.fn()

     const { result } = renderHook(() => useAsyncWindowOpen())
@@ -22,12 +23,54 @@ describe('useAsyncWindowOpen', () => {
       await result.current(getUrl, {
         immediateUrl: 'https://example.com',
         target: '_blank',
-        features: 'noopener,noreferrer',
+        features: undefined,
       })
     })

     expect(openSpy).toHaveBeenCalledWith('https://example.com', '_blank', 'noopener,noreferrer')
     expect(getUrl).not.toHaveBeenCalled()
+    expect(mockWindow.opener).toBeNull()
   })

+  it('appends noopener,noreferrer when immediate open passes custom features', async () => {
+    const mockWindow: any = { opener: 'should-clear' }
+    const openSpy = jest.fn(() => mockWindow)
+    window.open = openSpy
+    const getUrl = jest.fn()
+
+    const { result } = renderHook(() => useAsyncWindowOpen())
+
+    await act(async () => {
+      await result.current(getUrl, {
+        immediateUrl: 'https://example.com',
+        target: '_blank',
+        features: 'width=500',
+      })
+    })
+
+    expect(openSpy).toHaveBeenCalledWith('https://example.com', '_blank', 'width=500,noopener,noreferrer')
+    expect(getUrl).not.toHaveBeenCalled()
+    expect(mockWindow.opener).toBeNull()
+  })
+
+  it('reports error when immediate window fails to open', async () => {
+    const openSpy = jest.fn(() => null)
+    window.open = openSpy
+    const getUrl = jest.fn()
+    const onError = jest.fn()
+
+    const { result } = renderHook(() => useAsyncWindowOpen())
+
+    await act(async () => {
+      await result.current(getUrl, {
+        immediateUrl: 'https://example.com',
+        target: '_blank',
+        onError,
+      })
+    })
+
+    expect(onError).toHaveBeenCalled()
+    const errArg = onError.mock.calls[0][0] as Error
+    expect(errArg.message).toBe('Failed to open new window')
+    expect(getUrl).not.toHaveBeenCalled()
+  })

   it('sets opener to null and redirects when async url resolves', async () => {
@@ -75,6 +118,30 @@ describe('useAsyncWindowOpen', () => {
     expect(mockWindow.location.href).toBe('')
   })

+  it('preserves custom features as-is for async open', async () => {
+    const close = jest.fn()
+    const mockWindow: any = {
+      location: { href: '' },
+      close,
+      opener: 'should-be-cleared',
+    }
+    const openSpy = jest.fn(() => mockWindow)
+    window.open = openSpy
+
+    const { result } = renderHook(() => useAsyncWindowOpen())
+
+    await act(async () => {
+      await result.current(async () => 'https://example.com/path', {
+        target: '_blank',
+        features: 'width=500',
+      })
+    })
+
+    expect(openSpy).toHaveBeenCalledWith('about:blank', '_blank', 'width=500')
+    expect(mockWindow.opener).toBeNull()
+    expect(mockWindow.location.href).toBe('https://example.com/path')
+    expect(close).not.toHaveBeenCalled()
+  })
+
   it('closes placeholder and reports when no url is returned', async () => {
     const close = jest.fn()
     const mockWindow: any = {

View File

@@ -17,8 +17,18 @@ export const useAsyncWindowOpen = () => useCallback(async (getUrl: GetUrl, optio
     onError,
   } = options ?? {}

+  const secureImmediateFeatures = features ? `${features},noopener,noreferrer` : 'noopener,noreferrer'
+
   if (immediateUrl) {
-    window.open(immediateUrl, target, features)
+    const newWindow = window.open(immediateUrl, target, secureImmediateFeatures)
+    if (!newWindow) {
+      onError?.(new Error('Failed to open new window'))
+      return
+    }
+    try {
+      newWindow.opener = null
+    }
+    catch { /* noop */ }
     return
   }

View File

@@ -137,9 +137,6 @@ const translation = {
     notConnected: '{{name}} ist nicht verbunden',
     notConnectedTip: 'Um mit {{name}} zu synchronisieren, muss zuerst eine Verbindung zu {{name}} hergestellt werden.',
   },
-  credentialSelector: {
-    name: '{{credentialName}}\'s {{pluginName}}',
-  },
   conversion: {
     confirm: {
       title: 'Bestätigung',

View File

@@ -137,9 +137,6 @@ const translation = {
     notConnected: '{{name}} no está conectado',
     notConnectedTip: 'Para sincronizar con {{name}}, primero se debe establecer conexión con {{name}}.',
   },
-  credentialSelector: {
-    name: '{{credentialName}} de {{pluginName}}',
-  },
   conversion: {
     confirm: {
       title: 'Confirmación',

View File

@@ -137,9 +137,6 @@ const translation = {
    notConnected: '{{name}} متصل نیست',
    notConnectedTip: 'برای همگام‌سازی با {{name}}، ابتدا باید اتصال به {{name}} برقرار شود.',
   },
-  credentialSelector: {
-    name: '{{pluginName}} {{credentialName}}',
-  },
   conversion: {
     confirm: {
       title: 'تایید',

View File

@@ -137,9 +137,6 @@ const translation = {
     notConnected: '{{name}} n\'est pas connecté',
     notConnectedTip: 'Pour se synchroniser avec {{name}}, une connexion à {{name}} doit d\'abord être établie.',
   },
-  credentialSelector: {
-    name: '{{credentialName}} de {{pluginName}}',
-  },
   conversion: {
     confirm: {
       title: 'Confirmation',

View File

@@ -137,9 +137,6 @@ const translation = {
     notConnected: '{{name}} कनेक्ट नहीं है',
     notConnectedTip: '{{name}} के साथ सिंक करने के लिए, पहले {{name}} से कनेक्शन स्थापित करना आवश्यक है।',
   },
-  credentialSelector: {
-    name: '{{credentialName}} का {{pluginName}}',
-  },
   conversion: {
     confirm: {
       title: 'पुष्टि',

View File

@@ -137,9 +137,6 @@ const translation = {
     notConnected: '{{name}} tidak terhubung',
     notConnectedTip: 'Untuk menyinkronkan dengan {{name}}, koneksi ke {{name}} harus dibuat terlebih dahulu.',
   },
-  credentialSelector: {
-    name: '{{credentialName}}\'s {{pluginName}}',
-  },
   conversion: {
     confirm: {
       title: 'Konfirmasi',

View File

@@ -137,9 +137,6 @@ const translation = {
     notConnected: '{{name}} non è connesso',
     notConnectedTip: 'Per sincronizzarsi con {{name}}, è necessario prima stabilire la connessione a {{name}}.',
   },
-  credentialSelector: {
-    name: '{{credentialName}}\'s {{pluginName}}',
-  },
   conversion: {
     confirm: {
       content: 'Questa azione è permanente. Non sarà possibile ripristinare il metodo precedente. Si prega di confermare per convertire.',

View File

@@ -137,9 +137,6 @@ const translation = {
     notConnected: '{{name}}가 연결되어 있지 않습니다',
     notConnectedTip: '{{name}}와(과) 동기화하려면 먼저 {{name}}에 연결해야 합니다.',
   },
-  credentialSelector: {
-    name: '{{credentialName}}의 {{pluginName}}',
-  },
   conversion: {
     confirm: {
       title: '확인',

View File

@@ -137,9 +137,6 @@ const translation = {
     notConnected: '{{name}} nie jest połączony',
     notConnectedTip: 'Aby zsynchronizować się z {{name}}, najpierw należy nawiązać połączenie z {{name}}.',
   },
-  credentialSelector: {
-    name: '{{credentialName}}\'s {{pluginName}}',
-  },
   conversion: {
     confirm: {
       title: 'Potwierdzenie',

View File

@@ -137,9 +137,6 @@ const translation = {
     notConnected: '{{name}} não está conectado',
     notConnectedTip: 'Para sincronizar com {{name}}, a conexão com {{name}} deve ser estabelecida primeiro.',
   },
-  credentialSelector: {
-    name: '{{credentialName}} de {{pluginName}}',
-  },
   conversion: {
     confirm: {
       title: 'Confirmação',

View File

@@ -137,9 +137,6 @@ const translation = {
     notConnected: '{{name}} nu este conectat',
     notConnectedTip: 'Pentru a sincroniza cu {{name}}, trebuie mai întâi să se stabilească conexiunea cu {{name}}.',
   },
-  credentialSelector: {
-    name: '{{pluginName}} al/a lui {{credentialName}}',
-  },
   conversion: {
     confirm: {
       title: 'Confirmare',

View File

@@ -137,9 +137,6 @@ const translation = {
     notConnected: '{{name}} не подключен',
     notConnectedTip: 'Чтобы синхронизироваться с {{name}}, сначала необходимо установить соединение с {{name}}.',
   },
-  credentialSelector: {
-    name: '{{credentialName}}\'s {{pluginName}}',
-  },
   conversion: {
     confirm: {
       title: 'Подтверждение',

View File

@@ -137,9 +137,6 @@ const translation = {
     notConnected: '{{name}} ni povezan',
     notConnectedTip: 'Za sinhronizacijo z {{name}} je treba najprej vzpostaviti povezavo z {{name}}.',
   },
-  credentialSelector: {
-    name: '{{credentialName}}\'s {{pluginName}}',
-  },
   conversion: {
     confirm: {
       title: 'Potrditev',

View File

@@ -137,9 +137,6 @@ const translation = {
     notConnected: '{{name}} ไม่ได้เชื่อมต่อ',
     notConnectedTip: 'เพื่อซิงค์กับ {{name}} ต้องสร้างการเชื่อมต่อกับ {{name}} ก่อน',
   },
-  credentialSelector: {
-    name: '{{credentialName}}\'s {{pluginName}}',
-  },
   conversion: {
     confirm: {
       title: 'การยืนยัน',

View File

@@ -137,9 +137,6 @@ const translation = {
     notConnected: '{{name}} bağlı değil',
     notConnectedTip: '{{name}} ile senkronize olmak için önce {{name}} bağlantısının kurulması gerekir.',
   },
-  credentialSelector: {
-    name: '{{credentialName}}\'un {{pluginName}}',
-  },
   conversion: {
     confirm: {
       title: 'Onay',

View File

@@ -137,9 +137,6 @@ const translation = {
     notConnected: '{{name}} не підключено',
     notConnectedTip: 'Щоб синхронізувати з {{name}}, спершу потрібно встановити з’єднання з {{name}}.',
   },
-  credentialSelector: {
-    name: '{{credentialName}}\'s {{pluginName}}',
-  },
   conversion: {
     confirm: {
       title: 'Підтвердження',

View File

@@ -137,9 +137,6 @@ const translation = {
     notConnected: '{{name}} không được kết nối',
     notConnectedTip: 'Để đồng bộ với {{name}}, trước tiên phải thiết lập kết nối với {{name}}.',
   },
-  credentialSelector: {
-    name: '{{credentialName}}\'s {{pluginName}}',
-  },
   conversion: {
     confirm: {
       title: 'Sự xác nhận',

View File

@@ -137,9 +137,6 @@ const translation = {
     notConnected: '{{name}} 未連接',
     notConnectedTip: '要與 {{name}} 同步,必須先建立與 {{name}} 的連線。',
   },
-  credentialSelector: {
-    name: '{{credentialName}}的{{pluginName}}',
-  },
   conversion: {
     confirm: {
       title: '證實',

View File

@@ -1,8 +1,8 @@
 {
   "name": "dify-web",
-  "version": "1.10.1",
+  "version": "1.11.0",
   "private": true,
-  "packageManager": "pnpm@10.24.0+sha512.01ff8ae71b4419903b65c60fb2dc9d34cf8bb6e06d03bde112ef38f7a34d6904c424ba66bea5cdcf12890230bf39f9580473140ed9c946fef328b6e5238a345a",
+  "packageManager": "pnpm@10.25.0+sha512.5e82639027af37cf832061bcc6d639c219634488e0f2baebe785028a793de7b525ffcd3f7ff574f5e9860654e098fe852ba8ac5dd5cefe1767d23a020a92f501",
   "engines": {
     "node": ">=v22.11.0"
   },
@@ -184,6 +184,7 @@
     "@types/semver": "^7.7.1",
     "@types/sortablejs": "^1.15.8",
     "@types/uuid": "^10.0.0",
+    "@typescript-eslint/parser": "^8.48.0",
     "@typescript/native-preview": "^7.0.0-dev",
     "autoprefixer": "^10.4.21",
     "babel-loader": "^10.0.0",

File diff suppressed because it is too large.

View File

@@ -2,6 +2,9 @@
 const fs = require('node:fs')
 const path = require('node:path')
+const { Linter } = require('eslint')
+const sonarPlugin = require('eslint-plugin-sonarjs')
+const tsParser = require('@typescript-eslint/parser')

 // ============================================================================
 // Simple Analyzer
@@ -12,7 +15,11 @@ class ComponentAnalyzer {
     const resolvedPath = absolutePath ?? path.resolve(process.cwd(), filePath)
     const fileName = path.basename(filePath, path.extname(filePath))
     const lineCount = code.split('\n').length
-    const complexity = this.calculateComplexity(code, lineCount)
+
+    // Calculate complexity metrics
+    const { total: rawComplexity, max: rawMaxComplexity } = this.calculateCognitiveComplexity(code)
+    const complexity = this.normalizeComplexity(rawComplexity)
+    const maxComplexity = this.normalizeComplexity(rawMaxComplexity)

     // Count usage references (may take a few seconds)
     const usageCount = this.countUsageReferences(filePath, resolvedPath)
@@ -41,6 +48,9 @@ class ComponentAnalyzer {
       hasReactQuery: code.includes('useQuery') || code.includes('useMutation'),
       hasAhooks: code.includes("from 'ahooks'"),
       complexity,
+      maxComplexity,
+      rawComplexity,
+      rawMaxComplexity,
       lineCount,
       usageCount,
       priority,
@@ -64,193 +74,96 @@ class ComponentAnalyzer {
   }

   /**
-   * Calculate component complexity score
-   * Based on Cognitive Complexity + React-specific metrics
+   * Calculate Cognitive Complexity using SonarJS ESLint plugin
+   * Reference: https://www.sonarsource.com/blog/5-clean-code-tips-for-reducing-cognitive-complexity/
    *
-   * Score Ranges:
-   * 0-10: 🟢 Simple (5-10 min to test)
-   * 11-30: 🟡 Medium (15-30 min to test)
-   * 31-50: 🟠 Complex (30-60 min to test)
-   * 51+: 🔴 Very Complex (60+ min, consider splitting)
+   * Returns raw (unnormalized) complexity values:
+   * - total: sum of all functions' complexity in the file
+   * - max: highest single function complexity in the file
+   *
+   * Raw Score Thresholds (per function):
+   * 0-15: Simple | 16-30: Medium | 31-50: Complex | 51+: Very Complex
+   *
+   * @returns {{ total: number, max: number }} raw total and max complexity
    */
-  calculateComplexity(code, lineCount) {
-    let score = 0
+  calculateCognitiveComplexity(code) {
+    const linter = new Linter()
+    const baseConfig = {
+      languageOptions: {
+        parser: tsParser,
+        parserOptions: {
+          ecmaVersion: 'latest',
+          sourceType: 'module',
+          ecmaFeatures: { jsx: true },
+        },
+      },
+      plugins: { sonarjs: sonarPlugin },
+    }

-    const count = pattern => this.countMatches(code, pattern)
+    try {
+      // Get total complexity using 'metric' option (more stable)
+      const totalConfig = {
+        ...baseConfig,
+        rules: { 'sonarjs/cognitive-complexity': ['error', 0, 'metric'] },
+      }
+      const totalMessages = linter.verify(code, totalConfig)
+      const totalMsg = totalMessages.find(
+        msg => msg.ruleId === 'sonarjs/cognitive-complexity'
+          && msg.messageId === 'fileComplexity',
+      )
+      const total = totalMsg ? parseInt(totalMsg.message, 10) : 0

-    // ===== React Hooks (State Management Complexity) =====
-    const stateHooks = count(/useState/g)
-    const reducerHooks = count(/useReducer/g)
-    const effectHooks = count(/useEffect/g)
-    const callbackHooks = count(/useCallback/g)
-    const memoHooks = count(/useMemo/g)
-    const refHooks = count(/useRef/g)
-    const imperativeHandleHooks = count(/useImperativeHandle/g)
+      // Get max function complexity by analyzing each function
+      const maxConfig = {
+        ...baseConfig,
+        rules: { 'sonarjs/cognitive-complexity': ['error', 0] },
+      }
+      const maxMessages = linter.verify(code, maxConfig)
+      let max = 0
+      const complexityPattern = /reduce its Cognitive Complexity from (\d+)/

-    const builtinHooks = stateHooks + reducerHooks + effectHooks
-      + callbackHooks + memoHooks + refHooks + imperativeHandleHooks
-    const totalHooks = count(/use[A-Z]\w+/g)
-    const customHooks = Math.max(0, totalHooks - builtinHooks)
+      maxMessages.forEach((msg) => {
+        if (msg.ruleId === 'sonarjs/cognitive-complexity') {
+          const match = msg.message.match(complexityPattern)
+          if (match && match[1])
+            max = Math.max(max, parseInt(match[1], 10))
+        }
+      })

-    score += stateHooks * 5 // Each state +5 (need to test state changes)
-    score += reducerHooks * 6 // Each reducer +6 (complex state management)
-    score += effectHooks * 6 // Each effect +6 (need to test deps & cleanup)
-    score += callbackHooks * 2 // Each callback +2
-    score += memoHooks * 2 // Each memo +2
-    score += refHooks * 1 // Each ref +1
-    score += imperativeHandleHooks * 4 // Each imperative handle +4 (exposes methods)
-    score += customHooks * 3 // Each custom hook +3
-
-    // ===== Control Flow Complexity (Cyclomatic Complexity) =====
-    score += count(/if\s*\(/g) * 2 // if statement
-    score += count(/else\s+if/g) * 2 // else if
-    score += count(/\?\s*[^:]+\s*:/g) * 1 // ternary operator
-    score += count(/switch\s*\(/g) * 3 // switch
-    score += count(/case\s+/g) * 1 // case branch
-    score += count(/&&/g) * 1 // logical AND
-    score += count(/\|\|/g) * 1 // logical OR
-    score += count(/\?\?/g) * 1 // nullish coalescing
-
-    // ===== Loop Complexity =====
-    score += count(/\.map\(/g) * 2 // map
-    score += count(/\.filter\(/g) * 1 // filter
-    score += count(/\.reduce\(/g) * 3 // reduce (complex)
-    score += count(/for\s*\(/g) * 2 // for loop
-    score += count(/while\s*\(/g) * 3 // while loop
-
-    // ===== Props and Events Complexity =====
-    // Count unique props from interface/type definitions only (avoid duplicates)
-    const propsCount = this.countUniqueProps(code)
-    score += Math.floor(propsCount / 2) // Every 2 props +1
-
-    // Count unique event handler names (avoid duplicates from type defs, params, usage)
-    const uniqueEventHandlers = this.countUniqueEventHandlers(code)
-    score += uniqueEventHandlers * 2 // Each unique event handler +2
-
-    // ===== API Call Complexity =====
-    score += count(/fetch\(/g) * 4 // fetch
-    score += count(/axios\./g) * 4 // axios
-    score += count(/useSWR/g) * 4 // SWR
-    score += count(/useQuery/g) * 4 // React Query
-    score += count(/\.then\(/g) * 2 // Promise
-    score += count(/await\s+/g) * 2 // async/await
-
-    // ===== Third-party Library Integration =====
-    // Only count complex UI libraries that require integration testing
-    // Data fetching libs (swr, react-query, ahooks) don't add complexity
-    // because they are already well-tested; we only need to mock them
-    const complexUILibs = [
-      { pattern: /reactflow|ReactFlow/, weight: 15 },
-      { pattern: /@monaco-editor/, weight: 12 },
-      { pattern: /echarts/, weight: 8 },
-      { pattern: /lexical/, weight: 10 },
-    ]
-    complexUILibs.forEach(({ pattern, weight }) => {
-      if (pattern.test(code)) score += weight
-    })
-
-    // ===== Code Size Complexity =====
-    if (lineCount > 500) score += 10
-    else if (lineCount > 300) score += 6
-    else if (lineCount > 150) score += 3
-
-    // ===== Nesting Depth (deep nesting reduces readability) =====
-    const maxNesting = this.calculateNestingDepth(code)
-    score += Math.max(0, (maxNesting - 3)) * 2 // Over 3 levels, +2 per level
-
-    // ===== Context and Global State =====
-    score += count(/useContext/g) * 3
-    score += count(/useStore|useAppStore/g) * 4
-    score += count(/zustand|redux/g) * 3
-
-    // ===== React Advanced Features =====
-    score += count(/React\.memo|memo\(/g) * 2 // Component memoization
-    score += count(/forwardRef/g) * 3 // Ref forwarding
-    score += count(/Suspense/g) * 4 // Suspense boundaries
-    score += count(/\blazy\(/g) * 3 // Lazy loading
-    score += count(/createPortal/g) * 3 // Portal rendering
-
-    return Math.min(score, 100) // Max 100 points
+      return { total, max }
+    }
+    catch {
+      return { total: 0, max: 0 }
+    }
   }
   /**
-   * Calculate maximum nesting depth
+   * Normalize cognitive complexity to 0-100 scale
+   *
+   * Mapping (aligned with SonarJS thresholds):
+   * Raw 0-15 (Simple) -> Normalized 0-25
+   * Raw 16-30 (Medium) -> Normalized 25-50
+   * Raw 31-50 (Complex) -> Normalized 50-75
+   * Raw 51+ (Very Complex) -> Normalized 75-100 (asymptotic)
    */
-  calculateNestingDepth(code) {
-    let maxDepth = 0
-    let currentDepth = 0
-    let inString = false
-    let stringChar = ''
-    let escapeNext = false
-    let inSingleLineComment = false
-    let inMultiLineComment = false
-
-    for (let i = 0; i < code.length; i++) {
-      const char = code[i]
-      const nextChar = code[i + 1]
-
-      if (inSingleLineComment) {
-        if (char === '\n') inSingleLineComment = false
-        continue
-      }
-      if (inMultiLineComment) {
-        if (char === '*' && nextChar === '/') {
-          inMultiLineComment = false
-          i++
-        }
-        continue
-      }
-      if (inString) {
-        if (escapeNext) {
-          escapeNext = false
-          continue
-        }
-        if (char === '\\') {
-          escapeNext = true
-          continue
-        }
-        if (char === stringChar) {
-          inString = false
-          stringChar = ''
-        }
-        continue
-      }
-
-      if (char === '/' && nextChar === '/') {
-        inSingleLineComment = true
-        i++
-        continue
-      }
-      if (char === '/' && nextChar === '*') {
-        inMultiLineComment = true
-        i++
-        continue
-      }
-      if (char === '"' || char === '\'' || char === '`') {
-        inString = true
-        stringChar = char
-        continue
-      }
-
-      if (char === '{') {
-        currentDepth++
-        maxDepth = Math.max(maxDepth, currentDepth)
-        continue
-      }
-      if (char === '}') {
-        currentDepth = Math.max(currentDepth - 1, 0)
-      }
-    }
+  normalizeComplexity(rawComplexity) {
+    if (rawComplexity <= 15) {
+      // Linear: 0-15 -> 0-25
+      return Math.round((rawComplexity / 15) * 25)
+    }
+    else if (rawComplexity <= 30) {
+      // Linear: 16-30 -> 25-50
+      return Math.round(25 + ((rawComplexity - 15) / 15) * 25)
+    }
+    else if (rawComplexity <= 50) {
+      // Linear: 31-50 -> 50-75
+      return Math.round(50 + ((rawComplexity - 30) / 20) * 25)
+    }
+    else {
+      // Asymptotic: 51+ -> 75-100
+      // Formula ensures score approaches but never exceeds 100
+      return Math.round(75 + 25 * (1 - 1 / (1 + (rawComplexity - 50) / 100)))
+    }
-
-    return maxDepth
   }

   /**
@@ -379,86 +292,41 @@ class ComponentAnalyzer {
     return true
   }

-  countMatches(code, pattern) {
-    const matches = code.match(pattern)
-    return matches ? matches.length : 0
-  }
-
-  /**
-   * Count unique props from interface/type definitions
-   * Only counts props defined in type/interface blocks, not usage
-   */
-  countUniqueProps(code) {
-    const uniqueProps = new Set()
-    // Match interface or type definition blocks
-    const typeBlockPattern = /(?:interface|type)\s+\w*Props[^{]*\{([^}]+)\}/g
-    let match
-    while ((match = typeBlockPattern.exec(code)) !== null) {
-      const blockContent = match[1]
-      // Match prop names (word followed by optional ? and :)
-      const propPattern = /(\w+)\s*\??:/g
-      let propMatch
-      while ((propMatch = propPattern.exec(blockContent)) !== null) {
-        uniqueProps.add(propMatch[1])
-      }
-    }
-    return Math.min(uniqueProps.size, 20) // Max 20 props
-  }
-
-  /**
-   * Count unique event handler names (on[A-Z]...)
-   * Avoids counting the same handler multiple times across type defs, params, and usage
-   */
-  countUniqueEventHandlers(code) {
-    const uniqueHandlers = new Set()
-    const pattern = /on[A-Z]\w+/g
-    let match
-    while ((match = pattern.exec(code)) !== null) {
-      uniqueHandlers.add(match[0])
-    }
-    return uniqueHandlers.size
-  }
-
   static escapeRegExp(value) {
     return value.replace(/[.*+?^${}()|[\]\\]/g, '\\$&')
   }

   /**
-   * Calculate test priority based on complexity and usage
+   * Calculate test priority based on cognitive complexity and usage
    *
-   * Priority Score = Complexity Score + Usage Score
-   * - Complexity: 0-100
-   * - Usage: 0-50
-   * - Total: 0-150
+   * Priority Score = 0.7 * Complexity + 0.3 * Usage Score (all normalized to 0-100)
+   * - Complexity Score: 0-100 (normalized from SonarJS)
+   * - Usage Score: 0-100 (based on reference count)
    *
-   * Priority Levels:
-   * - 0-30: Low
-   * - 31-70: Medium
-   * - 71-100: High
-   * - 100+: Critical
+   * Priority Levels (0-100):
+   * - 0-25: 🟢 LOW
+   * - 26-50: 🟡 MEDIUM
+   * - 51-75: 🟠 HIGH
+   * - 76-100: 🔴 CRITICAL
    */
   calculateTestPriority(complexity, usageCount) {
     const complexityScore = complexity

-    // Usage score calculation
+    // Normalize usage score to 0-100
     let usageScore
     if (usageCount === 0)
       usageScore = 0
     else if (usageCount <= 5)
-      usageScore = 10
-    else if (usageCount <= 20)
       usageScore = 20
+    else if (usageCount <= 20)
+      usageScore = 40
     else if (usageCount <= 50)
-      usageScore = 35
+      usageScore = 70
     else
-      usageScore = 50
+      usageScore = 100

-    const totalScore = complexityScore + usageScore
+    // Weighted average: complexity (70%) + usage (30%)
+    const totalScore = Math.round(0.7 * complexityScore + 0.3 * usageScore)

     return {
       score: totalScore,
@@ -469,12 +337,12 @@ class ComponentAnalyzer {
   }

   /**
-   * Get priority level based on score
+   * Get priority level based on score (0-100 scale)
    */
   getPriorityLevel(score) {
-    if (score > 100) return '🔴 CRITICAL'
-    if (score > 70) return '🟠 HIGH'
-    if (score > 30) return '🟡 MEDIUM'
+    if (score > 75) return '🔴 CRITICAL'
+    if (score > 50) return '🟠 HIGH'
+    if (score > 25) return '🟡 MEDIUM'
     return '🟢 LOW'
   }
 }
@@ -498,10 +366,11 @@ class TestPromptBuilder {
 📊 Component Analysis:
-  Type: ${analysis.type}
-  Complexity: ${analysis.complexity} ${this.getComplexityLevel(analysis.complexity)}
-  Lines: ${analysis.lineCount}
-  Usage: ${analysis.usageCount} reference${analysis.usageCount !== 1 ? 's' : ''}
+  Type: ${analysis.type}
+  Total Complexity: ${analysis.complexity}/100 ${this.getComplexityLevel(analysis.complexity)}
+  Max Func Complexity: ${analysis.maxComplexity}/100 ${this.getComplexityLevel(analysis.maxComplexity)}
+  Lines: ${analysis.lineCount}
+  Usage: ${analysis.usageCount} reference${analysis.usageCount !== 1 ? 's' : ''}
   Test Priority: ${analysis.priority.score} ${analysis.priority.level}

 Features Detected:
@@ -549,10 +418,10 @@ Create the test file at: ${testPath}
   }

   getComplexityLevel(score) {
-    // Aligned with testing.md guidelines
-    if (score <= 10) return '🟢 Simple'
-    if (score <= 30) return '🟡 Medium'
-    if (score <= 50) return '🟠 Complex'
+    // Normalized complexity thresholds (0-100 scale)
+    if (score <= 25) return '🟢 Simple'
+    if (score <= 50) return '🟡 Medium'
+    if (score <= 75) return '🟠 Complex'
     return '🔴 Very Complex'
   }
@@ -605,20 +474,31 @@ Create the test file at: ${testPath}
     }

     // ===== Complexity Warning =====
-    if (analysis.complexity > 50) {
-      guidelines.push('🔴 VERY COMPLEX component detected. Consider:')
+    if (analysis.complexity > 75) {
+      guidelines.push(`🔴 HIGH Total Complexity (${analysis.complexity}/100). Consider:`)
       guidelines.push('  - Splitting component into smaller pieces before testing')
       guidelines.push('  - Creating integration tests for complex workflows')
       guidelines.push('  - Using test.each() for data-driven tests')
       guidelines.push('  - Adding performance benchmarks')
     }
-    else if (analysis.complexity > 30) {
-      guidelines.push('⚠️ This is a COMPLEX component. Consider:')
+    else if (analysis.complexity > 50) {
+      guidelines.push(`⚠️ MODERATE Total Complexity (${analysis.complexity}/100). Consider:`)
       guidelines.push('  - Breaking tests into multiple describe blocks')
       guidelines.push('  - Testing integration scenarios')
       guidelines.push('  - Grouping related test cases')
     }

+    // ===== Max Function Complexity Warning =====
+    if (analysis.maxComplexity > 75) {
+      guidelines.push(`🔴 HIGH Single Function Complexity (max: ${analysis.maxComplexity}/100). Consider:`)
+      guidelines.push('  - Breaking down the complex function into smaller helpers')
+      guidelines.push('  - Extracting logic into custom hooks or utility functions')
+    }
+    else if (analysis.maxComplexity > 50) {
+      guidelines.push(`⚠️ MODERATE Single Function Complexity (max: ${analysis.maxComplexity}/100). Consider:`)
+      guidelines.push('  - Simplifying conditional logic')
+      guidelines.push('  - Using early returns to reduce nesting')
+    }
+
     // ===== State Management =====
     if (analysis.hasState && analysis.hasEffects) {
       guidelines.push('🔄 State + Effects detected:')
@@ -976,7 +856,7 @@ function main() {
   // Check if component is too complex - suggest refactoring instead of testing
   // Skip this check in JSON mode to always output analysis result
-  if (!isReviewMode && !isJsonMode && (analysis.complexity > 50 || analysis.lineCount > 300)) {
+  if (!isReviewMode && !isJsonMode && (analysis.complexity > 75 || analysis.lineCount > 300)) {
     console.log(`
 COMPONENT TOO COMPLEX TO TEST
@@ -987,8 +867,9 @@ function main() {
 📊 Component Metrics:
-  Complexity: ${analysis.complexity} ${analysis.complexity > 50 ? '🔴 TOO HIGH' : '⚠️ WARNING'}
-  Lines: ${analysis.lineCount} ${analysis.lineCount > 300 ? '🔴 TOO LARGE' : '⚠️ WARNING'}
+  Total Complexity: ${analysis.complexity}/100 ${analysis.complexity > 75 ? '🔴 TOO HIGH' : analysis.complexity > 50 ? '⚠️ WARNING' : '🟢 OK'}
+  Max Func Complexity: ${analysis.maxComplexity}/100 ${analysis.maxComplexity > 75 ? '🔴 TOO HIGH' : analysis.maxComplexity > 50 ? '⚠️ WARNING' : '🟢 OK'}
+  Lines: ${analysis.lineCount} ${analysis.lineCount > 300 ? '🔴 TOO LARGE' : '🟢 OK'}

 🚫 RECOMMENDATION: REFACTOR BEFORE TESTING
@@ -1017,7 +898,7 @@ This component is too complex to test effectively. Please consider:
   - Tests will be easier to write and maintain

 💡 TIP: Aim for components with:
-  - Complexity score < 30 (preferably < 20)
+  - Cognitive Complexity < 50/100 (preferably < 25/100)
   - Line count < 300 (preferably < 200)
   - Single responsibility principle
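To double-check the new scoring end to end, the normalization bands and the 70/30 priority weighting transcribe directly into a few lines of Python (a sketch of the math above, not part of the patch):

```python
def normalize(raw: float) -> int:
    # Piecewise mapping from normalizeComplexity(): linear bands, asymptotic tail.
    if raw <= 15:
        return round(raw / 15 * 25)
    if raw <= 30:
        return round(25 + (raw - 15) / 15 * 25)
    if raw <= 50:
        return round(50 + (raw - 30) / 20 * 25)
    return round(75 + 25 * (1 - 1 / (1 + (raw - 50) / 100)))

def priority(complexity: int, usage_score: int) -> int:
    # Weighted average from calculateTestPriority(): 70% complexity, 30% usage.
    return round(0.7 * complexity + 0.3 * usage_score)

# Band edges line up, and the asymptotic tail saturates below 100.
assert [normalize(r) for r in (0, 15, 30, 50)] == [0, 25, 50, 75]
assert normalize(150) == 88 and normalize(1000) < 100

# A raw-35 file (normalized 56) used in ~30 places (usage score 70) lands in HIGH (> 50).
assert priority(normalize(35), 70) == 60
```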