feat: improve follow-up settings (#35442)

Co-authored-by: autofix-ci[bot] <114827586+autofix-ci[bot]@users.noreply.github.com>
非法操作 2026-04-22 16:55:16 +08:00 committed by hjlarry
parent 61736495d2
commit 19037268cd
42 changed files with 1086 additions and 349 deletions

View File

@ -139,19 +139,6 @@ Star Dify on GitHub and be instantly notified of new releases.
If you need to customize the configuration, please refer to the comments in our [.env.example](docker/.env.example) file and update the corresponding values in your `.env` file. Additionally, you might need to make adjustments to the `docker-compose.yaml` file itself, such as changing image versions, port mappings, or volume mounts, based on your specific deployment environment and requirements. After making any changes, please re-run `docker compose up -d`. You can find the full list of available environment variables [here](https://docs.dify.ai/getting-started/install-self-hosted/environments).
#### Customizing Suggested Questions
You can now customize the "Suggested Questions After Answer" feature to better fit your use case. For example, to generate longer, more technical questions:
```bash
# In your .env file
SUGGESTED_QUESTIONS_PROMPT='Please help me predict the five most likely technical follow-up questions a developer would ask. Focus on implementation details, best practices, and architecture considerations. Keep each question between 40-60 characters. Output must be JSON array: ["question1","question2","question3","question4","question5"]'
SUGGESTED_QUESTIONS_MAX_TOKENS=512
SUGGESTED_QUESTIONS_TEMPERATURE=0.3
```
See the [Suggested Questions Configuration Guide](docs/suggested-questions-configuration.md) for detailed examples and usage instructions.
### Metrics Monitoring with Grafana
Import the dashboard into Grafana, using Dify's PostgreSQL database as the data source, to monitor metrics at the granularity of apps, tenants, messages, and more.

View File

@ -709,22 +709,6 @@ SWAGGER_UI_PATH=/swagger-ui.html
# Set to false to export dataset IDs as plain text for easier cross-environment import
DSL_EXPORT_ENCRYPT_DATASET_ID=true
# Suggested Questions After Answer Configuration
# These environment variables allow customization of the suggested questions feature
#
# Custom prompt for generating suggested questions (optional)
# If not set, uses the default prompt that generates 3 questions under 20 characters each
# Example: "Please help me predict the five most likely technical follow-up questions a developer would ask. Focus on implementation details, best practices, and architecture considerations. Keep each question between 40-60 characters. Output must be JSON array: [\"question1\",\"question2\",\"question3\",\"question4\",\"question5\"]"
# SUGGESTED_QUESTIONS_PROMPT=
# Maximum number of tokens for suggested questions generation (default: 256)
# Adjust this value for longer questions or more questions
# SUGGESTED_QUESTIONS_MAX_TOKENS=256
# Temperature for suggested questions generation (default: 0.0)
# Higher values (0.5-1.0) produce more creative questions, lower values (0.0-0.3) produce more focused questions
# SUGGESTED_QUESTIONS_TEMPERATURE=0
# Tenant isolated task queue configuration
TENANT_ISOLATED_TASK_CONCURRENCY=1

View File

@ -1,5 +1,7 @@
from typing import Any
CUSTOM_FOLLOW_UP_PROMPT_MAX_LENGTH = 1000
class SuggestedQuestionsAfterAnswerConfigManager:
@classmethod
@ -20,7 +22,11 @@ class SuggestedQuestionsAfterAnswerConfigManager:
@classmethod
def validate_and_set_defaults(cls, config: dict[str, Any]) -> tuple[dict[str, Any], list[str]]:
"""
Validate and set defaults for suggested questions feature
Validate and set defaults for the suggested questions feature.
Optional fields:
- prompt: custom instruction prompt.
- model: provider/model configuration for suggested question generation.
:param config: app model config args
"""
@ -39,4 +45,27 @@ class SuggestedQuestionsAfterAnswerConfigManager:
if not isinstance(config["suggested_questions_after_answer"]["enabled"], bool):
raise ValueError("enabled in suggested_questions_after_answer must be of boolean type")
prompt = config["suggested_questions_after_answer"].get("prompt")
if prompt is not None and not isinstance(prompt, str):
raise ValueError("prompt in suggested_questions_after_answer must be of string type")
if isinstance(prompt, str) and len(prompt) > CUSTOM_FOLLOW_UP_PROMPT_MAX_LENGTH:
raise ValueError(
f"prompt in suggested_questions_after_answer must be less than or equal to "
f"{CUSTOM_FOLLOW_UP_PROMPT_MAX_LENGTH} characters"
)
if "model" in config["suggested_questions_after_answer"]:
model_config = config["suggested_questions_after_answer"]["model"]
if not isinstance(model_config, dict):
raise ValueError("model in suggested_questions_after_answer must be of object type")
if "provider" not in model_config or not isinstance(model_config["provider"], str):
raise ValueError("provider in suggested_questions_after_answer.model must be of string type")
if "name" not in model_config or not isinstance(model_config["name"], str):
raise ValueError("name in suggested_questions_after_answer.model must be of string type")
if "completion_params" in model_config and not isinstance(model_config["completion_params"], dict):
raise ValueError("completion_params in suggested_questions_after_answer.model must be of object type")
return config, ["suggested_questions_after_answer"]
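For reference, a minimal usage sketch of the validator above; the import path is assumed from Dify's app-config layout, and the provider/model names are illustrative:

```python
# Sketch only: exercising validate_and_set_defaults with the optional fields.
# Import path assumed from Dify's app-config layout.
from core.app.app_config.features.suggested_questions_after_answer.manager import (
    SuggestedQuestionsAfterAnswerConfigManager,
)

config = {
    "suggested_questions_after_answer": {
        "enabled": True,
        "prompt": "Suggest three short follow-up questions.",
        "model": {
            "provider": "openai",  # illustrative provider
            "name": "gpt-4o-mini",  # illustrative model
            "completion_params": {"max_tokens": 1024},
        },
    }
}

# Returns the validated config plus the list of affected feature keys.
validated, related_keys = SuggestedQuestionsAfterAnswerConfigManager.validate_and_set_defaults(config)
assert related_keys == ["suggested_questions_after_answer"]
```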

View File

@ -2,7 +2,7 @@ import json
import logging
import re
from collections.abc import Sequence
from typing import Any, Protocol, TypedDict, cast
from typing import Any, NotRequired, Protocol, TypedDict, cast
import json_repair
from sqlalchemy import select
@ -13,13 +13,13 @@ from core.llm_generator.output_parser.rule_config_generator import RuleConfigGen
from core.llm_generator.output_parser.suggested_questions_after_answer import SuggestedQuestionsAfterAnswerOutputParser
from core.llm_generator.prompts import (
CONVERSATION_TITLE_PROMPT,
DEFAULT_SUGGESTED_QUESTIONS_MAX_TOKENS,
DEFAULT_SUGGESTED_QUESTIONS_TEMPERATURE,
GENERATOR_QA_PROMPT,
JAVASCRIPT_CODE_GENERATOR_PROMPT_TEMPLATE,
LLM_MODIFY_CODE_SYSTEM,
LLM_MODIFY_PROMPT_SYSTEM,
PYTHON_CODE_GENERATOR_PROMPT_TEMPLATE,
SUGGESTED_QUESTIONS_MAX_TOKENS,
SUGGESTED_QUESTIONS_TEMPERATURE,
SYSTEM_STRUCTURED_OUTPUT_GENERATE,
WORKFLOW_RULE_CONFIG_PROMPT_GENERATE_TEMPLATE,
)
@ -41,6 +41,36 @@ from models.workflow import Workflow
logger = logging.getLogger(__name__)
class SuggestedQuestionsModelConfig(TypedDict):
provider: str
name: str
completion_params: NotRequired[dict[str, object]]
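A literal that satisfies this TypedDict, for reference (values are illustrative; `completion_params` may be omitted because it is `NotRequired`):

```python
# Illustrative only; any provider/model pair registered for the tenant works.
example_model_config: SuggestedQuestionsModelConfig = {
    "provider": "openai",
    "name": "gpt-4o-mini",
    "completion_params": {"temperature": 0.2},
}
```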
def _normalize_completion_params(completion_params: dict[str, object]) -> tuple[dict[str, object], list[str]]:
"""
Normalize raw completion params into invocation parameters and stop sequences.
This mirrors the app-model access path by separating ``stop`` from provider
parameters before invocation, then drops non-positive token limits because
some plugin-backed models reject ``0`` after mapping ``max_tokens`` to their
provider-specific output-token field.
"""
normalized_parameters = dict(completion_params)
stop_value = normalized_parameters.pop("stop", [])
if isinstance(stop_value, list) and all(isinstance(item, str) for item in stop_value):
stop = stop_value
else:
stop = []
for token_limit_key in ("max_tokens", "max_output_tokens"):
token_limit = normalized_parameters.get(token_limit_key)
if isinstance(token_limit, int | float) and token_limit <= 0:
normalized_parameters.pop(token_limit_key, None)
return normalized_parameters, stop
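A quick sketch of the behavior the docstring describes: `stop` is split out for the invocation call, and a non-positive `max_tokens` is dropped (values are illustrative):

```python
# Exercises the helper defined above.
params, stop = _normalize_completion_params(
    {"temperature": 0.2, "max_tokens": 0, "stop": ["END"]}
)
assert params == {"temperature": 0.2}  # max_tokens <= 0 was removed
assert stop == ["END"]  # stop sequences are passed separately to invoke_llm
```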
class WorkflowServiceInterface(Protocol):
def get_draft_workflow(self, app_model: App, workflow_id: str | None = None) -> Workflow | None:
pass
@ -123,8 +153,15 @@ class LLMGenerator:
return name
@classmethod
def generate_suggested_questions_after_answer(cls, tenant_id: str, histories: str) -> Sequence[str]:
output_parser = SuggestedQuestionsAfterAnswerOutputParser()
def generate_suggested_questions_after_answer(
cls,
tenant_id: str,
histories: str,
*,
instruction_prompt: str | None = None,
model_config: object | None = None,
) -> Sequence[str]:
output_parser = SuggestedQuestionsAfterAnswerOutputParser(instruction_prompt=instruction_prompt)
format_instructions = output_parser.get_format_instructions()
prompt_template = PromptTemplateParser(template="{{histories}}\n{{format_instructions}}\nquestions:\n")
@ -133,10 +170,36 @@ class LLMGenerator:
try:
model_manager = ModelManager.for_tenant(tenant_id=tenant_id)
model_instance = model_manager.get_default_model_instance(
tenant_id=tenant_id,
model_type=ModelType.LLM,
)
configured_model = cast(dict[str, object], model_config) if isinstance(model_config, dict) else {}
provider = configured_model.get("provider")
model_name = configured_model.get("name")
use_configured_model = False
if isinstance(provider, str) and provider and isinstance(model_name, str) and model_name:
try:
model_instance = model_manager.get_model_instance(
tenant_id=tenant_id,
model_type=ModelType.LLM,
provider=provider,
model=model_name,
)
use_configured_model = True
except Exception:
logger.warning(
"Failed to use configured suggested-questions model %s/%s, fallback to default model",
provider,
model_name,
exc_info=True,
)
model_instance = model_manager.get_default_model_instance(
tenant_id=tenant_id,
model_type=ModelType.LLM,
)
else:
model_instance = model_manager.get_default_model_instance(
tenant_id=tenant_id,
model_type=ModelType.LLM,
)
except InvokeAuthorizationError:
return []
@ -145,19 +208,29 @@ class LLMGenerator:
questions: Sequence[str] = []
try:
configured_completion_params = configured_model.get("completion_params")
if use_configured_model and isinstance(configured_completion_params, dict):
model_parameters, stop = _normalize_completion_params(configured_completion_params)
elif use_configured_model:
model_parameters = {}
stop = []
else:
# Default-model generation keeps the built-in suggested-questions tuning.
model_parameters = {
"max_tokens": DEFAULT_SUGGESTED_QUESTIONS_MAX_TOKENS,
"temperature": DEFAULT_SUGGESTED_QUESTIONS_TEMPERATURE,
}
stop = []
response: LLMResult = model_instance.invoke_llm(
prompt_messages=list(prompt_messages),
model_parameters={
"max_tokens": SUGGESTED_QUESTIONS_MAX_TOKENS,
"temperature": SUGGESTED_QUESTIONS_TEMPERATURE,
},
model_parameters=model_parameters,
stop=stop,
stream=False,
)
text_content = response.message.get_text_content()
questions = output_parser.parse(text_content) if text_content else []
except InvokeError:
questions = []
except Exception:
logger.exception("Failed to generate suggested questions after answer")
questions = []
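A hedged sketch of a call to the method above: with a `model_config`, the configured model is resolved first, and any resolution failure falls back to the tenant's default LLM with the built-in tuning (the tenant id and histories below are placeholders):

```python
from core.llm_generator.llm_generator import LLMGenerator

questions = LLMGenerator.generate_suggested_questions_after_answer(
    tenant_id="tenant-123",  # placeholder tenant id
    histories="Human: How do I deploy?\nAssistant: Use docker compose.",  # placeholder
    instruction_prompt=None,  # None -> built-in default instruction prompt
    model_config={"provider": "openai", "name": "gpt-4o-mini"},  # optional override
)
```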

View File

@ -3,17 +3,21 @@ import logging
import re
from collections.abc import Sequence
from core.llm_generator.prompts import SUGGESTED_QUESTIONS_AFTER_ANSWER_INSTRUCTION_PROMPT
from core.llm_generator.prompts import DEFAULT_SUGGESTED_QUESTIONS_AFTER_ANSWER_INSTRUCTION_PROMPT
logger = logging.getLogger(__name__)
class SuggestedQuestionsAfterAnswerOutputParser:
def __init__(self, instruction_prompt: str | None = None) -> None:
self._instruction_prompt = instruction_prompt or DEFAULT_SUGGESTED_QUESTIONS_AFTER_ANSWER_INSTRUCTION_PROMPT
def get_format_instructions(self) -> str:
return SUGGESTED_QUESTIONS_AFTER_ANSWER_INSTRUCTION_PROMPT
return self._instruction_prompt
def parse(self, text: str) -> Sequence[str]:
action_match = re.search(r"\[.*?\]", text.strip(), re.DOTALL)
stripped_text = text.strip()
action_match = re.search(r"\[.*?\]", stripped_text, re.DOTALL)
questions: list[str] = []
if action_match is not None:
try:
@ -23,4 +27,6 @@ class SuggestedQuestionsAfterAnswerOutputParser:
else:
if isinstance(json_obj, list):
questions = [question for question in json_obj if isinstance(question, str)]
elif stripped_text:
logger.warning("Failed to find suggested questions payload array in text: %r", stripped_text[:200])
return questions
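The parser keeps only the string items of the first JSON array found in the model output; a short sketch:

```python
from core.llm_generator.output_parser.suggested_questions_after_answer import (
    SuggestedQuestionsAfterAnswerOutputParser,
)

parser = SuggestedQuestionsAfterAnswerOutputParser()
assert parser.parse('questions:\n["Q1?", "Q2?", 3]') == ["Q1?", "Q2?"]  # non-strings dropped
assert parser.parse("no array here") == []  # logs a warning and returns an empty list
```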

View File

@ -1,5 +1,4 @@
# Written by YORKI MINAKO🤡, Edited by Xiaoyi, Edited by yasu-oh
import os
CONVERSATION_TITLE_PROMPT = """You are asked to generate a concise chat title by decomposing the user's input into two parts: “Intention” and “Subject”.
@ -96,8 +95,8 @@ JAVASCRIPT_CODE_GENERATOR_PROMPT_TEMPLATE = (
)
# Default prompt for suggested questions (can be overridden by environment variable)
_DEFAULT_SUGGESTED_QUESTIONS_AFTER_ANSWER_PROMPT = (
# Default prompt and model parameters for suggested questions.
DEFAULT_SUGGESTED_QUESTIONS_AFTER_ANSWER_INSTRUCTION_PROMPT = (
"Please help me predict the three most likely questions that human would ask, "
"and keep each question under 20 characters.\n"
"MAKE SURE your output is the SAME language as the Assistant's latest response. "
@ -105,14 +104,8 @@ _DEFAULT_SUGGESTED_QUESTIONS_AFTER_ANSWER_PROMPT = (
'["question1","question2","question3"]\n'
)
# Environment variable override for suggested questions prompt
SUGGESTED_QUESTIONS_AFTER_ANSWER_INSTRUCTION_PROMPT = os.getenv(
"SUGGESTED_QUESTIONS_PROMPT", _DEFAULT_SUGGESTED_QUESTIONS_AFTER_ANSWER_PROMPT
)
# Configurable LLM parameters for suggested questions (can be overridden by environment variables)
SUGGESTED_QUESTIONS_MAX_TOKENS = int(os.getenv("SUGGESTED_QUESTIONS_MAX_TOKENS", "256"))
SUGGESTED_QUESTIONS_TEMPERATURE = float(os.getenv("SUGGESTED_QUESTIONS_TEMPERATURE", "0"))
DEFAULT_SUGGESTED_QUESTIONS_MAX_TOKENS = 256
DEFAULT_SUGGESTED_QUESTIONS_TEMPERATURE = 0.0
GENERATOR_QA_PROMPT = (
"<Task> The user will send a long text. Generate a Question and Answer pairs only using the knowledge"

View File

@ -91,6 +91,19 @@ class EnabledConfig(TypedDict):
enabled: bool
class SuggestedQuestionsAfterAnswerModelConfig(TypedDict):
provider: str
name: str
mode: NotRequired[str]
completion_params: NotRequired[dict[str, Any]]
class SuggestedQuestionsAfterAnswerConfig(TypedDict):
enabled: bool
model: NotRequired[SuggestedQuestionsAfterAnswerModelConfig]
prompt: NotRequired[str]
class EmbeddingModelInfo(TypedDict):
embedding_provider_name: str
embedding_model_name: str
@ -220,7 +233,7 @@ class ModelConfig(TypedDict):
class AppModelConfigDict(TypedDict):
opening_statement: str | None
suggested_questions: list[str]
suggested_questions_after_answer: EnabledConfig
suggested_questions_after_answer: SuggestedQuestionsAfterAnswerConfig
speech_to_text: EnabledConfig
text_to_speech: EnabledConfig
retriever_resource: EnabledConfig
@ -680,8 +693,13 @@ class AppModelConfig(TypeBase):
return cast(EnabledConfig, json.loads(value) if value else {"enabled": default_enabled})
@property
def suggested_questions_after_answer_dict(self) -> EnabledConfig:
return self._get_enabled_config(self.suggested_questions_after_answer)
def suggested_questions_after_answer_dict(self) -> SuggestedQuestionsAfterAnswerConfig:
return cast(
SuggestedQuestionsAfterAnswerConfig,
json.loads(self.suggested_questions_after_answer)
if self.suggested_questions_after_answer
else {"enabled": False},
)
@property
def speech_to_text_dict(self) -> EnabledConfig:

View File

@ -1,4 +1,6 @@
import logging
from collections.abc import Sequence
from typing import cast
from pydantic import TypeAdapter
from sqlalchemy import select
@ -17,7 +19,16 @@ from graphon.model_runtime.entities.model_entities import ModelType
from libs.infinite_scroll_pagination import InfiniteScrollPagination
from models import Account
from models.enums import FeedbackFromSource, FeedbackRating
from models.model import App, AppMode, AppModelConfig, AppModelConfigDict, EndUser, Message, MessageFeedback
from models.model import (
App,
AppMode,
AppModelConfig,
AppModelConfigDict,
EndUser,
Message,
MessageFeedback,
SuggestedQuestionsAfterAnswerConfig,
)
from repositories.execution_extra_content_repository import ExecutionExtraContentRepository
from repositories.sqlalchemy_execution_extra_content_repository import (
SQLAlchemyExecutionExtraContentRepository,
@ -32,6 +43,7 @@ from services.errors.message import (
from services.workflow_service import WorkflowService
_app_model_config_adapter: TypeAdapter[AppModelConfigDict] = TypeAdapter(AppModelConfigDict)
logger = logging.getLogger(__name__)
def _create_execution_extra_content_repository() -> ExecutionExtraContentRepository:
@ -252,6 +264,7 @@ class MessageService:
)
model_manager = ModelManager.for_tenant(tenant_id=app_model.tenant_id)
suggested_questions_after_answer_config: SuggestedQuestionsAfterAnswerConfig = {"enabled": False}
if app_model.mode == AppMode.ADVANCED_CHAT:
workflow_service = WorkflowService()
@ -271,9 +284,11 @@ class MessageService:
if not app_config.additional_features.suggested_questions_after_answer:
raise SuggestedQuestionsAfterAnswerDisabledError()
model_instance = model_manager.get_default_model_instance(
tenant_id=app_model.tenant_id, model_type=ModelType.LLM
)
suggested_questions_after_answer = workflow.features_dict.get("suggested_questions_after_answer")
if isinstance(suggested_questions_after_answer, dict):
suggested_questions_after_answer_config = cast(
SuggestedQuestionsAfterAnswerConfig, suggested_questions_after_answer
)
else:
if not conversation.override_model_configs:
app_model_config = db.session.scalar(
@ -293,16 +308,14 @@ class MessageService:
if not app_model_config:
raise ValueError("did not find app model config")
suggested_questions_after_answer = app_model_config.suggested_questions_after_answer_dict
if suggested_questions_after_answer.get("enabled", False) is False:
suggested_questions_after_answer_config = app_model_config.suggested_questions_after_answer_dict
if suggested_questions_after_answer_config.get("enabled", False) is False:
raise SuggestedQuestionsAfterAnswerDisabledError()
model_instance = model_manager.get_model_instance(
tenant_id=app_model.tenant_id,
provider=app_model_config.model_dict["provider"],
model_type=ModelType.LLM,
model=app_model_config.model_dict["name"],
)
model_instance = model_manager.get_default_model_instance(
tenant_id=app_model.tenant_id,
model_type=ModelType.LLM,
)
# get memory of conversation (read-only)
memory = TokenBufferMemory(conversation=conversation, model_instance=model_instance)
@ -312,9 +325,17 @@ class MessageService:
message_limit=3,
)
instruction_prompt = suggested_questions_after_answer_config.get("prompt")
if not isinstance(instruction_prompt, str) or not instruction_prompt.strip():
instruction_prompt = None
configured_model = suggested_questions_after_answer_config.get("model")
with measure_time() as timer:
questions_sequence = LLMGenerator.generate_suggested_questions_after_answer(
tenant_id=app_model.tenant_id, histories=histories
tenant_id=app_model.tenant_id,
histories=histories,
instruction_prompt=instruction_prompt,
model_config=configured_model,
)
questions: list[str] = list(questions_sequence)

View File

@ -77,6 +77,38 @@ class TestAdditionalFeatureManagers:
SuggestedQuestionsAfterAnswerConfigManager.validate_and_set_defaults(
{"suggested_questions_after_answer": {"enabled": "yes"}}
)
with pytest.raises(ValueError):
SuggestedQuestionsAfterAnswerConfigManager.validate_and_set_defaults(
{"suggested_questions_after_answer": {"enabled": True, "prompt": 123}}
)
with pytest.raises(ValueError, match="must be less than or equal to 1000 characters"):
SuggestedQuestionsAfterAnswerConfigManager.validate_and_set_defaults(
{"suggested_questions_after_answer": {"enabled": True, "prompt": "a" * 1001}}
)
with pytest.raises(ValueError):
SuggestedQuestionsAfterAnswerConfigManager.validate_and_set_defaults(
{"suggested_questions_after_answer": {"enabled": True, "model": "bad"}}
)
with pytest.raises(ValueError):
SuggestedQuestionsAfterAnswerConfigManager.validate_and_set_defaults(
{"suggested_questions_after_answer": {"enabled": True, "model": {"provider": "openai"}}}
)
validated_config, _ = SuggestedQuestionsAfterAnswerConfigManager.validate_and_set_defaults(
{
"suggested_questions_after_answer": {
"enabled": True,
"prompt": "custom prompt",
"model": {
"provider": "openai",
"name": "gpt-4o-mini",
"completion_params": {"max_tokens": 1024},
},
}
}
)
assert validated_config["suggested_questions_after_answer"]["prompt"] == "custom prompt"
assert validated_config["suggested_questions_after_answer"]["model"]["name"] == "gpt-4o-mini"
assert (
SuggestedQuestionsAfterAnswerConfigManager.convert({"suggested_questions_after_answer": {"enabled": True}})

View File

@ -6,7 +6,12 @@ import pytest
from core.app.app_config.entities import ModelConfig
from core.llm_generator.entities import RuleCodeGeneratePayload, RuleGeneratePayload, RuleStructuredOutputPayload
from core.llm_generator.llm_generator import LLMGenerator
from core.llm_generator.prompts import (
DEFAULT_SUGGESTED_QUESTIONS_MAX_TOKENS,
DEFAULT_SUGGESTED_QUESTIONS_TEMPERATURE,
)
from graphon.model_runtime.entities.llm_entities import LLMMode, LLMResult
from graphon.model_runtime.entities.model_entities import ModelType
from graphon.model_runtime.errors.invoke import InvokeAuthorizationError, InvokeError
@ -96,6 +101,10 @@ class TestLLMGenerator:
questions = LLMGenerator.generate_suggested_questions_after_answer("tenant_id", "histories")
assert len(questions) == 2
assert questions[0] == "Question 1?"
assert mock_model_instance.invoke_llm.call_args.kwargs["model_parameters"] == {
"max_tokens": DEFAULT_SUGGESTED_QUESTIONS_MAX_TOKENS,
"temperature": DEFAULT_SUGGESTED_QUESTIONS_TEMPERATURE,
}
def test_generate_suggested_questions_after_answer_auth_error(self, mock_model_instance):
with patch("core.llm_generator.llm_generator.ModelManager.for_tenant") as mock_manager:
@ -113,6 +122,97 @@ class TestLLMGenerator:
questions = LLMGenerator.generate_suggested_questions_after_answer("tenant_id", "histories")
assert questions == []
@patch("core.llm_generator.llm_generator.ModelManager.for_tenant")
def test_generate_suggested_questions_after_answer_with_custom_model_and_prompt(self, mock_for_tenant):
custom_model_instance = MagicMock()
custom_response = MagicMock()
custom_response.message.get_text_content.return_value = '["Question 1?"]'
custom_model_instance.invoke_llm.return_value = custom_response
mock_for_tenant.return_value.get_model_instance.return_value = custom_model_instance
questions = LLMGenerator.generate_suggested_questions_after_answer(
"tenant_id",
"histories",
instruction_prompt="custom prompt",
model_config={
"provider": "openai",
"name": "gpt-4o",
"completion_params": {"temperature": 0.2},
},
)
assert questions == ["Question 1?"]
mock_for_tenant.return_value.get_model_instance.assert_called_once_with(
tenant_id="tenant_id",
model_type=ModelType.LLM,
provider="openai",
model="gpt-4o",
)
invoke_kwargs = custom_model_instance.invoke_llm.call_args.kwargs
assert invoke_kwargs["model_parameters"] == {"temperature": 0.2}
assert invoke_kwargs["stop"] == []
assert "custom prompt" in invoke_kwargs["prompt_messages"][0].content
@patch("core.llm_generator.llm_generator.ModelManager.for_tenant")
def test_generate_suggested_questions_after_answer_fallback_to_default_model(self, mock_for_tenant):
default_model_instance = MagicMock()
default_response = MagicMock()
default_response.message.get_text_content.return_value = '["Question 1?"]'
default_model_instance.invoke_llm.return_value = default_response
mock_for_tenant.return_value.get_model_instance.side_effect = ValueError("invalid configured model")
mock_for_tenant.return_value.get_default_model_instance.return_value = default_model_instance
questions = LLMGenerator.generate_suggested_questions_after_answer(
"tenant_id",
"histories",
model_config={
"provider": "openai",
"name": "not-found-model",
"completion_params": {"temperature": 0.2},
},
)
assert questions == ["Question 1?"]
mock_for_tenant.return_value.get_default_model_instance.assert_called_once_with(
tenant_id="tenant_id",
model_type=ModelType.LLM,
)
assert default_model_instance.invoke_llm.call_args.kwargs["model_parameters"] == {
"max_tokens": DEFAULT_SUGGESTED_QUESTIONS_MAX_TOKENS,
"temperature": DEFAULT_SUGGESTED_QUESTIONS_TEMPERATURE,
}
assert default_model_instance.invoke_llm.call_args.kwargs["stop"] == []
@patch("core.llm_generator.llm_generator.ModelManager.for_tenant")
def test_generate_suggested_questions_after_answer_drops_non_positive_max_tokens(self, mock_for_tenant):
custom_model_instance = MagicMock()
custom_response = MagicMock()
custom_response.message.get_text_content.return_value = '["Question 1?"]'
custom_model_instance.invoke_llm.return_value = custom_response
mock_for_tenant.return_value.get_model_instance.return_value = custom_model_instance
questions = LLMGenerator.generate_suggested_questions_after_answer(
"tenant_id",
"histories",
model_config={
"provider": "openai",
"name": "gpt-4o",
"completion_params": {
"temperature": 0.2,
"max_tokens": 0,
"stop": ["END"],
},
},
)
assert questions == ["Question 1?"]
invoke_kwargs = custom_model_instance.invoke_llm.call_args.kwargs
assert invoke_kwargs["model_parameters"] == {"temperature": 0.2}
assert invoke_kwargs["stop"] == ["END"]
def test_generate_rule_config_no_variable_success(self, mock_model_instance, model_config_entity):
payload = RuleGeneratePayload(
instruction="test instruction", model_config=model_config_entity, no_variable=True

View File

@ -3,6 +3,7 @@ from unittest.mock import MagicMock, patch
import pytest
from graphon.model_runtime.entities.model_entities import ModelType
from libs.infinite_scroll_pagination import InfiniteScrollPagination
from models.enums import FeedbackFromSource, FeedbackRating
from models.model import App, AppMode, EndUser, Message
@ -931,6 +932,130 @@ class TestMessageServiceSuggestedQuestions:
assert result == ["Q1?"]
mock_llm_gen.generate_suggested_questions_after_answer.assert_called_once()
@patch("services.message_service.db")
@patch("services.message_service.ModelManager.for_tenant")
@patch("services.message_service.TokenBufferMemory")
@patch("services.message_service.LLMGenerator")
@patch("services.message_service.TraceQueueManager")
@patch.object(MessageService, "get_message")
@patch("services.message_service.ConversationService")
def test_get_suggested_questions_chat_app_uses_frontend_model_and_prompt(
self,
mock_conversation_service,
mock_get_message,
mock_trace_manager,
mock_llm_gen,
mock_memory,
mock_model_manager,
mock_db,
factory,
):
"""Test suggested question generation uses frontend configured model and prompt."""
from core.app.entities.app_invoke_entities import InvokeFrom
app = factory.create_app_mock(mode=AppMode.CHAT.value)
app.tenant_id = "tenant-123"
user = factory.create_end_user_mock()
message = factory.create_message_mock()
mock_get_message.return_value = message
conversation = MagicMock()
conversation.override_model_configs = None
mock_conversation_service.get_conversation.return_value = conversation
app_model_config = MagicMock()
app_model_config.suggested_questions_after_answer_dict = {
"enabled": True,
"prompt": "custom prompt",
"model": {
"provider": "openai",
"name": "gpt-4o-mini",
"completion_params": {"max_tokens": 2048, "temperature": 0.1},
},
}
mock_db.session.scalar.return_value = app_model_config
mock_memory.return_value.get_history_prompt_text.return_value = "histories"
mock_llm_gen.generate_suggested_questions_after_answer.return_value = ["Q1?"]
result = MessageService.get_suggested_questions_after_answer(
app_model=app,
user=user,
message_id="msg-123",
invoke_from=InvokeFrom.WEB_APP,
)
assert result == ["Q1?"]
mock_model_manager.return_value.get_default_model_instance.assert_called_once_with(
tenant_id="tenant-123",
model_type=ModelType.LLM,
)
mock_memory.assert_called_once_with(
conversation=conversation,
model_instance=mock_model_manager.return_value.get_default_model_instance.return_value,
)
mock_llm_gen.generate_suggested_questions_after_answer.assert_called_once_with(
tenant_id="tenant-123",
histories="histories",
instruction_prompt="custom prompt",
model_config={
"provider": "openai",
"name": "gpt-4o-mini",
"completion_params": {"max_tokens": 2048, "temperature": 0.1},
},
)
@patch("services.message_service.db")
@patch("services.message_service.ModelManager.for_tenant")
@patch("services.message_service.TokenBufferMemory")
@patch("services.message_service.LLMGenerator")
@patch("services.message_service.TraceQueueManager")
@patch.object(MessageService, "get_message")
@patch("services.message_service.ConversationService")
def test_get_suggested_questions_chat_app_invalid_frontend_model_fallback_to_default(
self,
mock_conversation_service,
mock_get_message,
mock_trace_manager,
mock_llm_gen,
mock_memory,
mock_model_manager,
mock_db,
factory,
):
"""Test invalid frontend configured model falls back to tenant default model."""
app = factory.create_app_mock(mode=AppMode.CHAT.value)
app.tenant_id = "tenant-123"
user = factory.create_end_user_mock()
message = factory.create_message_mock()
mock_get_message.return_value = message
conversation = MagicMock()
conversation.override_model_configs = None
mock_conversation_service.get_conversation.return_value = conversation
app_model_config = MagicMock()
app_model_config.suggested_questions_after_answer_dict = {
"enabled": True,
"model": {"provider": "openai", "name": "invalid-model"},
}
mock_db.session.scalar.return_value = app_model_config
mock_model_manager.return_value.get_model_instance.side_effect = ValueError("invalid model")
mock_memory.return_value.get_history_prompt_text.return_value = "histories"
mock_llm_gen.generate_suggested_questions_after_answer.return_value = ["Q1?"]
result = MessageService.get_suggested_questions_after_answer(
app_model=app, user=user, message_id="msg-123", invoke_from=MagicMock()
)
assert result == ["Q1?"]
mock_model_manager.return_value.get_default_model_instance.assert_called_once_with(
tenant_id="tenant-123",
model_type=ModelType.LLM,
)
mock_model_manager.return_value.get_model_instance.assert_not_called()
# Test 30: get_suggested_questions_after_answer - Disabled Error
@patch("services.message_service.WorkflowService")
@patch("services.message_service.AdvancedChatAppConfigManager")

View File

@ -1,253 +0,0 @@
# Configurable Suggested Questions After Answer
This document explains how to configure the "Suggested Questions After Answer" feature in Dify using environment variables.
## Overview
The suggested questions feature generates follow-up questions after each AI response to help users continue the conversation. By default, Dify generates 3 short questions (under 20 characters each), but you can customize this behavior to better fit your specific use case.
## Environment Variables
### `SUGGESTED_QUESTIONS_PROMPT`
**Description**: Custom prompt template for generating suggested questions.
**Default**:
```
Please help me predict the three most likely questions that human would ask, and keep each question under 20 characters.
MAKE SURE your output is the SAME language as the Assistant's latest response.
The output must be an array in JSON format following the specified schema:
["question1","question2","question3"]
```
**Usage Examples**:
1. **Technical/Developer Questions**:
```bash
export SUGGESTED_QUESTIONS_PROMPT='Please help me predict the five most likely technical follow-up questions a developer would ask. Focus on implementation details, best practices, and architecture considerations. Keep each question between 40-60 characters. Output must be JSON array: ["question1","question2","question3","question4","question5"]'
```
2. **Customer Support**:
```bash
export SUGGESTED_QUESTIONS_PROMPT='Generate 3 helpful follow-up questions that guide customers toward solving their own problems. Focus on troubleshooting steps and common issues. Keep questions under 30 characters. JSON format: ["q1","q2","q3"]'
```
3. **Educational Content**:
```bash
export SUGGESTED_QUESTIONS_PROMPT='Create 4 thought-provoking questions that help students deepen their understanding of the topic. Focus on concepts, relationships, and applications. Questions should be 25-40 characters. JSON: ["question1","question2","question3","question4"]'
```
4. **Multilingual Support**:
```bash
export SUGGESTED_QUESTIONS_PROMPT='Generate exactly 3 follow-up questions in the same language as the conversation. Adapt question length appropriately for the language (Chinese: 10-15 chars, English: 20-30 chars, Arabic: 25-35 chars). Always output valid JSON array.'
```
**Important Notes**:
- The prompt must request JSON array output format
- Include language matching instructions for multilingual support
- Specify clear character limits or question count requirements
- Focus on your specific domain or use case
### `SUGGESTED_QUESTIONS_MAX_TOKENS`
**Description**: Maximum number of tokens for the LLM response.
**Default**: `256`
**Usage**:
```bash
export SUGGESTED_QUESTIONS_MAX_TOKENS=512 # For longer questions or more questions
```
**Recommended Values**:
- `256`: Default, good for 3-4 short questions
- `384`: Medium, good for 4-5 medium-length questions
- `512`: High, good for 5+ longer questions or complex prompts
- `1024`: Maximum, for very complex question generation
### `SUGGESTED_QUESTIONS_TEMPERATURE`
**Description**: Temperature parameter for LLM creativity.
**Default**: `0.0`
**Usage**:
```bash
export SUGGESTED_QUESTIONS_TEMPERATURE=0.3 # Balanced creativity
```
**Recommended Values**:
- `0.0-0.2`: Very focused, predictable questions (good for technical support)
- `0.3-0.5`: Balanced creativity and relevance (good for general use)
- `0.6-0.8`: More creative, diverse questions (good for brainstorming)
- `0.9-1.0`: Maximum creativity (good for educational exploration)
## Configuration Examples
### Example 1: Developer Documentation Chatbot
```bash
# .env file
SUGGESTED_QUESTIONS_PROMPT='Generate exactly 5 technical follow-up questions that developers would ask after reading code documentation. Focus on implementation details, edge cases, performance considerations, and best practices. Each question should be 40-60 characters long. Output as JSON array: ["question1","question2","question3","question4","question5"]'
SUGGESTED_QUESTIONS_MAX_TOKENS=512
SUGGESTED_QUESTIONS_TEMPERATURE=0.3
```
### Example 2: Customer Service Bot
```bash
# .env file
SUGGESTED_QUESTIONS_PROMPT='Create 3 actionable follow-up questions that help customers resolve their own issues. Focus on common problems, troubleshooting steps, and product features. Keep questions simple and under 25 characters. JSON: ["q1","q2","q3"]'
SUGGESTED_QUESTIONS_MAX_TOKENS=256
SUGGESTED_QUESTIONS_TEMPERATURE=0.1
```
### Example 3: Educational Tutor
```bash
# .env file
SUGGESTED_QUESTIONS_PROMPT='Generate 4 thought-provoking questions that help students deepen their understanding of the topic. Focus on relationships between concepts, practical applications, and critical thinking. Questions should be 30-45 characters. Output: ["question1","question2","question3","question4"]'
SUGGESTED_QUESTIONS_MAX_TOKENS=384
SUGGESTED_QUESTIONS_TEMPERATURE=0.6
```
## Implementation Details
### How It Works
1. **Environment Variable Loading**: The system checks for environment variables at startup (see the sketch after this list)
2. **Fallback to Defaults**: If no environment variables are set, original behavior is preserved
3. **Prompt Template**: The custom prompt is used as-is, allowing full control over question generation
4. **LLM Parameters**: Custom max_tokens and temperature are passed to the LLM API
5. **JSON Parsing**: The system expects JSON array output and parses it accordingly
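A condensed sketch of the load-with-fallback pattern steps 1-2 describe, mirroring `api/core/llm_generator/prompts.py` (default prompt abbreviated):

```python
import os

_DEFAULT_PROMPT = "Please help me predict the three most likely questions..."  # abbreviated

# Read overrides once at import time; fall back to the built-in defaults.
SUGGESTED_QUESTIONS_AFTER_ANSWER_INSTRUCTION_PROMPT = os.getenv(
    "SUGGESTED_QUESTIONS_PROMPT", _DEFAULT_PROMPT
)
SUGGESTED_QUESTIONS_MAX_TOKENS = int(os.getenv("SUGGESTED_QUESTIONS_MAX_TOKENS", "256"))
SUGGESTED_QUESTIONS_TEMPERATURE = float(os.getenv("SUGGESTED_QUESTIONS_TEMPERATURE", "0"))
```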
### File Changes
The implementation modifies these files:
- `api/core/llm_generator/prompts.py`: Environment variable support
- `api/core/llm_generator/llm_generator.py`: Custom LLM parameters
- `api/.env.example`: Documentation of new variables
### Backward Compatibility
- ✅ **Zero Breaking Changes**: Works exactly as before if no environment variables are set
- ✅ **Default Behavior Preserved**: Original prompt and parameters used as fallbacks
- ✅ **No Database Changes**: Pure environment variable configuration
- ✅ **No UI Changes Required**: Configuration happens at deployment level
## Testing Your Configuration
### Local Testing
1. Set environment variables:
```bash
export SUGGESTED_QUESTIONS_PROMPT='Your test prompt...'
export SUGGESTED_QUESTIONS_MAX_TOKENS=300
export SUGGESTED_QUESTIONS_TEMPERATURE=0.4
```
2. Start Dify API:
```bash
cd api
python -m flask run --host 0.0.0.0 --port=5001 --debug
```
3. Test the feature in your chat application and verify the questions match your expectations.
### Monitoring
Monitor the following when testing:
- **Question Quality**: Are questions relevant and helpful?
- **Language Matching**: Do questions match the conversation language?
- **JSON Format**: Is output properly formatted as JSON array?
- **Length Constraints**: Do questions follow your length requirements?
- **Response Time**: Are the custom parameters affecting performance?
## Troubleshooting
### Common Issues
1. **Invalid JSON Output**:
- **Problem**: LLM doesn't return valid JSON
- **Solution**: Make sure your prompt explicitly requests JSON array format
2. **Questions Too Long/Short**:
- **Problem**: Questions don't follow length constraints
- **Solution**: Be more specific about character limits in your prompt
3. **Too Few/Many Questions**:
- **Problem**: Wrong number of questions generated
- **Solution**: Clearly specify the exact number in your prompt
4. **Language Mismatch**:
- **Problem**: Questions in wrong language
- **Solution**: Include explicit language matching instructions in prompt
5. **Performance Issues**:
- **Problem**: Slow response times
- **Solution**: Reduce `SUGGESTED_QUESTIONS_MAX_TOKENS` or simplify prompt
### Debug Logging
To debug your configuration, you can temporarily add logging to see the actual prompt and parameters being used:
```python
import logging
logger = logging.getLogger(__name__)
# In llm_generator.py
logger.info(f"Suggested questions prompt: {prompt}")
logger.info(f"Max tokens: {SUGGESTED_QUESTIONS_MAX_TOKENS}")
logger.info(f"Temperature: {SUGGESTED_QUESTIONS_TEMPERATURE}")
```
## Migration Guide
### From Default Configuration
If you're currently using the default configuration and want to customize:
1. **Assess Your Needs**: Determine what aspects need customization (question count, length, domain focus)
2. **Design Your Prompt**: Write a custom prompt that addresses your specific use case
3. **Choose Parameters**: Select appropriate max_tokens and temperature values
4. **Test Incrementally**: Start with small changes and test thoroughly
5. **Deploy Gradually**: Roll out to production after successful testing
### Best Practices
1. **Start Simple**: Begin with minimal changes to the default prompt
2. **Test Thoroughly**: Test with various conversation types and languages
3. **Monitor Performance**: Watch for impact on response times and costs
4. **Get User Feedback**: Collect feedback on question quality and relevance
5. **Iterate**: Refine your configuration based on real-world usage
## Future Enhancements
This environment variable approach provides immediate customization while maintaining backward compatibility. Future enhancements could include:
1. **App-Level Configuration**: Different apps with different suggested question settings
2. **Dynamic Prompts**: Context-aware prompts based on conversation content
3. **Multi-Model Support**: Different models for different types of questions
4. **Analytics Dashboard**: Insights into question effectiveness and usage patterns
5. **A/B Testing**: Built-in testing of different prompt configurations
For now, the environment variable approach offers a simple, reliable way to customize the suggested questions feature for your specific needs.

View File

@ -0,0 +1,97 @@
import type { SuggestedQuestionsAfterAnswer } from '@/app/components/base/features/types'
import { fireEvent, render, screen } from '@testing-library/react'
import userEvent from '@testing-library/user-event'
import FollowUpSettingModal from '../follow-up-setting-modal'
vi.mock('@/app/components/header/account-setting/model-provider-page/hooks', () => ({
useModelListAndDefaultModelAndCurrentProviderAndModel: () => ({
defaultModel: {
provider: {
provider: 'openai',
},
model: 'gpt-4o-mini',
},
}),
}))
vi.mock('@/app/components/header/account-setting/model-provider-page/model-parameter-modal', () => ({
default: ({ provider, modelId }: { provider: string, modelId: string }) => (
<div data-testid="model-parameter-modal">{`${provider}:${modelId}`}</div>
),
}))
const renderModal = (data: SuggestedQuestionsAfterAnswer = { enabled: true }) => {
const onSave = vi.fn()
const onCancel = vi.fn()
render(
<FollowUpSettingModal
data={data}
onSave={onSave}
onCancel={onCancel}
/>,
)
return {
onSave,
onCancel,
}
}
describe('FollowUpSettingModal', () => {
beforeEach(() => {
vi.clearAllMocks()
})
describe('Default Prompt', () => {
it('should show the system default prompt and save without a custom prompt when no custom prompt is configured', async () => {
const user = userEvent.setup()
const { onSave } = renderModal()
expect(screen.getByText('appDebug.feature.suggestedQuestionsAfterAnswer.modal.defaultPromptOption')).toBeInTheDocument()
expect(screen.getByText(/Please predict the three most likely follow-up questions a user would ask/)).toBeInTheDocument()
await user.click(screen.getByText(/common\.operation\.save/))
expect(onSave).toHaveBeenCalledWith(expect.objectContaining({
prompt: undefined,
model: expect.objectContaining({
provider: 'openai',
name: 'gpt-4o-mini',
}),
}))
})
})
describe('Custom Prompt', () => {
it('should enable custom prompt input and save the custom prompt when selected', async () => {
const user = userEvent.setup()
const { onSave } = renderModal()
await user.click(screen.getByText('appDebug.feature.suggestedQuestionsAfterAnswer.modal.customPromptOption').closest('button')!)
const textarea = screen.getByPlaceholderText('appDebug.feature.suggestedQuestionsAfterAnswer.modal.promptPlaceholder')
expect(textarea).toHaveAttribute('maxLength', '1000')
fireEvent.change(
textarea,
{ target: { value: 'Use a custom follow-up prompt.' } },
)
await user.click(screen.getByText(/common\.operation\.save/))
expect(onSave).toHaveBeenCalledWith(expect.objectContaining({
prompt: 'Use a custom follow-up prompt.',
}))
})
it('should disable save when custom prompt is selected but empty', async () => {
const user = userEvent.setup()
renderModal()
await user.click(screen.getByText('appDebug.feature.suggestedQuestionsAfterAnswer.modal.customPromptOption').closest('button')!)
expect(screen.getByText(/common\.operation\.save/).closest('button')).toBeDisabled()
})
})
})

View File

@ -1,12 +1,55 @@
import type { OnFeaturesChange } from '../../types'
import type {
OnFeaturesChange,
SuggestedQuestionsAfterAnswer,
} from '../../types'
import { fireEvent, render, screen } from '@testing-library/react'
import * as React from 'react'
import { FeaturesProvider } from '../../context'
import FollowUp from '../follow-up'
const renderWithProvider = (props: { disabled?: boolean, onChange?: OnFeaturesChange } = {}) => {
vi.mock('../follow-up-setting-modal', () => ({
default: ({ onSave, onCancel }: { onSave: (newState: unknown) => void, onCancel: () => void }) => (
<div data-testid="follow-up-setting-modal">
<button
type="button"
onClick={() => onSave({
enabled: true,
prompt: 'test prompt',
model: {
provider: 'openai',
name: 'gpt-4o-mini',
mode: 'chat',
completion_params: {
temperature: 0.7,
max_tokens: 0,
top_p: 0,
echo: false,
stop: [],
presence_penalty: 0,
frequency_penalty: 0,
},
},
})}
>
save-settings
</button>
<button type="button" onClick={onCancel}>cancel-settings</button>
</div>
),
}))
const renderWithProvider = (
props: {
disabled?: boolean
onChange?: OnFeaturesChange
suggested?: SuggestedQuestionsAfterAnswer
} = {},
) => {
return render(
<FeaturesProvider>
<FeaturesProvider features={{
suggested: props.suggested || { enabled: false },
}}
>
<FollowUp disabled={props.disabled} onChange={props.onChange} />
</FeaturesProvider>,
)
@ -45,4 +88,44 @@ describe('FollowUp', () => {
expect(() => fireEvent.click(screen.getByRole('switch'))).not.toThrow()
})
it('should render edit button when enabled and hovering', () => {
renderWithProvider({
suggested: {
enabled: true,
},
})
fireEvent.mouseEnter(screen.getByText(/feature\.suggestedQuestionsAfterAnswer\.title/).closest('[class]')!)
expect(screen.getByText(/operation\.settings/)).toBeInTheDocument()
})
it('should open settings modal and save follow-up config', () => {
const onChange = vi.fn()
renderWithProvider({
onChange,
suggested: {
enabled: true,
},
})
fireEvent.mouseEnter(screen.getByText(/feature\.suggestedQuestionsAfterAnswer\.title/).closest('[class]')!)
fireEvent.click(screen.getByText(/operation\.settings/))
expect(screen.getByTestId('follow-up-setting-modal')).toBeInTheDocument()
fireEvent.click(screen.getByText('save-settings'))
expect(onChange).toHaveBeenCalledWith(expect.objectContaining({
suggested: expect.objectContaining({
enabled: true,
prompt: 'test prompt',
model: expect.objectContaining({
provider: 'openai',
name: 'gpt-4o-mini',
}),
}),
}))
})
})

View File

@ -0,0 +1,241 @@
import type { SuggestedQuestionsAfterAnswer } from '@/app/components/base/features/types'
import type { FormValue } from '@/app/components/header/account-setting/model-provider-page/declarations'
import type {
CompletionParams,
Model,
ModelModeType,
} from '@/types/app'
import { Button } from '@langgenius/dify-ui/button'
import { cn } from '@langgenius/dify-ui/cn'
import { Dialog, DialogCloseButton, DialogContent, DialogTitle } from '@langgenius/dify-ui/dialog'
import { produce } from 'immer'
import { useCallback, useMemo, useState } from 'react'
import { useTranslation } from 'react-i18next'
import Radio from '@/app/components/base/radio/ui'
import Textarea from '@/app/components/base/textarea'
import { ModelTypeEnum } from '@/app/components/header/account-setting/model-provider-page/declarations'
import { useModelListAndDefaultModelAndCurrentProviderAndModel } from '@/app/components/header/account-setting/model-provider-page/hooks'
import ModelParameterModal from '@/app/components/header/account-setting/model-provider-page/model-parameter-modal'
import { ModelModeType as ModelModeTypeEnum } from '@/types/app'
type FollowUpSettingModalProps = {
data: SuggestedQuestionsAfterAnswer
onSave: (newState: SuggestedQuestionsAfterAnswer) => void
onCancel: () => void
}
const DEFAULT_COMPLETION_PARAMS: CompletionParams = {
temperature: 0.7,
max_tokens: 0,
top_p: 0,
echo: false,
stop: [],
presence_penalty: 0,
frequency_penalty: 0,
}
const DEFAULT_FOLLOW_UP_PROMPT = `Please predict the three most likely follow-up questions a user would ask, keep each question under 20 characters, use the same language as the assistant's latest response, and output a JSON array like ["question1", "question2", "question3"].`
const CUSTOM_FOLLOW_UP_PROMPT_MAX_LENGTH = 1000
const getInitialModel = (model?: Model): Model => ({
provider: model?.provider || '',
name: model?.name || '',
mode: model?.mode || ModelModeTypeEnum.chat,
completion_params: {
...DEFAULT_COMPLETION_PARAMS,
...(model?.completion_params || {}),
},
})
const PROMPT_MODE = {
default: 'default',
custom: 'custom',
} as const
type PromptMode = typeof PROMPT_MODE[keyof typeof PROMPT_MODE]
const FollowUpSettingModal = ({
data,
onSave,
onCancel,
}: FollowUpSettingModalProps) => {
const { t } = useTranslation()
const [model, setModel] = useState<Model>(() => getInitialModel(data.model))
const [prompt, setPrompt] = useState(data.prompt || '')
const [promptMode, setPromptMode] = useState<PromptMode>(
data.prompt ? PROMPT_MODE.custom : PROMPT_MODE.default,
)
const { defaultModel } = useModelListAndDefaultModelAndCurrentProviderAndModel(ModelTypeEnum.textGeneration)
const selectedModel = useMemo<Model>(() => {
if (model.provider && model.name)
return model
if (!defaultModel)
return model
return {
...model,
provider: defaultModel.provider.provider,
name: defaultModel.model,
}
}, [defaultModel, model])
const handleModelChange = useCallback((newValue: { modelId: string, provider: string, mode?: string, features?: string[] }) => {
setModel(prev => ({
...prev,
provider: newValue.provider,
name: newValue.modelId,
mode: (newValue.mode as ModelModeType) || prev.mode || ModelModeTypeEnum.chat,
}))
}, [])
const handleCompletionParamsChange = useCallback((newParams: FormValue) => {
setModel({
...selectedModel,
completion_params: {
...DEFAULT_COMPLETION_PARAMS,
...(newParams as Partial<CompletionParams>),
},
})
}, [selectedModel])
const handleSave = useCallback(() => {
const trimmedPrompt = prompt.trim()
const nextFollowUpState = produce(data, (draft) => {
if (selectedModel.provider && selectedModel.name)
draft.model = selectedModel
else
draft.model = undefined
draft.prompt = promptMode === PROMPT_MODE.custom
? (trimmedPrompt || undefined)
: undefined
})
onSave(nextFollowUpState)
}, [data, onSave, prompt, promptMode, selectedModel])
const isCustomPromptInvalid = promptMode === PROMPT_MODE.custom && !prompt.trim()
return (
<Dialog
open
onOpenChange={(open) => {
if (!open)
onCancel()
}}
>
<DialogContent className="w-[640px]! max-w-none! p-8! pb-6!">
<DialogCloseButton className="top-8 right-8" />
<DialogTitle className="pr-8 text-xl font-semibold text-text-primary">
{t('feature.suggestedQuestionsAfterAnswer.modal.title', { ns: 'appDebug' })}
</DialogTitle>
<div className="mt-6 space-y-4">
<div>
<div className="mb-1.5 system-sm-semibold-uppercase text-text-secondary">
{t('feature.suggestedQuestionsAfterAnswer.modal.modelLabel', { ns: 'appDebug' })}
</div>
<ModelParameterModal
popupClassName="w-[520px]!"
isAdvancedMode
provider={selectedModel.provider}
completionParams={selectedModel.completion_params}
modelId={selectedModel.name}
setModel={handleModelChange}
onCompletionParamsChange={handleCompletionParamsChange}
hideDebugWithMultipleModel
/>
</div>
<div>
<div className="mb-1.5 system-sm-semibold-uppercase text-text-secondary">
{t('feature.suggestedQuestionsAfterAnswer.modal.promptLabel', { ns: 'appDebug' })}
</div>
<div className="space-y-3" role="radiogroup" aria-label={t('feature.suggestedQuestionsAfterAnswer.modal.promptLabel', { ns: 'appDebug' }) || ''}>
<button
type="button"
role="radio"
aria-checked={promptMode === PROMPT_MODE.default}
className={cn(
'w-full rounded-xl border p-4 text-left transition-colors',
promptMode === PROMPT_MODE.default
? 'border-components-option-card-option-selected-border bg-components-option-card-option-selected-bg'
: 'border-components-option-card-option-border bg-components-option-card-option-bg hover:bg-state-base-hover',
)}
onClick={() => setPromptMode(PROMPT_MODE.default)}
>
<div className="flex items-start justify-between gap-3">
<div>
<div className="system-sm-semibold text-text-primary">
{t('feature.suggestedQuestionsAfterAnswer.modal.defaultPromptOption', { ns: 'appDebug' })}
</div>
<div className="mt-1 system-xs-regular text-text-tertiary">
{t('feature.suggestedQuestionsAfterAnswer.modal.defaultPromptOptionDescription', { ns: 'appDebug' })}
</div>
</div>
<div aria-hidden="true">
<Radio isChecked={promptMode === PROMPT_MODE.default} />
</div>
</div>
{promptMode === PROMPT_MODE.default && (
<div className="mt-3 rounded-lg border border-components-input-border-active bg-components-input-bg-normal px-3 py-2">
<div className="system-sm-regular break-words whitespace-pre-wrap text-text-secondary">
{DEFAULT_FOLLOW_UP_PROMPT}
</div>
</div>
)}
</button>
<button
type="button"
role="radio"
aria-checked={promptMode === PROMPT_MODE.custom}
className={cn(
'w-full rounded-xl border p-4 text-left transition-colors',
promptMode === PROMPT_MODE.custom
? 'border-components-option-card-option-selected-border bg-components-option-card-option-selected-bg'
: 'border-components-option-card-option-border bg-components-option-card-option-bg hover:bg-state-base-hover',
)}
onClick={() => setPromptMode(PROMPT_MODE.custom)}
>
<div className="flex items-start justify-between gap-3">
<div>
<div className="system-sm-semibold text-text-primary">
{t('feature.suggestedQuestionsAfterAnswer.modal.customPromptOption', { ns: 'appDebug' })}
</div>
<div className="mt-1 system-xs-regular text-text-tertiary">
{t('feature.suggestedQuestionsAfterAnswer.modal.customPromptOptionDescription', { ns: 'appDebug' })}
</div>
</div>
<div aria-hidden="true">
<Radio isChecked={promptMode === PROMPT_MODE.custom} />
</div>
</div>
{promptMode === PROMPT_MODE.custom && (
<Textarea
className="mt-3 min-h-32 resize-y border-components-input-border-active bg-components-input-bg-normal"
value={prompt}
onChange={e => setPrompt(e.target.value)}
maxLength={CUSTOM_FOLLOW_UP_PROMPT_MAX_LENGTH}
placeholder={t('feature.suggestedQuestionsAfterAnswer.modal.promptPlaceholder', { ns: 'appDebug' }) || ''}
/>
)}
</button>
</div>
</div>
</div>
<div className="mt-6 flex items-center justify-end gap-2">
<Button onClick={onCancel}>
{t('operation.cancel', { ns: 'common' })}
</Button>
<Button
variant="primary"
disabled={isCustomPromptInvalid}
onClick={handleSave}
>
{t('operation.save', { ns: 'common' })}
</Button>
</div>
</DialogContent>
</Dialog>
)
}
export default FollowUpSettingModal

View File

@ -1,10 +1,16 @@
import type { OnFeaturesChange } from '@/app/components/base/features/types'
import type {
OnFeaturesChange,
SuggestedQuestionsAfterAnswer,
} from '@/app/components/base/features/types'
import { Button } from '@langgenius/dify-ui/button'
import { RiEqualizer2Line } from '@remixicon/react'
import { produce } from 'immer'
import * as React from 'react'
import { useCallback } from 'react'
import { useCallback, useState } from 'react'
import { useTranslation } from 'react-i18next'
import { useFeatures, useFeaturesStore } from '@/app/components/base/features/hooks'
import FeatureCard from '@/app/components/base/features/new-feature-panel/feature-card'
import FollowUpSettingModal from '@/app/components/base/features/new-feature-panel/follow-up-setting-modal'
import { FeatureEnum } from '@/app/components/base/features/types'
import { VirtualAssistant } from '@/app/components/base/icons/src/vender/features'
@ -18,8 +24,10 @@ const FollowUp = ({
onChange,
}: Props) => {
const { t } = useTranslation()
const features = useFeatures(s => s.features)
const suggested = useFeatures(s => s.features.suggested)
const featuresStore = useFeaturesStore()
const [isHovering, setIsHovering] = useState(false)
const [isShowSettingModal, setIsShowSettingModal] = useState(false)
const handleChange = useCallback((type: FeatureEnum, enabled: boolean) => {
const {
@ -38,19 +46,76 @@ const FollowUp = ({
onChange(newFeatures)
}, [featuresStore, onChange])
const handleSave = useCallback((newSuggested: SuggestedQuestionsAfterAnswer) => {
const {
features,
setFeatures,
} = featuresStore!.getState()
const newFeatures = produce(features, (draft) => {
draft.suggested = {
...newSuggested,
enabled: true,
}
})
setFeatures(newFeatures)
setIsShowSettingModal(false)
if (onChange)
onChange(newFeatures)
}, [featuresStore, onChange])
const handleOpenSettingModal = useCallback(() => {
if (disabled)
return
setIsShowSettingModal(true)
}, [disabled])
return (
<FeatureCard
icon={(
<div className="shrink-0 rounded-lg border-[0.5px] border-divider-subtle bg-util-colors-blue-light-blue-light-500 p-1 shadow-xs">
<VirtualAssistant className="h-4 w-4 text-text-primary-on-surface" />
</div>
<>
<FeatureCard
icon={(
<div className="shrink-0 rounded-lg border-[0.5px] border-divider-subtle bg-util-colors-blue-light-blue-light-500 p-1 shadow-xs">
<VirtualAssistant className="h-4 w-4 text-text-primary-on-surface" />
</div>
)}
title={t('feature.suggestedQuestionsAfterAnswer.title', { ns: 'appDebug' })}
value={!!suggested?.enabled}
onChange={state => handleChange(FeatureEnum.suggested, state)}
onMouseEnter={() => setIsHovering(true)}
onMouseLeave={() => setIsHovering(false)}
disabled={disabled}
>
<>
{!suggested?.enabled && (
<div className="line-clamp-2 min-h-8 system-xs-regular text-text-tertiary">
{t('feature.suggestedQuestionsAfterAnswer.description', { ns: 'appDebug' })}
</div>
)}
{!!suggested?.enabled && (
<>
{!isHovering && (
<div className="line-clamp-2 min-h-8 system-xs-regular text-text-tertiary">
{suggested.model?.name || t('feature.suggestedQuestionsAfterAnswer.modal.defaultModel', { ns: 'appDebug' })}
</div>
)}
{isHovering && (
<Button className="w-full" onClick={handleOpenSettingModal} disabled={disabled}>
<RiEqualizer2Line className="mr-1 h-4 w-4" />
{t('operation.settings', { ns: 'common' })}
</Button>
)}
</>
)}
</>
</FeatureCard>
{isShowSettingModal && (
<FollowUpSettingModal
data={suggested || { enabled: true }}
onSave={handleSave}
onCancel={() => setIsShowSettingModal(false)}
/>
)}
title={t('feature.suggestedQuestionsAfterAnswer.title', { ns: 'appDebug' })}
value={!!features.suggested?.enabled}
description={t('feature.suggestedQuestionsAfterAnswer.description', { ns: 'appDebug' })!}
onChange={state => handleChange(FeatureEnum.suggested, state)}
disabled={disabled}
/>
</>
)
}

View File

@ -1,5 +1,10 @@
import type { FileUploadConfigResponse } from '@/models/common'
import type { Resolution, TransferMethod, TtsAutoPlay } from '@/types/app'
import type {
Model,
Resolution,
TransferMethod,
TtsAutoPlay,
} from '@/types/app'
export type EnabledOrDisabled = {
enabled?: boolean
@ -12,7 +17,10 @@ export type OpeningStatement = EnabledOrDisabled & {
suggested_questions?: string[]
}
export type SuggestedQuestionsAfterAnswer = EnabledOrDisabled
export type SuggestedQuestionsAfterAnswer = EnabledOrDisabled & {
model?: Model
prompt?: string
}
export type TextToSpeech = EnabledOrDisabled & {
language?: string
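
To make the two optional fields concrete, here is a hedged sketch of an unset versus a configured value; the provider/name values are hypothetical, and the card above only ever reads `suggested.model?.name`:

```ts
import type { SuggestedQuestionsAfterAnswer } from '@/app/components/base/features/types'
import type { Model } from '@/types/app'

// Default state: feature on, both optional fields unset, so the built-in
// prompt and system default model apply (the card shows the defaultModel label).
const defaults: SuggestedQuestionsAfterAnswer = { enabled: true }

// Customized state. Values are hypothetical; `as Model` stands in for
// whatever extra fields the real Model type carries.
const customized: SuggestedQuestionsAfterAnswer = {
  enabled: true,
  model: { provider: 'openai', name: 'gpt-4o' } as Model,
  prompt: 'Suggest three short follow-up questions as a JSON array.',
}
```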

View File

@ -160,6 +160,11 @@
"feature.speechToText.resDes": "تم تمكين الإدخال الصوتي",
"feature.speechToText.title": "تحويل الكلام إلى نص",
"feature.suggestedQuestionsAfterAnswer.description": "يمكن أن يعطي إعداد اقتراح الأسئلة التالية للمستخدمين دردشة أفضل.",
"feature.suggestedQuestionsAfterAnswer.modal.customPromptOption": "موجّه مخصّص",
"feature.suggestedQuestionsAfterAnswer.modal.customPromptOptionDescription": "اكتب واستخدم موجّهك الخاص لتوليد أسئلة المتابعة.",
"feature.suggestedQuestionsAfterAnswer.modal.defaultPromptOption": "موجّه النظام الافتراضي",
"feature.suggestedQuestionsAfterAnswer.modal.defaultPromptOptionDescription": "استخدم الموجّه المدمج لتوليد أسئلة المتابعة.",
"feature.suggestedQuestionsAfterAnswer.modal.promptPlaceholder": "أدخل موجّهًا مخصصًا",
"feature.suggestedQuestionsAfterAnswer.resDes": "3 اقتراحات للسؤال التالي للمستخدم.",
"feature.suggestedQuestionsAfterAnswer.title": "متابعة",
"feature.suggestedQuestionsAfterAnswer.tryToAsk": "حاول أن تسأل",

View File

@ -160,6 +160,11 @@
"feature.speechToText.resDes": "Spracheingabe ist aktiviert",
"feature.speechToText.title": "Sprache zu Text",
"feature.suggestedQuestionsAfterAnswer.description": "Das Einrichten von Vorschlägen für nächste Fragen kann den Chat für Benutzer verbessern.",
"feature.suggestedQuestionsAfterAnswer.modal.customPromptOption": "Benutzerdefinierter Prompt",
"feature.suggestedQuestionsAfterAnswer.modal.customPromptOptionDescription": "Schreiben und verwenden Sie Ihren eigenen Prompt zur Generierung von Folgefragen.",
"feature.suggestedQuestionsAfterAnswer.modal.defaultPromptOption": "Standard-Systemprompt",
"feature.suggestedQuestionsAfterAnswer.modal.defaultPromptOptionDescription": "Verwenden Sie den integrierten Prompt zur Generierung von Folgefragen.",
"feature.suggestedQuestionsAfterAnswer.modal.promptPlaceholder": "Benutzerdefinierten Prompt eingeben",
"feature.suggestedQuestionsAfterAnswer.resDes": "3 Vorschläge für die nächste Benutzerfrage.",
"feature.suggestedQuestionsAfterAnswer.title": "Nachfolgefragen",
"feature.suggestedQuestionsAfterAnswer.tryToAsk": "Versuchen Sie zu fragen",

View File

@ -160,6 +160,15 @@
"feature.speechToText.resDes": "Voice input is enabled",
"feature.speechToText.title": "Speech to Text",
"feature.suggestedQuestionsAfterAnswer.description": "Setting up next questions suggestion can give users a better chat.",
"feature.suggestedQuestionsAfterAnswer.modal.customPromptOption": "Custom prompt",
"feature.suggestedQuestionsAfterAnswer.modal.customPromptOptionDescription": "Write and use your own follow-up generation prompt.",
"feature.suggestedQuestionsAfterAnswer.modal.defaultModel": "System default model",
"feature.suggestedQuestionsAfterAnswer.modal.defaultPromptOption": "System default prompt",
"feature.suggestedQuestionsAfterAnswer.modal.defaultPromptOptionDescription": "Use the built-in prompt for follow-up question generation.",
"feature.suggestedQuestionsAfterAnswer.modal.modelLabel": "Model",
"feature.suggestedQuestionsAfterAnswer.modal.promptLabel": "Prompt",
"feature.suggestedQuestionsAfterAnswer.modal.promptPlaceholder": "Enter a custom prompt",
"feature.suggestedQuestionsAfterAnswer.modal.title": "Follow-up settings",
"feature.suggestedQuestionsAfterAnswer.resDes": "3 suggestions for user next question.",
"feature.suggestedQuestionsAfterAnswer.title": "Follow-up",
"feature.suggestedQuestionsAfterAnswer.tryToAsk": "Try to ask",

View File

@ -160,6 +160,11 @@
"feature.speechToText.resDes": "Entrada de voz habilitada",
"feature.speechToText.title": "Voz a Texto",
"feature.suggestedQuestionsAfterAnswer.description": "Configurar sugerencias de próximas preguntas puede proporcionar una mejor conversación.",
"feature.suggestedQuestionsAfterAnswer.modal.customPromptOption": "Prompt personalizado",
"feature.suggestedQuestionsAfterAnswer.modal.customPromptOptionDescription": "Escribe y usa tu propio prompt para generar preguntas de seguimiento.",
"feature.suggestedQuestionsAfterAnswer.modal.defaultPromptOption": "Prompt predeterminado del sistema",
"feature.suggestedQuestionsAfterAnswer.modal.defaultPromptOptionDescription": "Usa el prompt integrado para generar preguntas de seguimiento.",
"feature.suggestedQuestionsAfterAnswer.modal.promptPlaceholder": "Introduce un prompt personalizado",
"feature.suggestedQuestionsAfterAnswer.resDes": "3 sugerencias para la próxima pregunta del usuario.",
"feature.suggestedQuestionsAfterAnswer.title": "Seguimiento",
"feature.suggestedQuestionsAfterAnswer.tryToAsk": "Intenta preguntar",

View File

@ -160,6 +160,11 @@
"feature.speechToText.resDes": "ورودی صوتی فعال شده است",
"feature.speechToText.title": "تبدیل گفتار به متن",
"feature.suggestedQuestionsAfterAnswer.description": "تنظیم پیشنهاد سوالات بعدی می‌تواند به کاربران یک چت بهتر ارائه دهد.",
"feature.suggestedQuestionsAfterAnswer.modal.customPromptOption": "پرامپت سفارشی",
"feature.suggestedQuestionsAfterAnswer.modal.customPromptOptionDescription": "پرامپت مخصوص خود را برای تولید سوالات پیگیری بنویسید و استفاده کنید.",
"feature.suggestedQuestionsAfterAnswer.modal.defaultPromptOption": "پرامپت پیش‌فرض سیستم",
"feature.suggestedQuestionsAfterAnswer.modal.defaultPromptOptionDescription": "از پرامپت داخلی برای تولید سوالات پیگیری استفاده کنید.",
"feature.suggestedQuestionsAfterAnswer.modal.promptPlaceholder": "یک پرامپت سفارشی وارد کنید",
"feature.suggestedQuestionsAfterAnswer.resDes": "3 پیشنهاد برای سوال بعدی کاربر.",
"feature.suggestedQuestionsAfterAnswer.title": "پیگیری",
"feature.suggestedQuestionsAfterAnswer.tryToAsk": "سعی کنید بپرسید",

View File

@ -160,6 +160,11 @@
"feature.speechToText.resDes": "La saisie vocale est activée",
"feature.speechToText.title": "Discours en Texte",
"feature.suggestedQuestionsAfterAnswer.description": "La configuration de la suggestion des prochaines questions peut offrir aux utilisateurs une meilleure discussion.",
"feature.suggestedQuestionsAfterAnswer.modal.customPromptOption": "Prompt personnalisé",
"feature.suggestedQuestionsAfterAnswer.modal.customPromptOptionDescription": "Rédigez et utilisez votre propre prompt pour générer des questions de suivi.",
"feature.suggestedQuestionsAfterAnswer.modal.defaultPromptOption": "Prompt système par défaut",
"feature.suggestedQuestionsAfterAnswer.modal.defaultPromptOptionDescription": "Utilisez le prompt intégré pour générer des questions de suivi.",
"feature.suggestedQuestionsAfterAnswer.modal.promptPlaceholder": "Saisissez un prompt personnalisé",
"feature.suggestedQuestionsAfterAnswer.resDes": "3 suggestions pour la prochaine question de l'utilisateur.",
"feature.suggestedQuestionsAfterAnswer.title": "Suivi",
"feature.suggestedQuestionsAfterAnswer.tryToAsk": "Essayez de demander",

View File

@ -160,6 +160,11 @@
"feature.speechToText.resDes": "वॉयस इनपुट सक्रिय है",
"feature.speechToText.title": "वाक् से पाठ",
"feature.suggestedQuestionsAfterAnswer.description": "अगले प्रश्न सुझाव सेट करना उपयोगकर्ताओं को बेहतर चैट दे सकता है।",
"feature.suggestedQuestionsAfterAnswer.modal.customPromptOption": "कस्टम प्रॉम्प्ट",
"feature.suggestedQuestionsAfterAnswer.modal.customPromptOptionDescription": "फ़ॉलो-अप प्रश्न बनाने के लिए अपना स्वयं का प्रॉम्प्ट लिखें और उपयोग करें।",
"feature.suggestedQuestionsAfterAnswer.modal.defaultPromptOption": "सिस्टम डिफ़ॉल्ट प्रॉम्प्ट",
"feature.suggestedQuestionsAfterAnswer.modal.defaultPromptOptionDescription": "फ़ॉलो-अप प्रश्न बनाने के लिए बिल्ट-इन प्रॉम्प्ट का उपयोग करें।",
"feature.suggestedQuestionsAfterAnswer.modal.promptPlaceholder": "कस्टम प्रॉम्प्ट दर्ज करें",
"feature.suggestedQuestionsAfterAnswer.resDes": "उपयोगकर्ता के अगले प्रश्न के लिए 3 सुझाव।",
"feature.suggestedQuestionsAfterAnswer.title": "फॉलो-अप",
"feature.suggestedQuestionsAfterAnswer.tryToAsk": "पूछने का प्रयास करें",

View File

@ -160,6 +160,11 @@
"feature.speechToText.resDes": "Input suara diaktifkan",
"feature.speechToText.title": "Ucapan ke Teks",
"feature.suggestedQuestionsAfterAnswer.description": "Menyiapkan saran pertanyaan berikutnya dapat memberi pengguna obrolan yang lebih baik.",
"feature.suggestedQuestionsAfterAnswer.modal.customPromptOption": "Prompt kustom",
"feature.suggestedQuestionsAfterAnswer.modal.customPromptOptionDescription": "Tulis dan gunakan prompt Anda sendiri untuk menghasilkan pertanyaan tindak lanjut.",
"feature.suggestedQuestionsAfterAnswer.modal.defaultPromptOption": "Prompt default sistem",
"feature.suggestedQuestionsAfterAnswer.modal.defaultPromptOptionDescription": "Gunakan prompt bawaan untuk menghasilkan pertanyaan tindak lanjut.",
"feature.suggestedQuestionsAfterAnswer.modal.promptPlaceholder": "Masukkan prompt kustom",
"feature.suggestedQuestionsAfterAnswer.resDes": "3 saran untuk pertanyaan pengguna berikutnya.",
"feature.suggestedQuestionsAfterAnswer.title": "Tindak lanjut",
"feature.suggestedQuestionsAfterAnswer.tryToAsk": "Cobalah untuk bertanya",

View File

@ -160,6 +160,11 @@
"feature.speechToText.resDes": "L'input vocale è abilitato",
"feature.speechToText.title": "Da voce a testo",
"feature.suggestedQuestionsAfterAnswer.description": "Impostare suggerimenti per le prossime domande può offrire agli utenti una chat migliore.",
"feature.suggestedQuestionsAfterAnswer.modal.customPromptOption": "Prompt personalizzato",
"feature.suggestedQuestionsAfterAnswer.modal.customPromptOptionDescription": "Scrivi e usa il tuo prompt per generare domande di follow-up.",
"feature.suggestedQuestionsAfterAnswer.modal.defaultPromptOption": "Prompt predefinito di sistema",
"feature.suggestedQuestionsAfterAnswer.modal.defaultPromptOptionDescription": "Usa il prompt integrato per generare domande di follow-up.",
"feature.suggestedQuestionsAfterAnswer.modal.promptPlaceholder": "Inserisci un prompt personalizzato",
"feature.suggestedQuestionsAfterAnswer.resDes": "3 suggerimenti per la prossima domanda dell'utente.",
"feature.suggestedQuestionsAfterAnswer.title": "Follow-up",
"feature.suggestedQuestionsAfterAnswer.tryToAsk": "Prova a chiedere",

View File

@ -160,6 +160,11 @@
"feature.speechToText.resDes": "音声入力が有効になっています",
"feature.speechToText.title": "音声からテキストへ",
"feature.suggestedQuestionsAfterAnswer.description": "次の質問の提案を設定すると、ユーザーにより良いチャットが提供されます。",
"feature.suggestedQuestionsAfterAnswer.modal.customPromptOption": "カスタムプロンプト",
"feature.suggestedQuestionsAfterAnswer.modal.customPromptOptionDescription": "フォローアップ質問を生成するための独自のプロンプトを作成して使用します。",
"feature.suggestedQuestionsAfterAnswer.modal.defaultPromptOption": "システムデフォルトのプロンプト",
"feature.suggestedQuestionsAfterAnswer.modal.defaultPromptOptionDescription": "フォローアップ質問の生成には組み込みのプロンプトを使用します。",
"feature.suggestedQuestionsAfterAnswer.modal.promptPlaceholder": "カスタムプロンプトを入力",
"feature.suggestedQuestionsAfterAnswer.resDes": "ユーザーの次の質問に関する 3 つの提案。",
"feature.suggestedQuestionsAfterAnswer.title": "フォローアップ",
"feature.suggestedQuestionsAfterAnswer.tryToAsk": "質問してみてください",

View File

@ -160,6 +160,11 @@
"feature.speechToText.resDes": "음성 입력이 활성화되어 있습니다",
"feature.speechToText.title": "음성에서 텍스트로",
"feature.suggestedQuestionsAfterAnswer.description": "다음 질문 제안을 설정하면 사용자에게 더 나은 채팅이 제공됩니다.",
"feature.suggestedQuestionsAfterAnswer.modal.customPromptOption": "사용자 지정 프롬프트",
"feature.suggestedQuestionsAfterAnswer.modal.customPromptOptionDescription": "후속 질문 생성을 위한 자체 프롬프트를 작성하고 사용합니다.",
"feature.suggestedQuestionsAfterAnswer.modal.defaultPromptOption": "시스템 기본 프롬프트",
"feature.suggestedQuestionsAfterAnswer.modal.defaultPromptOptionDescription": "후속 질문 생성을 위해 내장 프롬프트를 사용합니다.",
"feature.suggestedQuestionsAfterAnswer.modal.promptPlaceholder": "사용자 지정 프롬프트 입력",
"feature.suggestedQuestionsAfterAnswer.resDes": "사용자의 다음 질문에 대한 3 가지 제안.",
"feature.suggestedQuestionsAfterAnswer.title": "팔로우업",
"feature.suggestedQuestionsAfterAnswer.tryToAsk": "질문해보세요",

View File

@ -160,6 +160,11 @@
"feature.speechToText.resDes": "Voice input is enabled",
"feature.speechToText.title": "Speech to Text",
"feature.suggestedQuestionsAfterAnswer.description": "Setting up next questions suggestion can give users a better chat.",
"feature.suggestedQuestionsAfterAnswer.modal.customPromptOption": "Aangepaste prompt",
"feature.suggestedQuestionsAfterAnswer.modal.customPromptOptionDescription": "Schrijf en gebruik je eigen prompt voor het genereren van vervolgvragen.",
"feature.suggestedQuestionsAfterAnswer.modal.defaultPromptOption": "Standaard systeemprompt",
"feature.suggestedQuestionsAfterAnswer.modal.defaultPromptOptionDescription": "Gebruik de ingebouwde prompt voor het genereren van vervolgvragen.",
"feature.suggestedQuestionsAfterAnswer.modal.promptPlaceholder": "Voer een aangepaste prompt in",
"feature.suggestedQuestionsAfterAnswer.resDes": "3 suggestions for user next question.",
"feature.suggestedQuestionsAfterAnswer.title": "Follow-up",
"feature.suggestedQuestionsAfterAnswer.tryToAsk": "Try to ask",

View File

@ -160,6 +160,11 @@
"feature.speechToText.resDes": "Wprowadzanie głosowe jest włączone",
"feature.speechToText.title": "Mowa na tekst",
"feature.suggestedQuestionsAfterAnswer.description": "Ustawienie kolejnych pytań może poprawić czat.",
"feature.suggestedQuestionsAfterAnswer.modal.customPromptOption": "Własny prompt",
"feature.suggestedQuestionsAfterAnswer.modal.customPromptOptionDescription": "Napisz i użyj własnego promptu do generowania pytań uzupełniających.",
"feature.suggestedQuestionsAfterAnswer.modal.defaultPromptOption": "Domyślny prompt systemowy",
"feature.suggestedQuestionsAfterAnswer.modal.defaultPromptOptionDescription": "Użyj wbudowanego promptu do generowania pytań uzupełniających.",
"feature.suggestedQuestionsAfterAnswer.modal.promptPlaceholder": "Wpisz własny prompt",
"feature.suggestedQuestionsAfterAnswer.resDes": "3 sugestie dla kolejnego pytania użytkownika.",
"feature.suggestedQuestionsAfterAnswer.title": "Nawiązanie",
"feature.suggestedQuestionsAfterAnswer.tryToAsk": "Spróbuj zapytać",

View File

@ -160,6 +160,11 @@
"feature.speechToText.resDes": "Entrada de voz está ativada",
"feature.speechToText.title": "Fala para Texto",
"feature.suggestedQuestionsAfterAnswer.description": "Configurar sugestões de próximas perguntas pode proporcionar um melhor chat aos usuários.",
"feature.suggestedQuestionsAfterAnswer.modal.customPromptOption": "Prompt personalizado",
"feature.suggestedQuestionsAfterAnswer.modal.customPromptOptionDescription": "Escreva e use seu próprio prompt para gerar perguntas de acompanhamento.",
"feature.suggestedQuestionsAfterAnswer.modal.defaultPromptOption": "Prompt padrão do sistema",
"feature.suggestedQuestionsAfterAnswer.modal.defaultPromptOptionDescription": "Use o prompt integrado para gerar perguntas de acompanhamento.",
"feature.suggestedQuestionsAfterAnswer.modal.promptPlaceholder": "Digite um prompt personalizado",
"feature.suggestedQuestionsAfterAnswer.resDes": "3 sugestões para a próxima pergunta do usuário.",
"feature.suggestedQuestionsAfterAnswer.title": "Perguntas de Acompanhamento",
"feature.suggestedQuestionsAfterAnswer.tryToAsk": "Tente perguntar",

View File

@ -160,6 +160,11 @@
"feature.speechToText.resDes": "Intrarea vocală este activată",
"feature.speechToText.title": "Voce la text",
"feature.suggestedQuestionsAfterAnswer.description": "Setarea sugestiilor pentru întrebările următoare poate oferi utilizatorilor o conversație mai bună.",
"feature.suggestedQuestionsAfterAnswer.modal.customPromptOption": "Prompt personalizat",
"feature.suggestedQuestionsAfterAnswer.modal.customPromptOptionDescription": "Scrie și folosește propriul tău prompt pentru a genera întrebări de follow-up.",
"feature.suggestedQuestionsAfterAnswer.modal.defaultPromptOption": "Prompt implicit al sistemului",
"feature.suggestedQuestionsAfterAnswer.modal.defaultPromptOptionDescription": "Folosește promptul integrat pentru a genera întrebări de follow-up.",
"feature.suggestedQuestionsAfterAnswer.modal.promptPlaceholder": "Introdu un prompt personalizat",
"feature.suggestedQuestionsAfterAnswer.resDes": "3 sugestii pentru următoarea întrebare a utilizatorului.",
"feature.suggestedQuestionsAfterAnswer.title": "Urmărire",
"feature.suggestedQuestionsAfterAnswer.tryToAsk": "Încercați să întrebați",

View File

@ -160,6 +160,11 @@
"feature.speechToText.resDes": "Голосовой ввод включен",
"feature.speechToText.title": "Преобразование речи в текст",
"feature.suggestedQuestionsAfterAnswer.description": "Настройка предложения следующих вопросов может улучшить чат для пользователей.",
"feature.suggestedQuestionsAfterAnswer.modal.customPromptOption": "Пользовательский промпт",
"feature.suggestedQuestionsAfterAnswer.modal.customPromptOptionDescription": "Напишите и используйте собственный промпт для генерации последующих вопросов.",
"feature.suggestedQuestionsAfterAnswer.modal.defaultPromptOption": "Системный промпт по умолчанию",
"feature.suggestedQuestionsAfterAnswer.modal.defaultPromptOptionDescription": "Используйте встроенный промпт для генерации последующих вопросов.",
"feature.suggestedQuestionsAfterAnswer.modal.promptPlaceholder": "Введите пользовательский промпт",
"feature.suggestedQuestionsAfterAnswer.resDes": "3 предложения для следующего вопроса пользователя.",
"feature.suggestedQuestionsAfterAnswer.title": "Последующие вопросы",
"feature.suggestedQuestionsAfterAnswer.tryToAsk": "Попробуйте спросить",

View File

@ -160,6 +160,11 @@
"feature.speechToText.resDes": "Glasovni vnos je omogočen.",
"feature.speechToText.title": "Govor v besedilo",
"feature.suggestedQuestionsAfterAnswer.description": "Nastavitev predlogov za naslednja vprašanja lahko uporabnikom izboljša klepet.",
"feature.suggestedQuestionsAfterAnswer.modal.customPromptOption": "Prilagojen poziv",
"feature.suggestedQuestionsAfterAnswer.modal.customPromptOptionDescription": "Napišite in uporabite svoj poziv za ustvarjanje nadaljnjih vprašanj.",
"feature.suggestedQuestionsAfterAnswer.modal.defaultPromptOption": "Privzeti sistemski poziv",
"feature.suggestedQuestionsAfterAnswer.modal.defaultPromptOptionDescription": "Za ustvarjanje nadaljnjih vprašanj uporabite vgrajeni poziv.",
"feature.suggestedQuestionsAfterAnswer.modal.promptPlaceholder": "Vnesite prilagojen poziv",
"feature.suggestedQuestionsAfterAnswer.resDes": "3 predlogi za naslednje vprašanje uporabnika.",
"feature.suggestedQuestionsAfterAnswer.title": "Nadaljnja vprašanja",
"feature.suggestedQuestionsAfterAnswer.tryToAsk": "Poskusi vprašati",

View File

@ -160,6 +160,11 @@
"feature.speechToText.resDes": "เปิดใช้งานการป้อนข้อมูลด้วยเสียง",
"feature.speechToText.title": "คําพูดเป็นข้อความ",
"feature.suggestedQuestionsAfterAnswer.description": "การตั้งค่าคําแนะนําคําถามถัดไปจะช่วยให้ผู้ใช้แชทได้ดีขึ้น",
"feature.suggestedQuestionsAfterAnswer.modal.customPromptOption": "พรอมต์กำหนดเอง",
"feature.suggestedQuestionsAfterAnswer.modal.customPromptOptionDescription": "เขียนและใช้พรอมต์ของคุณเองเพื่อสร้างคำถามติดตาม",
"feature.suggestedQuestionsAfterAnswer.modal.defaultPromptOption": "พรอมต์เริ่มต้นของระบบ",
"feature.suggestedQuestionsAfterAnswer.modal.defaultPromptOptionDescription": "ใช้พรอมต์ที่มีอยู่ในระบบเพื่อสร้างคำถามติดตาม",
"feature.suggestedQuestionsAfterAnswer.modal.promptPlaceholder": "ป้อนพรอมต์กำหนดเอง",
"feature.suggestedQuestionsAfterAnswer.resDes": "3 ข้อเสนอแนะสําหรับผู้ใช้คําถามถัดไป",
"feature.suggestedQuestionsAfterAnswer.title": "ติดตาม",
"feature.suggestedQuestionsAfterAnswer.tryToAsk": "ลองถาม",

View File

@ -160,6 +160,11 @@
"feature.speechToText.resDes": "Sesli giriş etkinleştirildi",
"feature.speechToText.title": "Sesten Metne",
"feature.suggestedQuestionsAfterAnswer.description": "Sonraki soru önerilerini ayarlamak, kullanıcılara daha iyi bir sohbet deneyimi sunabilir.",
"feature.suggestedQuestionsAfterAnswer.modal.customPromptOption": "Özel istem",
"feature.suggestedQuestionsAfterAnswer.modal.customPromptOptionDescription": "Takip soruları oluşturmak için kendi isteminizi yazın ve kullanın.",
"feature.suggestedQuestionsAfterAnswer.modal.defaultPromptOption": "Sistem varsayılan istemi",
"feature.suggestedQuestionsAfterAnswer.modal.defaultPromptOptionDescription": "Takip soruları oluşturmak için yerleşik istemi kullanın.",
"feature.suggestedQuestionsAfterAnswer.modal.promptPlaceholder": "Özel istem girin",
"feature.suggestedQuestionsAfterAnswer.resDes": "Kullanıcı için 3 önerilen sonraki soru.",
"feature.suggestedQuestionsAfterAnswer.title": "Takip Soruları",
"feature.suggestedQuestionsAfterAnswer.tryToAsk": "Sormayı dene",

View File

@ -160,6 +160,11 @@
"feature.speechToText.resDes": "Голосовий ввід увімкнено",
"feature.speechToText.title": "Мовлення в текст",
"feature.suggestedQuestionsAfterAnswer.description": "Налаштування пропозицій наступних запитань може надати користувачам кращий чат.",
"feature.suggestedQuestionsAfterAnswer.modal.customPromptOption": "Користувацький промпт",
"feature.suggestedQuestionsAfterAnswer.modal.customPromptOptionDescription": "Напишіть і використовуйте власний промпт для генерації наступних запитань.",
"feature.suggestedQuestionsAfterAnswer.modal.defaultPromptOption": "Системний промпт за замовчуванням",
"feature.suggestedQuestionsAfterAnswer.modal.defaultPromptOptionDescription": "Використовуйте вбудований промпт для генерації наступних запитань.",
"feature.suggestedQuestionsAfterAnswer.modal.promptPlaceholder": "Введіть користувацький промпт",
"feature.suggestedQuestionsAfterAnswer.resDes": "3 пропозиції для наступного питання користувача.",
"feature.suggestedQuestionsAfterAnswer.title": "Наступні",
"feature.suggestedQuestionsAfterAnswer.tryToAsk": "Спробуйте спитати",

View File

@ -160,6 +160,11 @@
"feature.speechToText.resDes": "Đã bật đầu vào bằng giọng nói",
"feature.speechToText.title": "Chuyển đổi giọng nói thành văn bản",
"feature.suggestedQuestionsAfterAnswer.description": "Thiết lập đề xuất câu hỏi tiếp theo có thể tạo ra cuộc trò chuyện tốt hơn cho người dùng.",
"feature.suggestedQuestionsAfterAnswer.modal.customPromptOption": "Prompt tùy chỉnh",
"feature.suggestedQuestionsAfterAnswer.modal.customPromptOptionDescription": "Viết và sử dụng prompt riêng để tạo câu hỏi tiếp theo.",
"feature.suggestedQuestionsAfterAnswer.modal.defaultPromptOption": "Prompt mặc định của hệ thống",
"feature.suggestedQuestionsAfterAnswer.modal.defaultPromptOptionDescription": "Sử dụng prompt tích hợp để tạo câu hỏi tiếp theo.",
"feature.suggestedQuestionsAfterAnswer.modal.promptPlaceholder": "Nhập prompt tùy chỉnh",
"feature.suggestedQuestionsAfterAnswer.resDes": "3 đề xuất cho câu hỏi tiếp theo của người dùng.",
"feature.suggestedQuestionsAfterAnswer.title": "Câu hỏi gợi ý",
"feature.suggestedQuestionsAfterAnswer.tryToAsk": "Thử hỏi",

View File

@ -160,6 +160,15 @@
"feature.speechToText.resDes": "语音输入已启用",
"feature.speechToText.title": "语音转文字",
"feature.suggestedQuestionsAfterAnswer.description": "设置下一步问题建议可以让用户更好的对话。",
"feature.suggestedQuestionsAfterAnswer.modal.customPromptOption": "自定义提示词",
"feature.suggestedQuestionsAfterAnswer.modal.customPromptOptionDescription": "编写并使用你自己的下一步问题生成提示词。",
"feature.suggestedQuestionsAfterAnswer.modal.defaultModel": "系统默认模型",
"feature.suggestedQuestionsAfterAnswer.modal.defaultPromptOption": "系统默认提示词",
"feature.suggestedQuestionsAfterAnswer.modal.defaultPromptOptionDescription": "使用内置提示词生成下一步问题。",
"feature.suggestedQuestionsAfterAnswer.modal.modelLabel": "模型",
"feature.suggestedQuestionsAfterAnswer.modal.promptLabel": "提示词",
"feature.suggestedQuestionsAfterAnswer.modal.promptPlaceholder": "输入自定义提示词",
"feature.suggestedQuestionsAfterAnswer.modal.title": "下一步问题建议设置",
"feature.suggestedQuestionsAfterAnswer.resDes": "回答结束后系统会给出 3 个建议",
"feature.suggestedQuestionsAfterAnswer.title": "下一步问题建议",
"feature.suggestedQuestionsAfterAnswer.tryToAsk": "试着问问",

View File

@ -160,6 +160,11 @@
"feature.speechToText.resDes": "語音輸入已啟用",
"feature.speechToText.title": "語音轉文字",
"feature.suggestedQuestionsAfterAnswer.description": "設定下一步問題建議可以讓使用者更好的對話。",
"feature.suggestedQuestionsAfterAnswer.modal.customPromptOption": "自訂提示詞",
"feature.suggestedQuestionsAfterAnswer.modal.customPromptOptionDescription": "編寫並使用你自己的下一步問題生成提示詞。",
"feature.suggestedQuestionsAfterAnswer.modal.defaultPromptOption": "系統預設提示詞",
"feature.suggestedQuestionsAfterAnswer.modal.defaultPromptOptionDescription": "使用內建提示詞生成下一步問題。",
"feature.suggestedQuestionsAfterAnswer.modal.promptPlaceholder": "輸入自訂提示詞",
"feature.suggestedQuestionsAfterAnswer.resDes": "回答結束後系統會給出 3 個建議",
"feature.suggestedQuestionsAfterAnswer.title": "下一步問題建議",
"feature.suggestedQuestionsAfterAnswer.tryToAsk": "試著問問",

View File

@ -9,7 +9,7 @@ import type {
RerankingModeEnum,
WeightedScoreEnum,
} from '@/models/datasets'
import type { AgentStrategy, ModelModeType, RETRIEVE_TYPE, ToolItem, TtsAutoPlay } from '@/types/app'
import type { AgentStrategy, Model, ModelModeType, RETRIEVE_TYPE, ToolItem, TtsAutoPlay } from '@/types/app'
export type Inputs = Record<string, string | number | object | boolean>
@ -74,7 +74,10 @@ export type MoreLikeThisConfig = {
enabled: boolean
}
export type SuggestedQuestionsAfterAnswerConfig = MoreLikeThisConfig
export type SuggestedQuestionsAfterAnswerConfig = MoreLikeThisConfig & {
model?: Model
prompt?: string
}
export type SpeechToTextConfig = MoreLikeThisConfig
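
Spelled out, the intersection resolves to the shape below: `enabled` stays required via `MoreLikeThisConfig`, while the two new knobs are optional:

```ts
import type { Model } from '@/types/app'

// Expanded form of SuggestedQuestionsAfterAnswerConfig:
type Expanded = {
  enabled: boolean  // required, from MoreLikeThisConfig
  model?: Model     // optional override for the generation model
  prompt?: string   // optional custom follow-up prompt
}
```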

View File

@ -230,6 +230,8 @@ export type ModelConfig = {
}
suggested_questions_after_answer: {
enabled: boolean
model?: Model
prompt?: string
}
speech_to_text: {
enabled: boolean
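
The same optional pair surfaces in the persisted app config. A minimal sketch, assuming the `ModelConfig` extended above lives in `@/types/app` and that `Model` exposes at least `provider` and `name`; omitting both fields keeps the system default model and built-in prompt:

```ts
import type { Model, ModelConfig } from '@/types/app'

// Hypothetical values; only `enabled` is required.
const suggestedConfig: ModelConfig['suggested_questions_after_answer'] = {
  enabled: true,
  model: { provider: 'openai', name: 'gpt-4o' } as Model,
  prompt: 'Suggest three short follow-up questions as a JSON array.',
}
```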