Merge branch 'refs/heads/main' into feat/workflow-parallel-support

# Conflicts:
#	api/core/workflow/workflow_entry.py
This commit is contained in:
takatost 2024-07-24 23:43:14 +08:00
commit 833584ba76
128 changed files with 4008 additions and 1419 deletions

View File

@ -89,6 +89,5 @@ jobs:
pgvecto-rs
pgvector
chroma
myscale
- name: Test Vector Stores
run: poetry run -C api bash dev/pytest/pytest_vdb.sh

View File

@ -12,7 +12,8 @@
```bash
cd ../docker
cp middleware.env.example middleware.env
docker compose -f docker-compose.middleware.yaml -p dify up -d
# change the profile to other vector database if you are not using weaviate
docker compose -f docker-compose.middleware.yaml --profile weaviate -p dify up -d
cd ../api
```

View File

@ -9,7 +9,7 @@ class PackagingInfo(BaseSettings):
CURRENT_VERSION: str = Field(
description='Dify version',
default='0.6.14',
default='0.6.15',
)
COMMIT_SHA: str = Field(

View File

@ -22,7 +22,7 @@ from fields.conversation_fields import (
)
from libs.helper import datetime_string
from libs.login import login_required
from models.model import AppMode, Conversation, Message, MessageAnnotation
from models.model import AppMode, Conversation, EndUser, Message, MessageAnnotation
class CompletionConversationApi(Resource):
@ -156,19 +156,31 @@ class ChatConversationApi(Resource):
parser.add_argument('limit', type=int_range(1, 100), required=False, default=20, location='args')
args = parser.parse_args()
subquery = (
db.session.query(
Conversation.id.label('conversation_id'),
EndUser.session_id.label('from_end_user_session_id')
)
.outerjoin(EndUser, Conversation.from_end_user_id == EndUser.id)
.subquery()
)
query = db.select(Conversation).where(Conversation.app_id == app_model.id)
if args['keyword']:
keyword_filter = '%{}%'.format(args['keyword'])
query = query.join(
Message, Message.conversation_id == Conversation.id
Message, Message.conversation_id == Conversation.id,
).join(
subquery, subquery.c.conversation_id == Conversation.id
).filter(
or_(
Message.query.ilike('%{}%'.format(args['keyword'])),
Message.answer.ilike('%{}%'.format(args['keyword'])),
Conversation.name.ilike('%{}%'.format(args['keyword'])),
Conversation.introduction.ilike('%{}%'.format(args['keyword'])),
Message.query.ilike(keyword_filter),
Message.answer.ilike(keyword_filter),
Conversation.name.ilike(keyword_filter),
Conversation.introduction.ilike(keyword_filter),
subquery.c.from_end_user_session_id.ilike(keyword_filter)
),
)
account = current_user

View File

@ -22,17 +22,19 @@ class RuleGenerateApi(Resource):
@account_initialization_required
def post(self):
parser = reqparse.RequestParser()
parser.add_argument('audiences', type=str, required=True, nullable=False, location='json')
parser.add_argument('hoping_to_solve', type=str, required=True, nullable=False, location='json')
parser.add_argument('instruction', type=str, required=True, nullable=False, location='json')
parser.add_argument('model_config', type=dict, required=True, nullable=False, location='json')
parser.add_argument('no_variable', type=bool, required=True, default=False, location='json')
args = parser.parse_args()
account = current_user
try:
rules = LLMGenerator.generate_rule_config(
account.current_tenant_id,
args['audiences'],
args['hoping_to_solve']
tenant_id=account.current_tenant_id,
instruction=args['instruction'],
model_config=args['model_config'],
no_variable=args['no_variable']
)
except ProviderTokenNotInitError as ex:
raise ProviderNotInitializeError(ex.description)

View File

@ -62,7 +62,12 @@ class DatasetConfigManager:
return None
# dataset configs
dataset_configs = config.get('dataset_configs', {'retrieval_model': 'single'})
if 'dataset_configs' in config and config.get('dataset_configs'):
dataset_configs = config.get('dataset_configs')
else:
dataset_configs = {
'retrieval_model': 'multiple'
}
query_variable = config.get('dataset_query_variable')
if dataset_configs['retrieval_model'] == 'single':
@ -83,9 +88,10 @@ class DatasetConfigManager:
retrieve_strategy=DatasetRetrieveConfigEntity.RetrieveStrategy.value_of(
dataset_configs['retrieval_model']
),
top_k=dataset_configs.get('top_k'),
top_k=dataset_configs.get('top_k', 4),
score_threshold=dataset_configs.get('score_threshold'),
reranking_model=dataset_configs.get('reranking_model')
reranking_model=dataset_configs.get('reranking_model'),
weights=dataset_configs.get('weights')
)
)
@ -114,12 +120,6 @@ class DatasetConfigManager:
if not isinstance(config["dataset_configs"], dict):
raise ValueError("dataset_configs must be of object type")
if config["dataset_configs"]['retrieval_model'] == 'multiple':
if not config["dataset_configs"]['reranking_model']:
raise ValueError("reranking_model has not been set")
if not isinstance(config["dataset_configs"]['reranking_model'], dict):
raise ValueError("reranking_model must be of object type")
if not isinstance(config["dataset_configs"], dict):
raise ValueError("dataset_configs must be of object type")

View File

@ -159,7 +159,11 @@ class DatasetRetrieveConfigEntity(BaseModel):
retrieve_strategy: RetrieveStrategy
top_k: Optional[int] = None
score_threshold: Optional[float] = None
rerank_mode: Optional[str] = 'reranking_model'
reranking_model: Optional[dict] = None
weights: Optional[dict] = None
class DatasetEntity(BaseModel):

View File

@ -1,11 +1,12 @@
from .segment_group import SegmentGroup
from .segments import Segment
from .segments import NoneSegment, Segment
from .types import SegmentType
from .variables import (
ArrayVariable,
FileVariable,
FloatVariable,
IntegerVariable,
NoneVariable,
ObjectVariable,
SecretVariable,
StringVariable,
@ -23,5 +24,7 @@ __all__ = [
'Variable',
'SegmentType',
'SegmentGroup',
'Segment'
'Segment',
'NoneSegment',
'NoneVariable',
]

View File

@ -10,6 +10,7 @@ from .variables import (
FileVariable,
FloatVariable,
IntegerVariable,
NoneVariable,
ObjectVariable,
SecretVariable,
StringVariable,
@ -39,6 +40,8 @@ def build_variable_from_mapping(m: Mapping[str, Any], /) -> Variable:
def build_anonymous_variable(value: Any, /) -> Variable:
if value is None:
return NoneVariable(name='anonymous')
if isinstance(value, str):
return StringVariable(name='anonymous', value=value)
if isinstance(value, int):

View File

@ -43,6 +43,23 @@ class Segment(BaseModel):
return self.value
class NoneSegment(Segment):
value_type: SegmentType = SegmentType.NONE
value: None = None
@property
def text(self) -> str:
return 'null'
@property
def log(self) -> str:
return 'null'
@property
def markdown(self) -> str:
return 'null'
class StringSegment(Segment):
value_type: SegmentType = SegmentType.STRING
value: str

View File

@ -2,16 +2,10 @@ from enum import Enum
class SegmentType(str, Enum):
STRING = 'string'
NONE = 'none'
NUMBER = 'number'
FILE = 'file'
STRING = 'string'
SECRET = 'secret'
OBJECT = 'object'
ARRAY = 'array'
ARRAY_STRING = 'array[string]'
ARRAY_NUMBER = 'array[number]'
ARRAY_OBJECT = 'array[object]'
ARRAY_FILE = 'array[file]'
OBJECT = 'object'
FILE = 'file'

View File

@ -6,7 +6,7 @@ from pydantic import Field
from core.file.file_obj import FileVar
from core.helper import encrypter
from .segments import Segment, StringSegment
from .segments import NoneSegment, Segment, StringSegment
from .types import SegmentType
@ -20,6 +20,7 @@ class Variable(Segment):
description="Unique identity for variable. It's only used by environment variables now.",
)
name: str
description: str = Field(default='', description='Description of the variable.')
class StringVariable(StringSegment, Variable):
@ -81,3 +82,8 @@ class SecretVariable(StringVariable):
@property
def log(self) -> str:
return encrypter.obfuscated_token(self.value)
class NoneVariable(NoneSegment, Variable):
value_type: SegmentType = SegmentType.NONE
value: None = None

View File

@ -3,10 +3,13 @@ import logging
import re
from typing import Optional
from core.llm_generator.output_parser.errors import OutputParserException
from core.llm_generator.output_parser.rule_config_generator import RuleConfigGeneratorOutputParser
from core.llm_generator.output_parser.suggested_questions_after_answer import SuggestedQuestionsAfterAnswerOutputParser
from core.llm_generator.prompts import CONVERSATION_TITLE_PROMPT, GENERATOR_QA_PROMPT
from core.llm_generator.prompts import (
CONVERSATION_TITLE_PROMPT,
GENERATOR_QA_PROMPT,
WORKFLOW_RULE_CONFIG_PROMPT_GENERATE_TEMPLATE,
)
from core.model_manager import ModelManager
from core.model_runtime.entities.message_entities import SystemPromptMessage, UserPromptMessage
from core.model_runtime.entities.model_entities import ModelType
@ -115,55 +118,158 @@ class LLMGenerator:
return questions
@classmethod
def generate_rule_config(cls, tenant_id: str, audiences: str, hoping_to_solve: str) -> dict:
def generate_rule_config(cls, tenant_id: str, instruction: str, model_config: dict, no_variable: bool) -> dict:
output_parser = RuleConfigGeneratorOutputParser()
error = ""
error_step = ""
rule_config = {
"prompt": "",
"variables": [],
"opening_statement": "",
"error": ""
}
model_parameters = {
"max_tokens": 512,
"temperature": 0.01
}
if no_variable:
prompt_template = PromptTemplateParser(
WORKFLOW_RULE_CONFIG_PROMPT_GENERATE_TEMPLATE
)
prompt_generate = prompt_template.format(
inputs={
"TASK_DESCRIPTION": instruction,
},
remove_template_variables=False
)
prompt_messages = [UserPromptMessage(content=prompt_generate)]
model_manager = ModelManager()
model_instance = model_manager.get_default_model_instance(
tenant_id=tenant_id,
model_type=ModelType.LLM,
)
try:
response = model_instance.invoke_llm(
prompt_messages=prompt_messages,
model_parameters=model_parameters,
stream=False
)
rule_config["prompt"] = response.message.content
except InvokeError as e:
error = str(e)
error_step = "generate rule config"
except Exception as e:
logging.exception(e)
rule_config["error"] = str(e)
rule_config["error"] = f"Failed to {error_step}. Error: {error}" if error else ""
return rule_config
# get rule config prompt, parameter and statement
prompt_generate, parameter_generate, statement_generate = output_parser.get_format_instructions()
prompt_template = PromptTemplateParser(
template=output_parser.get_format_instructions()
prompt_generate
)
prompt = prompt_template.format(
parameter_template = PromptTemplateParser(
parameter_generate
)
statement_template = PromptTemplateParser(
statement_generate
)
# format the prompt_generate_prompt
prompt_generate_prompt = prompt_template.format(
inputs={
"audiences": audiences,
"hoping_to_solve": hoping_to_solve,
"variable": "{{variable}}",
"lanA": "{{lanA}}",
"lanB": "{{lanB}}",
"topic": "{{topic}}"
"TASK_DESCRIPTION": instruction,
},
remove_template_variables=False
)
prompt_messages = [UserPromptMessage(content=prompt_generate_prompt)]
# get model instance
model_manager = ModelManager()
model_instance = model_manager.get_default_model_instance(
model_instance = model_manager.get_model_instance(
tenant_id=tenant_id,
model_type=ModelType.LLM,
provider=model_config.get("provider") if model_config else None,
model=model_config.get("name") if model_config else None,
)
prompt_messages = [UserPromptMessage(content=prompt)]
try:
response = model_instance.invoke_llm(
prompt_messages=prompt_messages,
model_parameters={
"max_tokens": 512,
"temperature": 0
},
stream=False
)
try:
# the first step to generate the task prompt
prompt_content = model_instance.invoke_llm(
prompt_messages=prompt_messages,
model_parameters=model_parameters,
stream=False
)
except InvokeError as e:
error = str(e)
error_step = "generate prefix prompt"
rule_config["error"] = f"Failed to {error_step}. Error: {error}" if error else ""
return rule_config
rule_config["prompt"] = prompt_content.message.content
parameter_generate_prompt = parameter_template.format(
inputs={
"INPUT_TEXT": prompt_content.message.content,
},
remove_template_variables=False
)
parameter_messages = [UserPromptMessage(content=parameter_generate_prompt)]
# the second step to generate the task_parameter and task_statement
statement_generate_prompt = statement_template.format(
inputs={
"TASK_DESCRIPTION": instruction,
"INPUT_TEXT": prompt_content.message.content,
},
remove_template_variables=False
)
statement_messages = [UserPromptMessage(content=statement_generate_prompt)]
try:
parameter_content = model_instance.invoke_llm(
prompt_messages=parameter_messages,
model_parameters=model_parameters,
stream=False
)
rule_config["variables"] = re.findall(r'"\s*([^"]+)\s*"', parameter_content.message.content)
except InvokeError as e:
error = str(e)
error_step = "generate variables"
try:
statement_content = model_instance.invoke_llm(
prompt_messages=statement_messages,
model_parameters=model_parameters,
stream=False
)
rule_config["opening_statement"] = statement_content.message.content
except InvokeError as e:
error = str(e)
error_step = "generate conversation opener"
rule_config = output_parser.parse(response.message.content)
except InvokeError as e:
raise e
except OutputParserException:
raise ValueError('Please give a valid input for intended audience or hoping to solve problems.')
except Exception as e:
logging.exception(e)
rule_config = {
"prompt": "",
"variables": [],
"opening_statement": ""
}
rule_config["error"] = str(e)
rule_config["error"] = f"Failed to {error_step}. Error: {error}" if error else ""
return rule_config

View File

@ -1,14 +1,18 @@
from typing import Any
from core.llm_generator.output_parser.errors import OutputParserException
from core.llm_generator.prompts import RULE_CONFIG_GENERATE_TEMPLATE
from core.llm_generator.prompts import (
RULE_CONFIG_PARAMETER_GENERATE_TEMPLATE,
RULE_CONFIG_PROMPT_GENERATE_TEMPLATE,
RULE_CONFIG_STATEMENT_GENERATE_TEMPLATE,
)
from libs.json_in_md_parser import parse_and_check_json_markdown
class RuleConfigGeneratorOutputParser:
def get_format_instructions(self) -> str:
return RULE_CONFIG_GENERATE_TEMPLATE
def get_format_instructions(self) -> tuple[str, str, str]:
return RULE_CONFIG_PROMPT_GENERATE_TEMPLATE, RULE_CONFIG_PARAMETER_GENERATE_TEMPLATE, RULE_CONFIG_STATEMENT_GENERATE_TEMPLATE
def parse(self, text: str) -> Any:
try:

View File

@ -81,65 +81,73 @@ GENERATOR_QA_PROMPT = (
'<QA Pairs>'
)
RULE_CONFIG_GENERATE_TEMPLATE = """Given MY INTENDED AUDIENCES and HOPING TO SOLVE using a language model, please select \
the model prompt that best suits the input.
You will be provided with the prompt, variables, and an opening statement.
Only the content enclosed in double curly braces, such as {{variable}}, in the prompt can be considered as a variable; \
otherwise, it cannot exist as a variable in the variables.
If you believe revising the original input will result in a better response from the language model, you may \
suggest revisions.
WORKFLOW_RULE_CONFIG_PROMPT_GENERATE_TEMPLATE = """
Here is a task description for which I would like you to create a high-quality prompt template for:
<task_description>
{{TASK_DESCRIPTION}}
</task_description>
Based on task description, please create a well-structured prompt template that another AI could use to consistently complete the task. The prompt template should include:
- Do not inlcude <input> or <output> section and variables in the prompt, assume user will add them at their own will.
- Clear instructions for the AI that will be using this prompt, demarcated with <instructions> tags. The instructions should provide step-by-step directions on how to complete the task using the input variables. Also Specifies in the instructions that the output should not contain any xml tag.
- Relevant examples if needed to clarify the task further, demarcated with <example> tags. Do not include variables in the prompt. Give three pairs of input and output examples.
- Include other relevant sections demarcated with appropriate XML tags like <examples>, <instructions>.
- Use the same language as task description.
- Output in ``` xml ``` and start with <instruction>
Please generate the full prompt template with at least 300 words and output only the prompt template.
"""
<<PRINCIPLES OF GOOD PROMPT>>
Integrate the intended audience in the prompt e.g. the audience is an expert in the field.
Break down complex tasks into a sequence of simpler prompts in an interactive conversation.
Implement example-driven prompting (Use few-shot prompting).
When formatting your prompt start with Instruction followed by either Example if relevant. \
Subsequently present your content. Use one or more line breaks to separate instructions examples questions context and input data.
Incorporate the following phrases: Your task is and You MUST.
Incorporate the following phrases: You will be penalized.
Use leading words like writing think step by step.
Add to your prompt the following phrase Ensure that your answer is unbiased and does not rely on stereotypes.
Assign a role to the large language models.
Use Delimiters.
To write an essay /text /paragraph /article or any type of text that should be detailed: Write a detailed [essay/text/paragraph] for me on [topic] in detail by adding all the information necessary.
Clearly state the requirements that the model must follow in order to produce content in the form of the keywords regulations hint or instructions
RULE_CONFIG_PROMPT_GENERATE_TEMPLATE = """
Here is a task description for which I would like you to create a high-quality prompt template for:
<task_description>
{{TASK_DESCRIPTION}}
</task_description>
Based on task description, please create a well-structured prompt template that another AI could use to consistently complete the task. The prompt template should include:
- Descriptive variable names surrounded by {{ }} (two curly brackets) to indicate where the actual values will be substituted in. Choose variable names that clearly indicate the type of value expected. Variable names have to be composed of number, english alphabets and underline and nothing else.
- Clear instructions for the AI that will be using this prompt, demarcated with <instructions> tags. The instructions should provide step-by-step directions on how to complete the task using the input variables. Also Specifies in the instructions that the output should not contain any xml tag.
- Relevant examples if needed to clarify the task further, demarcated with <example> tags. Do not use curly brackets any other than in <instruction> section.
- Any other relevant sections demarcated with appropriate XML tags like <input>, <output>, etc.
- Use the same language as task description.
- Output in ``` xml ``` and start with <instruction>
Please generate the full prompt template and output only the prompt template.
"""
<< FORMATTING >>
Return a markdown code snippet with a JSON object formatted to look like, \
no any other string out of markdown code snippet:
```json
{{{{
"prompt": string \\ generated prompt
"variables": list of string \\ variables
"opening_statement": string \\ an opening statement to guide users on how to ask questions with generated prompt \
and fill in variables, with a welcome sentence, and keep TLDR.
}}}}
```
RULE_CONFIG_PARAMETER_GENERATE_TEMPLATE = """
I need to extract the following information from the input text. The <information to be extracted> tag specifies the 'type', 'description' and 'required' of the information to be extracted.
<information to be extracted>
variables name bounded two double curly brackets. Variable name has to be composed of number, english alphabets and underline and nothing else.
</information to be extracted>
<< EXAMPLES >>
[EXAMPLE A]
```json
{
"prompt": "I need your help to translate the following {{Input_language}}paper paragraph into {{Target_language}}, in a style similar to a popular science magazine in {{Target_language}}. #### Rules Ensure accurate conveyance of the original text's facts and context during translation. Maintain the original paragraph format and retain technical terms and company abbreviations ",
"variables": ["Input_language", "Target_language"],
"opening_statement": " Hi. I am your translation assistant. I can help you with any translation and ensure accurate conveyance of information. "
}
```
Step 1: Carefully read the input and understand the structure of the expected output.
Step 2: Extract relevant parameters from the provided text based on the name and description of object.
Step 3: Structure the extracted parameters to JSON object as specified in <structure>.
Step 4: Ensure that the list of variable_names is properly formatted and valid. The output should not contain any XML tags. Output an empty list if there is no valid variable name in input text.
[EXAMPLE B]
```json
{
"prompt": "Your task is to review the provided meeting notes and create a concise summary that captures the essential information, focusing on key takeaways and action items assigned to specific individuals or departments during the meeting. Use clear and professional language, and organize the summary in a logical manner using appropriate formatting such as headings, subheadings, and bullet points. Ensure that the summary is easy to understand and provides a comprehensive but succinct overview of the meeting's content, with a particular focus on clearly indicating who is responsible for each action item.",
"variables": ["meeting_notes"],
"opening_statement": "Hi! I'm your meeting notes summarizer AI. I can help you with any meeting notes and ensure accurate conveyance of information."
}
```
### Structure
Here is the structure of the expected output, I should always follow the output structure.
["variable_name_1", "variable_name_2"]
<< MY INTENDED AUDIENCES >>
{{audiences}}
### Input Text
Inside <text></text> XML tags, there is a text that I should extract parameters and convert to a JSON object.
<text>
{{INPUT_TEXT}}
</text>
<< HOPING TO SOLVE >>
{{hoping_to_solve}}
### Answer
I should always output a valid list. Output nothing other than the list of variable_name. Output an empty list if there is no variable name in input text.
"""
<< OUTPUT >>
"""
RULE_CONFIG_STATEMENT_GENERATE_TEMPLATE = """
<instruction>
Step 1: Identify the purpose of the chatbot from the variable {{TASK_DESCRIPTION}} and infer chatbot's tone (e.g., friendly, professional, etc.) to add personality traits.
Step 2: Create a coherent and engaging opening statement.
Step 3: Ensure the output is welcoming and clearly explains what the chatbot is designed to do. Do not include any XML tags in the output.
Please use the same language as the user's input language. If user uses chinese then generate opening statement in chinese, if user uses english then generate opening statement in english.
Example Input:
Provide customer support for an e-commerce website
Example Output:
Welcome! I'm here to assist you with any questions or issues you might have with your shopping experience. Whether you're looking for product information, need help with your order, or have any other inquiries, feel free to ask. I'm friendly, helpful, and ready to support you in any way I can.
<Task>
Here is the task description: {{INPUT_TEXT}}
You just need to generate the output
"""

View File

@ -86,6 +86,9 @@
- `agent-thought` Agent reasoning, generally over 70B with thought chain capability.
- `vision` Vision, i.e., image understanding.
- `tool-call`
- `multi-tool-call`
- `stream-tool-call`
### FetchFrom

View File

@ -87,6 +87,9 @@
- `agent-thought` Agent 推理,一般超过 70B 有思维链能力。
- `vision` 视觉,即:图像理解。
- `tool-call` 工具调用
- `multi-tool-call` 多工具调用
- `stream-tool-call` 流式工具调用
### FetchFrom

View File

@ -0,0 +1,7 @@
- llama-3.1-405b-reasoning
- llama-3.1-70b-versatile
- llama-3.1-8b-instant
- llama3-70b-8192
- llama3-8b-8192
- mixtral-8x7b-32768
- llama2-70b-4096

View File

@ -0,0 +1,25 @@
model: llama-3.1-405b-reasoning
label:
zh_Hans: Llama-3.1-405b-reasoning
en_US: Llama-3.1-405b-reasoning
model_type: llm
features:
- agent-thought
model_properties:
mode: chat
context_size: 131072
parameter_rules:
- name: temperature
use_template: temperature
- name: top_p
use_template: top_p
- name: max_tokens
use_template: max_tokens
default: 512
min: 1
max: 8192
pricing:
input: '0.05'
output: '0.1'
unit: '0.000001'
currency: USD

View File

@ -0,0 +1,25 @@
model: llama-3.1-70b-versatile
label:
zh_Hans: Llama-3.1-70b-versatile
en_US: Llama-3.1-70b-versatile
model_type: llm
features:
- agent-thought
model_properties:
mode: chat
context_size: 131072
parameter_rules:
- name: temperature
use_template: temperature
- name: top_p
use_template: top_p
- name: max_tokens
use_template: max_tokens
default: 512
min: 1
max: 8192
pricing:
input: '0.05'
output: '0.1'
unit: '0.000001'
currency: USD

View File

@ -0,0 +1,25 @@
model: llama-3.1-8b-instant
label:
zh_Hans: Llama-3.1-8b-instant
en_US: Llama-3.1-8b-instant
model_type: llm
features:
- agent-thought
model_properties:
mode: chat
context_size: 131072
parameter_rules:
- name: temperature
use_template: temperature
- name: top_p
use_template: top_p
- name: max_tokens
use_template: max_tokens
default: 512
min: 1
max: 8192
pricing:
input: '0.05'
output: '0.1'
unit: '0.000001'
currency: USD

View File

@ -12,6 +12,9 @@
- google/gemini-pro
- cohere/command-r-plus
- cohere/command-r
- meta-llama/llama-3.1-405b-instruct
- meta-llama/llama-3.1-70b-instruct
- meta-llama/llama-3.1-8b-instruct
- meta-llama/llama-3-70b-instruct
- meta-llama/llama-3-8b-instruct
- mistralai/mixtral-8x22b-instruct

View File

@ -0,0 +1,23 @@
model: meta-llama/llama-3.1-405b-instruct
label:
en_US: llama-3.1-405b-instruct
model_type: llm
model_properties:
mode: chat
context_size: 128000
parameter_rules:
- name: temperature
use_template: temperature
- name: top_p
use_template: top_p
- name: max_tokens
use_template: max_tokens
required: true
default: 512
min: 1
max: 128000
pricing:
input: "3"
output: "3"
unit: "0.000001"
currency: USD

View File

@ -0,0 +1,23 @@
model: meta-llama/llama-3.1-70b-instruct
label:
en_US: llama-3.1-70b-instruct
model_type: llm
model_properties:
mode: chat
context_size: 128000
parameter_rules:
- name: temperature
use_template: temperature
- name: top_p
use_template: top_p
- name: max_tokens
use_template: max_tokens
required: true
default: 512
min: 1
max: 128000
pricing:
input: "0.9"
output: "0.9"
unit: "0.000001"
currency: USD

View File

@ -0,0 +1,23 @@
model: meta-llama/llama-3.1-8b-instruct
label:
en_US: llama-3.1-8b-instruct
model_type: llm
model_properties:
mode: chat
context_size: 128000
parameter_rules:
- name: temperature
use_template: temperature
- name: top_p
use_template: top_p
- name: max_tokens
use_template: max_tokens
required: true
default: 512
min: 1
max: 128000
pricing:
input: "0.2"
output: "0.2"
unit: "0.000001"
currency: USD

View File

@ -262,6 +262,10 @@ You should also complete the text started with ``` but not tell ``` directly.
:param prompt_messages: prompt messages
:return: llm response
"""
if response.status_code != 200 and response.status_code != HTTPStatus.OK:
raise ServiceUnavailableError(
response.message
)
# transform assistant message to prompt message
assistant_prompt_message = AssistantPromptMessage(
content=response.output.choices[0].message.content,
@ -421,7 +425,7 @@ You should also complete the text started with ``` but not tell ``` directly.
raise ValueError(f"Got unknown type {message}")
return message_text
def _convert_messages_to_prompt(self, messages: list[PromptMessage]) -> str:
"""
Format a list of messages into a full prompt for the Anthropic model
@ -496,6 +500,9 @@ You should also complete the text started with ``` but not tell ``` directly.
tongyi_messages.append({
'role': 'assistant',
'content': content if not rich_content else [{"text": content}],
'tool_calls': [tool_call.model_dump() for tool_call in
prompt_message.tool_calls] if prompt_message.tool_calls else []
})
elif isinstance(prompt_message, ToolPromptMessage):
tongyi_messages.append({

View File

@ -25,7 +25,6 @@ def measure_time():
yield timing_info
finally:
timing_info['end'] = datetime.now()
print(f"Execution time: {timing_info['end'] - timing_info['start']}")
def replace_text_with_content(data):

View File

@ -5,15 +5,20 @@ from core.model_runtime.entities.model_entities import ModelType
from core.model_runtime.errors.invoke import InvokeAuthorizationError
from core.rag.data_post_processor.reorder import ReorderRunner
from core.rag.models.document import Document
from core.rag.rerank.rerank import RerankRunner
from core.rag.rerank.constants.rerank_mode import RerankMode
from core.rag.rerank.entity.weight import KeywordSetting, VectorSetting, Weights
from core.rag.rerank.rerank_model import RerankModelRunner
from core.rag.rerank.weight_rerank import WeightRerankRunner
class DataPostProcessor:
"""Interface for data post-processing document.
"""
def __init__(self, tenant_id: str, reranking_model: dict, reorder_enabled: bool = False):
self.rerank_runner = self._get_rerank_runner(reranking_model, tenant_id)
def __init__(self, tenant_id: str, reranking_mode: str,
reranking_model: Optional[dict] = None, weights: Optional[dict] = None,
reorder_enabled: bool = False):
self.rerank_runner = self._get_rerank_runner(reranking_mode, tenant_id, reranking_model, weights)
self.reorder_runner = self._get_reorder_runner(reorder_enabled)
def invoke(self, query: str, documents: list[Document], score_threshold: Optional[float] = None,
@ -26,19 +31,37 @@ class DataPostProcessor:
return documents
def _get_rerank_runner(self, reranking_model: dict, tenant_id: str) -> Optional[RerankRunner]:
if reranking_model:
try:
model_manager = ModelManager()
rerank_model_instance = model_manager.get_model_instance(
tenant_id=tenant_id,
provider=reranking_model['reranking_provider_name'],
model_type=ModelType.RERANK,
model=reranking_model['reranking_model_name']
def _get_rerank_runner(self, reranking_mode: str, tenant_id: str, reranking_model: Optional[dict] = None,
weights: Optional[dict] = None) -> Optional[RerankModelRunner | WeightRerankRunner]:
if reranking_mode == RerankMode.WEIGHTED_SCORE.value and weights:
return WeightRerankRunner(
tenant_id,
Weights(
weight_type=weights['weight_type'],
vector_setting=VectorSetting(
vector_weight=weights['vector_setting']['vector_weight'],
embedding_provider_name=weights['vector_setting']['embedding_provider_name'],
embedding_model_name=weights['vector_setting']['embedding_model_name'],
),
keyword_setting=KeywordSetting(
keyword_weight=weights['keyword_setting']['keyword_weight'],
)
)
except InvokeAuthorizationError:
return None
return RerankRunner(rerank_model_instance)
)
elif reranking_mode == RerankMode.RERANKING_MODEL.value:
if reranking_model:
try:
model_manager = ModelManager()
rerank_model_instance = model_manager.get_model_instance(
tenant_id=tenant_id,
provider=reranking_model['reranking_provider_name'],
model_type=ModelType.RERANK,
model=reranking_model['reranking_model_name']
)
except InvokeAuthorizationError:
return None
return RerankModelRunner(rerank_model_instance)
return None
return None
def _get_reorder_runner(self, reorder_enabled) -> Optional[ReorderRunner]:

View File

@ -1,4 +1,5 @@
import re
from typing import Optional
import jieba
from jieba.analyse import default_tfidf
@ -11,7 +12,7 @@ class JiebaKeywordTableHandler:
def __init__(self):
default_tfidf.stop_words = STOPWORDS
def extract_keywords(self, text: str, max_keywords_per_chunk: int = 10) -> set[str]:
def extract_keywords(self, text: str, max_keywords_per_chunk: Optional[int] = 10) -> set[str]:
"""Extract keywords with JIEBA tfidf."""
keywords = jieba.analyse.extract_tags(
sentence=text,

View File

@ -6,6 +6,7 @@ from flask import Flask, current_app
from core.rag.data_post_processor.data_post_processor import DataPostProcessor
from core.rag.datasource.keyword.keyword_factory import Keyword
from core.rag.datasource.vdb.vector_factory import Vector
from core.rag.rerank.constants.rerank_mode import RerankMode
from core.rag.retrieval.retrival_methods import RetrievalMethod
from extensions.ext_database import db
from models.dataset import Dataset
@ -26,13 +27,19 @@ class RetrievalService:
@classmethod
def retrieve(cls, retrival_method: str, dataset_id: str, query: str,
top_k: int, score_threshold: Optional[float] = .0, reranking_model: Optional[dict] = None):
top_k: int, score_threshold: Optional[float] = .0,
reranking_model: Optional[dict] = None, reranking_mode: Optional[str] = None,
weights: Optional[dict] = None):
dataset = db.session.query(Dataset).filter(
Dataset.id == dataset_id
).first()
if not dataset or dataset.available_document_count == 0 or dataset.available_segment_count == 0:
return []
all_documents = []
keyword_search_documents = []
embedding_search_documents = []
full_text_search_documents = []
hybrid_search_documents = []
threads = []
exceptions = []
# retrieval_model source with keyword
@ -87,7 +94,8 @@ class RetrievalService:
raise Exception(exception_message)
if retrival_method == RetrievalMethod.HYBRID_SEARCH.value:
data_post_processor = DataPostProcessor(str(dataset.tenant_id), reranking_model, False)
data_post_processor = DataPostProcessor(str(dataset.tenant_id), reranking_mode,
reranking_model, weights, False)
all_documents = data_post_processor.invoke(
query=query,
documents=all_documents,
@ -143,7 +151,9 @@ class RetrievalService:
if documents:
if reranking_model and retrival_method == RetrievalMethod.SEMANTIC_SEARCH.value:
data_post_processor = DataPostProcessor(str(dataset.tenant_id), reranking_model, False)
data_post_processor = DataPostProcessor(str(dataset.tenant_id),
RerankMode.RERANKING_MODEL.value,
reranking_model, None, False)
all_documents.extend(data_post_processor.invoke(
query=query,
documents=documents,
@ -175,7 +185,9 @@ class RetrievalService:
)
if documents:
if reranking_model and retrival_method == RetrievalMethod.FULL_TEXT_SEARCH.value:
data_post_processor = DataPostProcessor(str(dataset.tenant_id), reranking_model, False)
data_post_processor = DataPostProcessor(str(dataset.tenant_id),
RerankMode.RERANKING_MODEL.value,
reranking_model, None, False)
all_documents.extend(data_post_processor.invoke(
query=query,
documents=documents,

View File

@ -396,9 +396,11 @@ class QdrantVector(BaseVector):
documents = []
for result in results:
if result:
documents.append(self._document_from_scored_point(
document = self._document_from_scored_point(
result, Field.CONTENT_KEY.value, Field.METADATA_KEY.value
))
)
document.metadata['vector'] = result.vector
documents.append(document)
return documents

View File

View File

@ -0,0 +1,8 @@
from enum import Enum
class RerankMode(Enum):
    """Rerank strategy selector for multi-path retrieval.

    Member values match the `reranking_mode` strings stored in retrieval
    configurations; callers compare via `.value`.
    """
    RERANKING_MODEL = 'reranking_model'
    WEIGHTED_SCORE = 'weighted_score'

View File

@ -0,0 +1,23 @@
from pydantic import BaseModel
class VectorSetting(BaseModel):
    """Settings for the vector (semantic similarity) component of weighted reranking."""
    # relative weight given to the embedding cosine-similarity score
    vector_weight: float
    embedding_provider_name: str
    embedding_model_name: str


class KeywordSetting(BaseModel):
    """Settings for the keyword (TF-IDF) component of weighted reranking."""
    # relative weight given to the keyword score
    keyword_weight: float


class Weights(BaseModel):
    """Model for weighted rerank."""
    weight_type: str
    vector_setting: VectorSetting
    keyword_setting: KeywordSetting

View File

@ -4,7 +4,7 @@ from core.model_manager import ModelInstance
from core.rag.models.document import Document
class RerankRunner:
class RerankModelRunner:
def __init__(self, rerank_model_instance: ModelInstance) -> None:
self.rerank_model_instance = rerank_model_instance

View File

@ -0,0 +1,178 @@
import math
from collections import Counter
from typing import Optional
import numpy as np
from core.embedding.cached_embedding import CacheEmbedding
from core.model_manager import ModelManager
from core.model_runtime.entities.model_entities import ModelType
from core.rag.datasource.keyword.jieba.jieba_keyword_table_handler import JiebaKeywordTableHandler
from core.rag.models.document import Document
from core.rag.rerank.entity.weight import VectorSetting, Weights
class WeightRerankRunner:
    """Rerank documents by a weighted mix of keyword (TF-IDF) and vector (cosine) scores."""

    def __init__(self, tenant_id: str, weights: Weights) -> None:
        self.tenant_id = tenant_id
        self.weights = weights

    def run(self, query: str, documents: list[Document], score_threshold: Optional[float] = None,
            top_n: Optional[int] = None, user: Optional[str] = None) -> list[Document]:
        """
        Run weighted rerank over the candidate documents.

        :param query: search query
        :param documents: documents for reranking; each must carry
            metadata['doc_id'] and either metadata['score'] or metadata['vector']
        :param score_threshold: minimum combined score to keep a document
        :param top_n: top n
        :param user: unique user id if needed (unused, kept for interface parity)
        :return: documents sorted by combined score, descending
        """
        # de-duplicate by doc_id, keeping first occurrence order;
        # set membership replaces the original O(n^2) list scan
        seen_doc_ids = set()
        unique_documents = []
        for document in documents:
            doc_id = document.metadata['doc_id']
            if doc_id not in seen_doc_ids:
                seen_doc_ids.add(doc_id)
                unique_documents.append(document)
        documents = unique_documents

        query_scores = self._calculate_keyword_score(query, documents)
        query_vector_scores = self._calculate_cosine(self.tenant_id, query, documents, self.weights.vector_setting)

        rerank_documents = []
        for document, query_score, query_vector_score in zip(documents, query_scores, query_vector_scores):
            score = (self.weights.vector_setting.vector_weight * query_vector_score
                     + self.weights.keyword_setting.keyword_weight * query_score)
            # explicit None check so a threshold of 0.0 is honoured
            # (the original truthiness test silently disabled it)
            if score_threshold is not None and score < score_threshold:
                continue
            document.metadata['score'] = score
            rerank_documents.append(document)

        rerank_documents.sort(key=lambda x: x.metadata['score'], reverse=True)
        return rerank_documents[:top_n] if top_n else rerank_documents

    def _calculate_keyword_score(self, query: str, documents: list[Document]) -> list[float]:
        """
        Score each document against the query via TF-IDF cosine similarity.

        Side effect: stores the extracted keywords on each document under
        metadata['keywords'] (same as the original implementation).

        :param query: search query
        :param documents: documents for reranking
        :return: one similarity score per document, in input order
        """
        keyword_table_handler = JiebaKeywordTableHandler()
        query_keywords = keyword_table_handler.extract_keywords(query, None)

        documents_keywords = []
        for document in documents:
            document_keywords = keyword_table_handler.extract_keywords(document.page_content, None)
            document.metadata['keywords'] = document_keywords
            documents_keywords.append(document_keywords)

        # term frequencies of the query keywords
        query_keyword_counts = Counter(query_keywords)
        total_documents = len(documents)

        # inverse document frequency across the whole candidate set
        all_keywords = set()
        for document_keywords in documents_keywords:
            all_keywords.update(document_keywords)
        keyword_idf = {}
        for keyword in all_keywords:
            doc_count_containing_keyword = sum(1 for doc_keywords in documents_keywords if keyword in doc_keywords)
            keyword_idf[keyword] = math.log((1 + total_documents) / (1 + doc_count_containing_keyword)) + 1

        query_tfidf = {
            keyword: count * keyword_idf.get(keyword, 0)
            for keyword, count in query_keyword_counts.items()
        }

        documents_tfidf = []
        for document_keywords in documents_keywords:
            document_keyword_counts = Counter(document_keywords)
            documents_tfidf.append({
                keyword: count * keyword_idf.get(keyword, 0)
                for keyword, count in document_keyword_counts.items()
            })

        def cosine_similarity(vec1, vec2):
            # sparse cosine similarity over shared keys; 0.0 for zero vectors
            intersection = set(vec1.keys()) & set(vec2.keys())
            numerator = sum(vec1[x] * vec2[x] for x in intersection)
            denominator = math.sqrt(sum(v ** 2 for v in vec1.values())) * \
                math.sqrt(sum(v ** 2 for v in vec2.values()))
            if not denominator:
                return 0.0
            return float(numerator) / denominator

        return [cosine_similarity(query_tfidf, document_tfidf) for document_tfidf in documents_tfidf]

    def _calculate_cosine(self, tenant_id: str, query: str, documents: list[Document],
                          vector_setting: VectorSetting) -> list[float]:
        """
        Score each document against the query via embedding cosine similarity.

        Documents that already carry metadata['score'] keep that value
        (e.g. a score produced by the vector store); otherwise the document
        embedding is expected under metadata['vector'].

        :param tenant_id: tenant owning the embedding model
        :param query: search query
        :param documents: documents for reranking
        :param vector_setting: embedding provider/model selection
        :return: one similarity score per document, in input order
        """
        model_manager = ModelManager()
        embedding_model = model_manager.get_model_instance(
            tenant_id=tenant_id,
            provider=vector_setting.embedding_provider_name,
            model_type=ModelType.TEXT_EMBEDDING,
            model=vector_setting.embedding_model_name
        )
        cache_embedding = CacheEmbedding(embedding_model)
        query_vector = cache_embedding.embed_query(query)

        query_vector_scores = []
        for document in documents:
            if 'score' in document.metadata:
                query_vector_scores.append(document.metadata['score'])
                continue
            vec1 = np.array(query_vector)
            vec2 = np.array(document.metadata['vector'])
            denominator = np.linalg.norm(vec1) * np.linalg.norm(vec2)
            # guard zero-norm vectors to avoid nan (original divided unconditionally)
            cosine_sim = float(np.dot(vec1, vec2) / denominator) if denominator else 0.0
            query_vector_scores.append(cosine_sim)
        return query_vector_scores

View File

@ -1,4 +1,6 @@
import math
import threading
from collections import Counter
from typing import Optional, cast
from flask import Flask, current_app
@ -14,9 +16,10 @@ from core.model_runtime.entities.model_entities import ModelFeature, ModelType
from core.model_runtime.model_providers.__base.large_language_model import LargeLanguageModel
from core.ops.ops_trace_manager import TraceQueueManager, TraceTask, TraceTaskName
from core.ops.utils import measure_time
from core.rag.data_post_processor.data_post_processor import DataPostProcessor
from core.rag.datasource.keyword.jieba.jieba_keyword_table_handler import JiebaKeywordTableHandler
from core.rag.datasource.retrieval_service import RetrievalService
from core.rag.models.document import Document
from core.rag.rerank.rerank import RerankRunner
from core.rag.retrieval.retrival_methods import RetrievalMethod
from core.rag.retrieval.router.multi_dataset_function_call_router import FunctionCallMultiDatasetRouter
from core.rag.retrieval.router.multi_dataset_react_route import ReactMultiDatasetRouter
@ -132,8 +135,9 @@ class DatasetRetrieval:
app_id, tenant_id, user_id, user_from,
available_datasets, query, retrieve_config.top_k,
retrieve_config.score_threshold,
retrieve_config.reranking_model.get('reranking_provider_name'),
retrieve_config.reranking_model.get('reranking_model_name'),
retrieve_config.rerank_mode,
retrieve_config.reranking_model,
retrieve_config.weights,
message_id,
)
@ -272,7 +276,8 @@ class DatasetRetrieval:
retrival_method=retrival_method, dataset_id=dataset.id,
query=query,
top_k=top_k, score_threshold=score_threshold,
reranking_model=reranking_model
reranking_model=reranking_model,
weights=retrieval_model_config.get('weights', None),
)
self._on_query(query, [dataset_id], app_id, user_from, user_id)
@ -292,14 +297,18 @@ class DatasetRetrieval:
query: str,
top_k: int,
score_threshold: float,
reranking_provider_name: str,
reranking_model_name: str,
reranking_mode: str,
reranking_model: Optional[dict] = None,
weights: Optional[dict] = None,
reranking_enable: bool = True,
message_id: Optional[str] = None,
):
threads = []
all_documents = []
dataset_ids = [dataset.id for dataset in available_datasets]
index_type = None
for dataset in available_datasets:
index_type = dataset.indexing_technique
retrieval_thread = threading.Thread(target=self._retriever, kwargs={
'flask_app': current_app._get_current_object(),
'dataset_id': dataset.id,
@ -311,23 +320,24 @@ class DatasetRetrieval:
retrieval_thread.start()
for thread in threads:
thread.join()
# do rerank for searched documents
model_manager = ModelManager()
rerank_model_instance = model_manager.get_model_instance(
tenant_id=tenant_id,
provider=reranking_provider_name,
model_type=ModelType.RERANK,
model=reranking_model_name
)
rerank_runner = RerankRunner(rerank_model_instance)
if reranking_enable:
# do rerank for searched documents
data_post_processor = DataPostProcessor(tenant_id, reranking_mode,
reranking_model, weights, False)
with measure_time() as timer:
all_documents = rerank_runner.run(
query, all_documents,
score_threshold,
top_k
)
with measure_time() as timer:
all_documents = data_post_processor.invoke(
query=query,
documents=all_documents,
score_threshold=score_threshold,
top_n=top_k
)
else:
if index_type == "economy":
all_documents = self.calculate_keyword_score(query, all_documents, top_k)
elif index_type == "high_quality":
all_documents = self.calculate_vector_score(all_documents, top_k, score_threshold)
self._on_query(query, dataset_ids, app_id, user_from, user_id)
if all_documents:
@ -420,7 +430,8 @@ class DatasetRetrieval:
score_threshold=retrieval_model['score_threshold']
if retrieval_model['score_threshold_enabled'] else None,
reranking_model=retrieval_model['reranking_model']
if retrieval_model['reranking_enable'] else None
if retrieval_model['reranking_enable'] else None,
weights=retrieval_model.get('weights', None),
)
all_documents.extend(documents)
@ -513,3 +524,94 @@ class DatasetRetrieval:
tools.append(tool)
return tools
    def calculate_keyword_score(self, query: str, documents: list[Document], top_k: int) -> list[Document]:
        """
        Rank documents against the query with TF-IDF keyword vectors and
        cosine similarity, writing each score to metadata['score'].

        Side effect: extracted keywords are stored on each document under
        metadata['keywords'].

        :param query: search query
        :param documents: documents for reranking
        :param top_k: number of documents to return; falsy means all
        :return: documents sorted by keyword score, descending
        """
        keyword_table_handler = JiebaKeywordTableHandler()
        # None -> no cap on the number of extracted keywords
        query_keywords = keyword_table_handler.extract_keywords(query, None)
        documents_keywords = []
        for document in documents:
            # get the document keywords
            document_keywords = keyword_table_handler.extract_keywords(document.page_content, None)
            document.metadata['keywords'] = document_keywords
            documents_keywords.append(document_keywords)
        # Counter query keywords(TF)
        query_keyword_counts = Counter(query_keywords)
        # total documents
        total_documents = len(documents)
        # calculate all documents' keywords IDF
        all_keywords = set()
        for document_keywords in documents_keywords:
            all_keywords.update(document_keywords)
        keyword_idf = {}
        for keyword in all_keywords:
            # count documents containing this keyword
            doc_count_containing_keyword = sum(1 for doc_keywords in documents_keywords if keyword in doc_keywords)
            # smoothed IDF
            keyword_idf[keyword] = math.log((1 + total_documents) / (1 + doc_count_containing_keyword)) + 1
        query_tfidf = {}
        for keyword, count in query_keyword_counts.items():
            tf = count
            idf = keyword_idf.get(keyword, 0)
            query_tfidf[keyword] = tf * idf
        # calculate all documents' TF-IDF
        documents_tfidf = []
        for document_keywords in documents_keywords:
            document_keyword_counts = Counter(document_keywords)
            document_tfidf = {}
            for keyword, count in document_keyword_counts.items():
                tf = count
                idf = keyword_idf.get(keyword, 0)
                document_tfidf[keyword] = tf * idf
            documents_tfidf.append(document_tfidf)

        def cosine_similarity(vec1, vec2):
            # sparse cosine similarity over shared keys; 0.0 for zero vectors
            intersection = set(vec1.keys()) & set(vec2.keys())
            numerator = sum(vec1[x] * vec2[x] for x in intersection)
            sum1 = sum(vec1[x] ** 2 for x in vec1.keys())
            sum2 = sum(vec2[x] ** 2 for x in vec2.keys())
            denominator = math.sqrt(sum1) * math.sqrt(sum2)
            if not denominator:
                return 0.0
            else:
                return float(numerator) / denominator

        similarities = []
        for document_tfidf in documents_tfidf:
            similarity = cosine_similarity(query_tfidf, document_tfidf)
            similarities.append(similarity)

        for document, score in zip(documents, similarities):
            # attach the keyword score to the document
            document.metadata['score'] = score
        documents = sorted(documents, key=lambda x: x.metadata['score'], reverse=True)
        return documents[:top_k] if top_k else documents
def calculate_vector_score(self, all_documents: list[Document],
top_k: int, score_threshold: float) -> list[Document]:
filter_documents = []
for document in all_documents:
if document.metadata['score'] >= score_threshold:
filter_documents.append(document)
if not filter_documents:
return []
filter_documents = sorted(filter_documents, key=lambda x: x.metadata['score'], reverse=True)
return filter_documents[:top_k] if top_k else filter_documents

View File

View File

@ -7,7 +7,7 @@ from core.callback_handler.index_tool_callback_handler import DatasetIndexToolCa
from core.model_manager import ModelManager
from core.model_runtime.entities.model_entities import ModelType
from core.rag.datasource.retrieval_service import RetrievalService
from core.rag.rerank.rerank import RerankRunner
from core.rag.rerank.rerank_model import RerankModelRunner
from core.rag.retrieval.retrival_methods import RetrievalMethod
from core.tools.tool.dataset_retriever.dataset_retriever_base_tool import DatasetRetrieverBaseTool
from extensions.ext_database import db
@ -72,7 +72,7 @@ class DatasetMultiRetrieverTool(DatasetRetrieverBaseTool):
model=self.reranking_model_name
)
rerank_runner = RerankRunner(rerank_model_instance)
rerank_runner = RerankModelRunner(rerank_model_instance)
all_documents = rerank_runner.run(query, all_documents, self.score_threshold, self.top_k)
for hit_callback in self.hit_callbacks:
@ -180,7 +180,8 @@ class DatasetMultiRetrieverTool(DatasetRetrieverBaseTool):
score_threshold=retrieval_model['score_threshold']
if retrieval_model['score_threshold_enabled'] else None,
reranking_model=retrieval_model['reranking_model']
if retrieval_model['reranking_enable'] else None
if retrieval_model['reranking_enable'] else None,
weights=retrieval_model.get('weights', None),
)
all_documents.extend(documents)

View File

@ -78,7 +78,8 @@ class DatasetRetrieverTool(DatasetRetrieverBaseTool):
score_threshold=retrieval_model['score_threshold']
if retrieval_model['score_threshold_enabled'] else None,
reranking_model=retrieval_model['reranking_model']
if retrieval_model['reranking_enable'] else None
if retrieval_model['reranking_enable'] else None,
weights=retrieval_model.get('weights', None),
)
else:
documents = []

View File

@ -13,13 +13,41 @@ class RerankingModelConfig(BaseModel):
model: str
class VectorSetting(BaseModel):
    """
    Vector Setting.
    """
    # relative weight given to the embedding cosine-similarity score
    vector_weight: float
    embedding_provider_name: str
    embedding_model_name: str


class KeywordSetting(BaseModel):
    """
    Keyword Setting.
    """
    # relative weight given to the keyword (TF-IDF) score
    keyword_weight: float


class WeightedScoreConfig(BaseModel):
    """
    Weighted score Config.

    NOTE(review): mirrors core.rag.rerank.entity.weight — keep the two in sync.
    """
    weight_type: str
    vector_setting: VectorSetting
    keyword_setting: KeywordSetting
class MultipleRetrievalConfig(BaseModel):
    """
    Multiple Retrieval Config.
    """
    top_k: int
    score_threshold: Optional[float] = None
    reranking_mode: str = 'reranking_model'
    reranking_enable: bool = True
    # Only one of the two settings below is supplied, depending on
    # reranking_mode ('reranking_model' vs 'weighted_score'); declaring both
    # as required made validation fail for every config, so they are optional.
    reranking_model: Optional[RerankingModelConfig] = None
    weights: Optional[WeightedScoreConfig] = None
class ModelConfig(BaseModel):

View File

@ -137,13 +137,38 @@ class KnowledgeRetrievalNode(BaseNode):
planning_strategy=planning_strategy
)
elif node_data.retrieval_mode == DatasetRetrieveConfigEntity.RetrieveStrategy.MULTIPLE.value:
if node_data.multiple_retrieval_config.reranking_mode == 'reranking_model':
reranking_model = {
'reranking_provider_name': node_data.multiple_retrieval_config.reranking_model['provider'],
'reranking_model_name': node_data.multiple_retrieval_config.reranking_model['name']
}
weights = None
elif node_data.multiple_retrieval_config.reranking_mode == 'weighted_score':
reranking_model = None
weights = {
'weight_type': node_data.multiple_retrieval_config.weights.weight_type,
'vector_setting': {
"vector_weight": node_data.multiple_retrieval_config.weights.vector_setting.vector_weight,
"embedding_provider_name": node_data.multiple_retrieval_config.weights.vector_setting.embedding_provider_name,
"embedding_model_name": node_data.multiple_retrieval_config.weights.vector_setting.embedding_model_name,
},
'keyword_setting': {
"keyword_weight": node_data.multiple_retrieval_config.weights.keyword_setting.keyword_weight
}
}
else:
reranking_model = None
weights = None
all_documents = dataset_retrieval.multiple_retrieve(self.app_id, self.tenant_id, self.user_id,
self.user_from.value,
available_datasets, query,
node_data.multiple_retrieval_config.top_k,
node_data.multiple_retrieval_config.score_threshold,
node_data.multiple_retrieval_config.reranking_model.provider,
node_data.multiple_retrieval_config.reranking_model.model)
node_data.multiple_retrieval_config.reranking_mode,
reranking_model,
weights,
node_data.multiple_retrieval_config.reranking_enable,
)
context_list = []
if all_documents:

View File

@ -31,15 +31,18 @@ logger = logging.getLogger(__name__)
class WorkflowEntry:
def run_workflow(self, workflow: Workflow,
user_id: str,
user_from: UserFrom,
invoke_from: InvokeFrom,
user_inputs: Mapping[str, Any],
system_inputs: Mapping[SystemVariable, Any],
callbacks: Sequence[BaseWorkflowCallback],
call_depth: int = 0,
variable_pool: Optional[VariablePool] = None) -> None:
def run_workflow(
self,
*,
workflow: Workflow,
user_id: str,
user_from: UserFrom,
invoke_from: InvokeFrom,
user_inputs: Mapping[str, Any],
system_inputs: Mapping[SystemVariable, Any],
callbacks: Sequence[WorkflowCallback],
call_depth: int = 0
) -> None:
"""
:param workflow: Workflow instance
:param user_id: user id
@ -66,12 +69,11 @@ class WorkflowEntry:
raise ValueError('edges in workflow graph must be a list')
# init variable pool
if not variable_pool:
variable_pool = VariablePool(
system_variables=system_inputs,
user_inputs=user_inputs,
environment_variables=workflow.environment_variables,
)
variable_pool = VariablePool(
system_variables=system_inputs,
user_inputs=user_inputs,
environment_variables=workflow.environment_variables,
)
workflow_call_max_depth = dify_config.WORKFLOW_CALL_MAX_DEPTH
if call_depth > workflow_call_max_depth:

View File

@ -18,10 +18,28 @@ reranking_model_fields = {
'reranking_model_name': fields.String
}
# flask-restful marshalling maps for serializing weighted-score rerank settings
keyword_setting_fields = {
    'keyword_weight': fields.Float
}

vector_setting_fields = {
    'vector_weight': fields.Float,
    'embedding_model_name': fields.String,
    'embedding_provider_name': fields.String,
}

weighted_score_fields = {
    'weight_type': fields.String,
    'keyword_setting': fields.Nested(keyword_setting_fields),
    'vector_setting': fields.Nested(vector_setting_fields),
}
dataset_retrieval_model_fields = {
'search_method': fields.String,
'reranking_enable': fields.Boolean,
'reranking_mode': fields.String,
'reranking_model': fields.Nested(reranking_model_fields),
'weights': fields.Nested(weighted_score_fields, allow_null=True),
'top_k': fields.Integer,
'score_threshold_enabled': fields.Boolean,
'score_threshold': fields.Float

View File

@ -0,0 +1,41 @@
"""update model
Revision ID: 53bf8af60645
Revises: 8e5588e6412e
Create Date: 2024-07-24 08:06:55.291031
"""
import sqlalchemy as sa
from alembic import op
import models as models
# revision identifiers, used by Alembic.
revision = '53bf8af60645'
down_revision = '8e5588e6412e'
branch_labels = None
depends_on = None
def upgrade():
    # Widen embeddings.provider_name from VARCHAR(40) to VARCHAR(255)
    # so longer provider identifiers fit.
    # ### commands auto generated by Alembic - please adjust! ###
    with op.batch_alter_table('embeddings', schema=None) as batch_op:
        batch_op.alter_column('provider_name',
               existing_type=sa.VARCHAR(length=40),
               type_=sa.String(length=255),
               existing_nullable=False,
               existing_server_default=sa.text("''::character varying"))

    # ### end Alembic commands ###
def downgrade():
    # Revert embeddings.provider_name to VARCHAR(40).
    # NOTE(review): values longer than 40 chars written after the upgrade
    # would be truncated/rejected here — presumably acceptable for rollback.
    # ### commands auto generated by Alembic - please adjust! ###
    with op.batch_alter_table('embeddings', schema=None) as batch_op:
        batch_op.alter_column('provider_name',
               existing_type=sa.String(length=255),
               type_=sa.VARCHAR(length=40),
               existing_nullable=False,
               existing_server_default=sa.text("''::character varying"))

    # ### end Alembic commands ###

View File

@ -640,7 +640,7 @@ class Embedding(db.Model):
hash = db.Column(db.String(64), nullable=False)
embedding = db.Column(db.LargeBinary, nullable=False)
created_at = db.Column(db.DateTime, nullable=False, server_default=db.text('CURRENT_TIMESTAMP(0)'))
provider_name = db.Column(db.String(40), nullable=False,
provider_name = db.Column(db.String(255), nullable=False,
server_default=db.text("''::character varying"))
def set_embedding(self, embedding_data: list[float]):

View File

@ -328,7 +328,9 @@ class AppModelConfig(db.Model):
return {'retrieval_model': 'single'}
else:
return dataset_configs
return {'retrieval_model': 'single'}
return {
'retrieval_model': 'multiple',
}
@property
def file_upload_dict(self) -> dict:

1234
api/poetry.lock generated

File diff suppressed because it is too large Load Diff

View File

@ -163,7 +163,8 @@ redis = { version = "~5.0.3", extras = ["hiredis"] }
replicate = "~0.22.0"
resend = "~0.7.0"
safetensors = "~0.4.3"
sentry-sdk = { version = "~2.8.0", extras = ["flask"] }
scikit-learn = "^1.5.1"
sentry-sdk = { version = "~1.44.1", extras = ["flask"] }
sqlalchemy = "~2.0.29"
tencentcloud-sdk-python-hunyuan = "~3.0.1158"
tiktoken = "~0.7.0"
@ -175,7 +176,7 @@ werkzeug = "~3.0.1"
xinference-client = "0.9.4"
yarl = "~1.9.4"
zhipuai = "1.0.7"
rank-bm25 = "~0.2.2"
############################################################
# Tool dependencies required by tool implementations
############################################################
@ -200,7 +201,7 @@ cloudscraper = "1.2.71"
############################################################
[tool.poetry.group.vdb.dependencies]
chromadb = "~0.5.1"
chromadb = "0.5.1"
oracledb = "~2.2.1"
pgvecto-rs = "0.1.4"
pgvector = "0.2.5"

View File

@ -287,8 +287,12 @@ class AppService:
"""
db.session.delete(app)
db.session.commit()
# Trigger asynchronous deletion of app and related data
remove_app_and_related_data_task.delay(app.id)
remove_app_and_related_data_task.delay(
tenant_id=app.tenant_id,
app_id=app.id
)
def get_app_meta(self, app_model: App) -> dict:
"""

View File

@ -38,14 +38,16 @@ class HitTestingService:
if not retrieval_model:
retrieval_model = dataset.retrieval_model if dataset.retrieval_model else default_retrieval_model
all_documents = RetrievalService.retrieve(retrival_method=retrieval_model['search_method'],
all_documents = RetrievalService.retrieve(retrival_method=retrieval_model.get('search_method', 'semantic_search'),
dataset_id=dataset.id,
query=cls.escape_query_for_search(query),
top_k=retrieval_model['top_k'],
top_k=retrieval_model.get('top_k', 2),
score_threshold=retrieval_model['score_threshold']
if retrieval_model['score_threshold_enabled'] else None,
reranking_model=retrieval_model['reranking_model']
if retrieval_model['reranking_enable'] else None
if retrieval_model['reranking_enable'] else None,
reranking_mode=retrieval_model.get('reranking_mode', None),
weights=retrieval_model.get('weights', None),
)
end = time.perf_counter()

View File

@ -3,7 +3,6 @@ import time
import click
from celery import shared_task
from sqlalchemy import select
from sqlalchemy.exc import SQLAlchemyError
from extensions.ext_database import db
@ -25,6 +24,7 @@ from models.model import (
RecommendedApp,
Site,
TagBinding,
TraceAppConfig,
)
from models.tools import WorkflowToolProvider
from models.web import PinnedConversation, SavedMessage
@ -32,122 +32,287 @@ from models.workflow import Workflow, WorkflowAppLog, WorkflowNodeExecution, Wor
@shared_task(queue='app_deletion', bind=True, max_retries=3)
def remove_app_and_related_data_task(self, app_id: str):
logging.info(click.style(f'Start deleting app and related data: {app_id}', fg='green'))
def remove_app_and_related_data_task(self, tenant_id: str, app_id: str):
logging.info(click.style(f'Start deleting app and related data: {tenant_id}:{app_id}', fg='green'))
start_at = time.perf_counter()
try:
# Use a transaction to ensure all deletions succeed or none do
with db.session.begin_nested():
# Delete related data
_delete_app_model_configs(app_id)
_delete_app_site(app_id)
_delete_app_api_tokens(app_id)
_delete_installed_apps(app_id)
_delete_recommended_apps(app_id)
_delete_app_annotation_data(app_id)
_delete_app_dataset_joins(app_id)
_delete_app_workflows(app_id)
_delete_app_conversations(app_id)
_delete_app_messages(app_id)
_delete_workflow_tool_providers(app_id)
_delete_app_tag_bindings(app_id)
_delete_end_users(app_id)
# If we reach here, the transaction was successful
db.session.commit()
# Delete related data
_delete_app_model_configs(tenant_id, app_id)
_delete_app_site(tenant_id, app_id)
_delete_app_api_tokens(tenant_id, app_id)
_delete_installed_apps(tenant_id, app_id)
_delete_recommended_apps(tenant_id, app_id)
_delete_app_annotation_data(tenant_id, app_id)
_delete_app_dataset_joins(tenant_id, app_id)
_delete_app_workflows(tenant_id, app_id)
_delete_app_workflow_runs(tenant_id, app_id)
_delete_app_workflow_node_executions(tenant_id, app_id)
_delete_app_workflow_app_logs(tenant_id, app_id)
_delete_app_conversations(tenant_id, app_id)
_delete_app_messages(tenant_id, app_id)
_delete_workflow_tool_providers(tenant_id, app_id)
_delete_app_tag_bindings(tenant_id, app_id)
_delete_end_users(tenant_id, app_id)
_delete_trace_app_configs(tenant_id, app_id)
end_at = time.perf_counter()
logging.info(click.style(f'App and related data deleted: {app_id} latency: {end_at - start_at}', fg='green'))
except SQLAlchemyError as e:
db.session.rollback()
logging.exception(
click.style(f"Database error occurred while deleting app {app_id} and related data", fg='red'))
raise self.retry(exc=e, countdown=60) # Retry after 60 seconds
except Exception as e:
logging.exception(click.style(f"Error occurred while deleting app {app_id} and related data", fg='red'))
raise self.retry(exc=e, countdown=60) # Retry after 60 seconds
def _delete_app_model_configs(app_id: str):
db.session.query(AppModelConfig).filter(AppModelConfig.app_id == app_id).delete()
def _delete_app_model_configs(tenant_id: str, app_id: str):
    """Batch-delete AppModelConfig rows belonging to the app (1000 ids per pass)."""

    def _drop(model_config_id: str):
        db.session.query(AppModelConfig).filter(AppModelConfig.id == model_config_id).delete(synchronize_session=False)

    _delete_records(
        """select id from app_model_configs where app_id=:app_id limit 1000""",
        {"app_id": app_id},
        _drop,
        "app model config",
    )
def _delete_app_site(app_id: str):
db.session.query(Site).filter(Site.app_id == app_id).delete()
def _delete_app_site(tenant_id: str, app_id: str):
    """Batch-delete Site rows belonging to the app (1000 ids per pass)."""

    def _drop(site_id: str):
        db.session.query(Site).filter(Site.id == site_id).delete(synchronize_session=False)

    _delete_records(
        """select id from sites where app_id=:app_id limit 1000""",
        {"app_id": app_id},
        _drop,
        "site",
    )
def _delete_app_api_tokens(app_id: str):
db.session.query(ApiToken).filter(ApiToken.app_id == app_id).delete()
def _delete_app_api_tokens(tenant_id: str, app_id: str):
    """Batch-delete ApiToken rows belonging to the app (1000 ids per pass)."""

    def _drop(api_token_id: str):
        db.session.query(ApiToken).filter(ApiToken.id == api_token_id).delete(synchronize_session=False)

    _delete_records(
        """select id from api_tokens where app_id=:app_id limit 1000""",
        {"app_id": app_id},
        _drop,
        "api token",
    )
def _delete_installed_apps(app_id: str):
db.session.query(InstalledApp).filter(InstalledApp.app_id == app_id).delete()
def _delete_installed_apps(tenant_id: str, app_id: str):
    """Batch-delete InstalledApp rows for the tenant's app (1000 ids per pass)."""

    def _drop(installed_app_id: str):
        db.session.query(InstalledApp).filter(InstalledApp.id == installed_app_id).delete(synchronize_session=False)

    _delete_records(
        """select id from installed_apps where tenant_id=:tenant_id and app_id=:app_id limit 1000""",
        {"tenant_id": tenant_id, "app_id": app_id},
        _drop,
        "installed app",
    )
def _delete_recommended_apps(app_id: str):
db.session.query(RecommendedApp).filter(RecommendedApp.app_id == app_id).delete()
def _delete_recommended_apps(tenant_id: str, app_id: str):
    """Batch-delete RecommendedApp rows belonging to the app (1000 ids per pass)."""

    def _drop(recommended_app_id: str):
        db.session.query(RecommendedApp).filter(
            RecommendedApp.id == recommended_app_id).delete(synchronize_session=False)

    _delete_records(
        """select id from recommended_apps where app_id=:app_id limit 1000""",
        {"app_id": app_id},
        _drop,
        "recommended app",
    )
def _delete_app_annotation_data(app_id: str):
db.session.query(AppAnnotationHitHistory).filter(AppAnnotationHitHistory.app_id == app_id).delete()
db.session.query(AppAnnotationSetting).filter(AppAnnotationSetting.app_id == app_id).delete()
def _delete_app_annotation_data(tenant_id: str, app_id: str):
    """Batch-delete the app's annotation hit histories and annotation settings."""

    def _drop_hit_history(annotation_hit_history_id: str):
        db.session.query(AppAnnotationHitHistory).filter(
            AppAnnotationHitHistory.id == annotation_hit_history_id).delete(synchronize_session=False)

    _delete_records(
        """select id from app_annotation_hit_histories where app_id=:app_id limit 1000""",
        {"app_id": app_id},
        _drop_hit_history,
        "annotation hit history",
    )

    def _drop_setting(annotation_setting_id: str):
        db.session.query(AppAnnotationSetting).filter(
            AppAnnotationSetting.id == annotation_setting_id).delete(synchronize_session=False)

    _delete_records(
        """select id from app_annotation_settings where app_id=:app_id limit 1000""",
        {"app_id": app_id},
        _drop_setting,
        "annotation setting",
    )
def _delete_app_dataset_joins(app_id: str):
    """Bulk-delete all AppDatasetJoin rows linking datasets to this app."""
    db.session.query(AppDatasetJoin).filter(AppDatasetJoin.app_id == app_id).delete()
def _delete_app_dataset_joins(tenant_id: str, app_id: str):
    """Remove AppDatasetJoin rows linking datasets to this app, in batches of up to 1000."""
    def _remove_join(dataset_join_id: str):
        db.session.query(AppDatasetJoin).filter(
            AppDatasetJoin.id == dataset_join_id).delete(synchronize_session=False)

    # NOTE(review): tenant_id is accepted but unused — the select filters by app_id only; confirm intended.
    _delete_records(
        """select id from app_dataset_joins where app_id=:app_id limit 1000""",
        {"app_id": app_id},
        _remove_join,
        "dataset join"
    )
def _delete_app_workflows(app_id: str):
    """Delete all workflow data for an app.

    Child rows go first — runs and node executions reference workflow ids —
    then app logs, then the workflows themselves.
    """
    # Runs belonging to any of the app's workflows (subquery on workflow ids).
    db.session.query(WorkflowRun).filter(
        WorkflowRun.workflow_id.in_(
            db.session.query(Workflow.id).filter(Workflow.app_id == app_id)
        )
    ).delete(synchronize_session=False)
    # Node executions belonging to any of the app's workflows.
    db.session.query(WorkflowNodeExecution).filter(
        WorkflowNodeExecution.workflow_id.in_(
            db.session.query(Workflow.id).filter(Workflow.app_id == app_id)
        )
    ).delete(synchronize_session=False)
    db.session.query(WorkflowAppLog).filter(WorkflowAppLog.app_id == app_id).delete(synchronize_session=False)
    db.session.query(Workflow).filter(Workflow.app_id == app_id).delete(synchronize_session=False)
def _delete_app_workflows(tenant_id: str, app_id: str):
    """Remove Workflow rows for the given tenant/app, in batches of up to 1000."""
    def _remove_workflow(workflow_id: str):
        db.session.query(Workflow).filter(Workflow.id == workflow_id).delete(synchronize_session=False)

    batch_sql = """select id from workflows where tenant_id=:tenant_id and app_id=:app_id limit 1000"""
    _delete_records(batch_sql, {"tenant_id": tenant_id, "app_id": app_id}, _remove_workflow, "workflow")
def _delete_app_conversations(app_id: str):
    """Delete pinned-conversation rows pointing at the app's conversations, then the conversations."""
    db.session.query(PinnedConversation).filter(
        PinnedConversation.conversation_id.in_(
            db.session.query(Conversation.id).filter(Conversation.app_id == app_id)
        )
    ).delete(synchronize_session=False)
    db.session.query(Conversation).filter(Conversation.app_id == app_id).delete()
def _delete_app_workflow_runs(tenant_id: str, app_id: str):
    """Remove WorkflowRun rows for the given tenant/app, in batches of up to 1000."""
    def _remove_run(workflow_run_id: str):
        db.session.query(WorkflowRun).filter(WorkflowRun.id == workflow_run_id).delete(synchronize_session=False)

    batch_sql = """select id from workflow_runs where tenant_id=:tenant_id and app_id=:app_id limit 1000"""
    _delete_records(batch_sql, {"tenant_id": tenant_id, "app_id": app_id}, _remove_run, "workflow run")
def _delete_app_messages(app_id: str):
    """Delete the app's messages and all dependent rows, children first.

    Dependent tables: feedback, annotations, chains, agent thoughts, files,
    saved messages. The subquery over message ids is reused by each delete.
    """
    # Correlated scalar subquery selecting every message id of this app.
    message_ids = select(Message.id).filter(Message.app_id == app_id).scalar_subquery()
    db.session.query(MessageFeedback).filter(MessageFeedback.message_id.in_(message_ids)).delete(
        synchronize_session=False)
    db.session.query(MessageAnnotation).filter(MessageAnnotation.message_id.in_(message_ids)).delete(
        synchronize_session=False)
    db.session.query(MessageChain).filter(MessageChain.message_id.in_(message_ids)).delete(synchronize_session=False)
    db.session.query(MessageAgentThought).filter(MessageAgentThought.message_id.in_(message_ids)).delete(
        synchronize_session=False)
    db.session.query(MessageFile).filter(MessageFile.message_id.in_(message_ids)).delete(synchronize_session=False)
    db.session.query(SavedMessage).filter(SavedMessage.message_id.in_(message_ids)).delete(synchronize_session=False)
    # Parent rows go last.
    db.session.query(Message).filter(Message.app_id == app_id).delete(synchronize_session=False)
def _delete_app_workflow_node_executions(tenant_id: str, app_id: str):
    """Remove WorkflowNodeExecution rows for the given tenant/app, in batches of up to 1000."""
    def _remove_execution(workflow_node_execution_id: str):
        (db.session.query(WorkflowNodeExecution)
         .filter(WorkflowNodeExecution.id == workflow_node_execution_id)
         .delete(synchronize_session=False))

    _delete_records(
        """select id from workflow_node_executions where tenant_id=:tenant_id and app_id=:app_id limit 1000""",
        {"tenant_id": tenant_id, "app_id": app_id},
        _remove_execution,
        "workflow node execution"
    )
def _delete_workflow_tool_providers(app_id: str):
    """Bulk-delete all WorkflowToolProvider rows belonging to this app."""
    db.session.query(WorkflowToolProvider).filter(
        WorkflowToolProvider.app_id == app_id
    ).delete(synchronize_session=False)
def _delete_app_workflow_app_logs(tenant_id: str, app_id: str):
    """Remove WorkflowAppLog rows for the given tenant/app, in batches of up to 1000."""
    def _remove_log(workflow_app_log_id: str):
        db.session.query(WorkflowAppLog).filter(
            WorkflowAppLog.id == workflow_app_log_id).delete(synchronize_session=False)

    batch_sql = """select id from workflow_app_logs where tenant_id=:tenant_id and app_id=:app_id limit 1000"""
    _delete_records(batch_sql, {"tenant_id": tenant_id, "app_id": app_id}, _remove_log, "workflow app log")
def _delete_app_tag_bindings(app_id: str):
    """Bulk-delete all TagBinding rows whose target is this app."""
    db.session.query(TagBinding).filter(
        TagBinding.target_id == app_id
    ).delete(synchronize_session=False)
def _delete_app_conversations(tenant_id: str, app_id: str):
    """Remove the app's conversations (and pinned entries pointing at them) in batches of up to 1000."""
    def _remove_conversation(conversation_id: str):
        # Pinned entries reference the conversation, so clear them first.
        db.session.query(PinnedConversation).filter(
            PinnedConversation.conversation_id == conversation_id).delete(synchronize_session=False)
        db.session.query(Conversation).filter(
            Conversation.id == conversation_id).delete(synchronize_session=False)

    # NOTE(review): tenant_id is accepted but unused — the select filters by app_id only; confirm intended.
    _delete_records(
        """select id from conversations where app_id=:app_id limit 1000""",
        {"app_id": app_id},
        _remove_conversation,
        "conversation"
    )
def _delete_end_users(app_id: str):
    """Bulk-delete all EndUser rows belonging to this app."""
    db.session.query(EndUser).filter(EndUser.app_id == app_id).delete()
def _delete_app_messages(tenant_id: str, app_id: str):
    """Remove the app's messages plus all dependent rows, in batches of up to 1000."""
    # Child tables hanging off a message, removed (in this order) before the message itself.
    _dependents = (
        MessageFeedback,
        MessageAnnotation,
        MessageChain,
        MessageAgentThought,
        MessageFile,
        SavedMessage,
    )

    def _remove_message(message_id: str):
        for model in _dependents:
            db.session.query(model).filter(model.message_id == message_id).delete(
                synchronize_session=False)
        db.session.query(Message).filter(Message.id == message_id).delete()

    # NOTE(review): tenant_id is accepted but unused — the select filters by app_id only; confirm intended.
    _delete_records(
        """select id from messages where app_id=:app_id limit 1000""",
        {"app_id": app_id},
        _remove_message,
        "message"
    )
def _delete_workflow_tool_providers(tenant_id: str, app_id: str):
    """Remove WorkflowToolProvider rows for the given tenant/app, in batches of up to 1000."""
    def _remove_provider(tool_provider_id: str):
        (db.session.query(WorkflowToolProvider)
         .filter(WorkflowToolProvider.id == tool_provider_id)
         .delete(synchronize_session=False))

    _delete_records(
        """select id from tool_workflow_providers where tenant_id=:tenant_id and app_id=:app_id limit 1000""",
        {"tenant_id": tenant_id, "app_id": app_id},
        _remove_provider,
        "tool workflow provider"
    )
def _delete_app_tag_bindings(tenant_id: str, app_id: str):
    """Remove TagBinding rows whose target is this app, in batches of up to 1000."""
    def _remove_binding(tag_binding_id: str):
        db.session.query(TagBinding).filter(TagBinding.id == tag_binding_id).delete(synchronize_session=False)

    batch_sql = """select id from tag_bindings where tenant_id=:tenant_id and target_id=:app_id limit 1000"""
    _delete_records(batch_sql, {"tenant_id": tenant_id, "app_id": app_id}, _remove_binding, "tag binding")
def _delete_end_users(tenant_id: str, app_id: str):
    """Remove EndUser rows for the given tenant/app, in batches of up to 1000."""
    def _remove_end_user(end_user_id: str):
        db.session.query(EndUser).filter(EndUser.id == end_user_id).delete(synchronize_session=False)

    batch_sql = """select id from end_users where tenant_id=:tenant_id and app_id=:app_id limit 1000"""
    _delete_records(batch_sql, {"tenant_id": tenant_id, "app_id": app_id}, _remove_end_user, "end user")
def _delete_trace_app_configs(tenant_id: str, app_id: str):
    """Remove TraceAppConfig rows belonging to this app, in batches of up to 1000."""
    def _remove_config(trace_app_config_id: str):
        (db.session.query(TraceAppConfig)
         .filter(TraceAppConfig.id == trace_app_config_id)
         .delete(synchronize_session=False))

    # NOTE(review): tenant_id is accepted but unused — the select filters by app_id only; confirm intended.
    _delete_records(
        """select id from trace_app_config where app_id=:app_id limit 1000""",
        {"app_id": app_id},
        _remove_config,
        "trace app config"
    )
def _delete_records(query_sql: str, params: dict, delete_func: callable, name: str) -> None:
    """Repeatedly select a batch of ids via *query_sql* and delete each via *delete_func*.

    Loops until the select returns no rows; *query_sql* is expected to LIMIT its
    batch size. Each successful per-record delete is committed individually so
    one failure does not lose earlier progress.

    :param query_sql: raw SQL returning an ``id`` column for the records to delete
    :param params: bind parameters for ``query_sql``
    :param delete_func: callable invoked with the record id (as str) to perform the delete
    :param name: human-readable record name used in log messages
    """
    while True:
        with db.engine.begin() as conn:
            rs = conn.execute(db.text(query_sql), params)
            if rs.rowcount == 0:
                break

            for i in rs:
                record_id = str(i.id)
                try:
                    delete_func(record_id)
                    db.session.commit()
                    logging.info(click.style(f"Deleted {name} {record_id}", fg='green'))
                except Exception:
                    # Roll back the failed unit of work; without this the session is left
                    # in a pending-rollback state and every subsequent delete_func call
                    # on the same session would fail as well.
                    db.session.rollback()
                    logging.exception(f"Error occurred while deleting {name} {record_id}")
                    # NOTE(review): if a record fails persistently, the same batch is
                    # re-selected on the next iteration — consider a retry cap.
                    continue
            rs.close()

View File

@ -2,14 +2,16 @@ import pytest
from pydantic import ValidationError
from core.app.segments import (
ArrayVariable,
FloatVariable,
IntegerVariable,
NoneVariable,
ObjectVariable,
SecretVariable,
SegmentType,
StringVariable,
factory,
)
from core.app.segments.variables import ArrayVariable, ObjectVariable
def test_string_variable():
@ -134,3 +136,13 @@ def test_variable_to_object():
assert var.to_object() == 3.14
var = SecretVariable(name='secret', value='secret_value')
assert var.to_object() == 'secret_value'
def test_build_a_object_variable_with_none_value():
    """A mapping with a None member builds an ObjectVariable whose member is a NoneVariable."""
    payload = {'key1': None}
    built = factory.build_anonymous_variable(payload)
    assert isinstance(built, ObjectVariable)
    assert isinstance(built.value['key1'], NoneVariable)

View File

@ -3,7 +3,6 @@ set -x
pytest api/tests/integration_tests/vdb/chroma \
api/tests/integration_tests/vdb/milvus \
api/tests/integration_tests/vdb/myscale \
api/tests/integration_tests/vdb/pgvecto_rs \
api/tests/integration_tests/vdb/pgvector \
api/tests/integration_tests/vdb/qdrant \

View File

@ -2,7 +2,7 @@ version: '3'
services:
# API service
api:
image: langgenius/dify-api:0.6.14
image: langgenius/dify-api:0.6.15
restart: always
environment:
# Startup mode, 'api' starts the API server.
@ -224,7 +224,7 @@ services:
# worker service
# The Celery worker for processing the queue.
worker:
image: langgenius/dify-api:0.6.14
image: langgenius/dify-api:0.6.15
restart: always
environment:
CONSOLE_WEB_URL: ''
@ -390,7 +390,7 @@ services:
# Frontend web application.
web:
image: langgenius/dify-web:0.6.14
image: langgenius/dify-web:0.6.15
restart: always
environment:
# The base URL of console application api server, refers to the Console base URL of WEB service if console domain is

View File

@ -48,7 +48,7 @@ services:
# ssrf_proxy server
# for more information, please refer to
# https://docs.dify.ai/getting-started/install-self-hosted/install-faq#id-16.-why-is-ssrf_proxy-needed
# https://docs.dify.ai/learn-more/faq/self-host-faq#id-18.-why-is-ssrf_proxy-needed
ssrf_proxy:
image: ubuntu/squid:latest
restart: always

View File

@ -179,7 +179,7 @@ x-shared-env: &shared-api-worker-env
services:
# API service
api:
image: langgenius/dify-api:0.6.14
image: langgenius/dify-api:0.6.15
restart: always
environment:
# Use the shared environment variables.
@ -199,7 +199,7 @@ services:
# worker service
# The Celery worker for processing the queue.
worker:
image: langgenius/dify-api:0.6.14
image: langgenius/dify-api:0.6.15
restart: always
environment:
# Use the shared environment variables.
@ -218,7 +218,7 @@ services:
# Frontend web application.
web:
image: langgenius/dify-web:0.6.14
image: langgenius/dify-web:0.6.15
restart: always
environment:
CONSOLE_API_URL: ${CONSOLE_API_URL:-}

View File

@ -434,7 +434,7 @@ const AppInfo = ({ expand }: IAppInfoProps) => {
{showImportDSLModal && (
<UpdateDSLModal
onCancel={() => setShowImportDSLModal(false)}
onBackup={onExport}
onBackup={exportCheck}
/>
)}
{secretEnvList.length > 0 && (

View File

@ -43,6 +43,7 @@ type Props = {
promptVariables: PromptVariable[]
isContextMissing: boolean
onHideContextMissingTip: () => void
noResize?: boolean
}
const AdvancedPromptInput: FC<Props> = ({
@ -56,6 +57,7 @@ const AdvancedPromptInput: FC<Props> = ({
promptVariables,
isContextMissing,
onHideContextMissingTip,
noResize,
}) => {
const { t } = useTranslation()
const { eventEmitter } = useEventEmitterContextContext()
@ -207,6 +209,7 @@ const AdvancedPromptInput: FC<Props> = ({
<div className="h-[18px] leading-[18px] px-1 rounded-md bg-gray-100 text-xs text-gray-500">{value.length}</div>
</div>
)}
hideResize={noResize}
>
<PromptEditor
className='min-h-[84px]'

View File

@ -19,6 +19,10 @@ export type IPromptProps = {
promptTemplate: string
promptVariables: PromptVariable[]
readonly?: boolean
noTitle?: boolean
gradientBorder?: boolean
editorHeight?: number
noResize?: boolean
onChange?: (prompt: string, promptVariables: PromptVariable[]) => void
}
@ -26,7 +30,11 @@ const Prompt: FC<IPromptProps> = ({
mode,
promptTemplate,
promptVariables,
noTitle,
gradientBorder,
readonly = false,
editorHeight,
noResize,
onChange,
}) => {
const { t } = useTranslation()
@ -99,6 +107,10 @@ const Prompt: FC<IPromptProps> = ({
promptVariables={promptVariables}
readonly={readonly}
onChange={onChange}
noTitle={noTitle}
gradientBorder={gradientBorder}
editorHeight={editorHeight}
noResize={noResize}
/>
)
}
@ -121,6 +133,7 @@ const Prompt: FC<IPromptProps> = ({
promptVariables={promptVariables}
isContextMissing={isContextMissing && !isHideContextMissTip}
onHideContextMissingTip={() => setIsHideContextMissTip(true)}
noResize={noResize}
/>
))
)
@ -136,6 +149,7 @@ const Prompt: FC<IPromptProps> = ({
promptVariables={promptVariables}
isContextMissing={isContextMissing && !isHideContextMissTip}
onHideContextMissingTip={() => setIsHideContextMissTip(true)}
noResize={noResize}
/>
)
}

View File

@ -14,6 +14,7 @@ import PromptEditorHeightResizeWrap from './prompt-editor-height-resize-wrap'
import cn from '@/utils/classnames'
import { type PromptVariable } from '@/models/debug'
import Tooltip from '@/app/components/base/tooltip'
import type { CompletionParams } from '@/types/app'
import { AppType } from '@/types/app'
import { getNewVar, getVars } from '@/utils/var'
import AutomaticBtn from '@/app/components/app/configuration/config/automatic/automatic-btn'
@ -28,6 +29,7 @@ import { useEventEmitterContextContext } from '@/context/event-emitter'
import { ADD_EXTERNAL_DATA_TOOL } from '@/app/components/app/configuration/config-var'
import { INSERT_VARIABLE_VALUE_BLOCK_COMMAND } from '@/app/components/base/prompt-editor/plugins/variable-block'
import { PROMPT_EDITOR_UPDATE_VALUE_BY_EVENT_EMITTER } from '@/app/components/base/prompt-editor/plugins/update-block'
import useBreakpoints, { MediaType } from '@/hooks/use-breakpoints'
export type ISimplePromptInput = {
mode: AppType
@ -35,6 +37,10 @@ export type ISimplePromptInput = {
promptVariables: PromptVariable[]
readonly?: boolean
onChange?: (promp: string, promptVariables: PromptVariable[]) => void
noTitle?: boolean
gradientBorder?: boolean
editorHeight?: number
noResize?: boolean
}
const Prompt: FC<ISimplePromptInput> = ({
@ -43,11 +49,19 @@ const Prompt: FC<ISimplePromptInput> = ({
promptVariables,
readonly = false,
onChange,
noTitle,
gradientBorder,
editorHeight: initEditorHeight,
noResize,
}) => {
const { t } = useTranslation()
const media = useBreakpoints()
const isMobile = media === MediaType.mobile
const { eventEmitter } = useEventEmitterContextContext()
const {
modelConfig,
completionParams,
dataSets,
setModelConfig,
setPrevPromptConfig,
@ -116,6 +130,11 @@ const Prompt: FC<ISimplePromptInput> = ({
const [showAutomatic, { setTrue: showAutomaticTrue, setFalse: showAutomaticFalse }] = useBoolean(false)
const handleAutomaticRes = (res: AutomaticRes) => {
// put eventEmitter first to prevent overwriting configs.prompt_variables. A remaining problem is that the prompt won't highlight the prompt_variables.
eventEmitter?.emit({
type: PROMPT_EDITOR_UPDATE_VALUE_BY_EVENT_EMITTER,
payload: res.prompt,
} as any)
const newModelConfig = produce(modelConfig, (draft) => {
draft.configs.prompt_template = res.prompt
draft.configs.prompt_variables = res.variables.map(key => ({ key, name: key, type: 'string', required: true }))
@ -125,41 +144,41 @@ const Prompt: FC<ISimplePromptInput> = ({
if (mode !== AppType.completion)
setIntroduction(res.opening_statement)
showAutomaticFalse()
eventEmitter?.emit({
type: PROMPT_EDITOR_UPDATE_VALUE_BY_EVENT_EMITTER,
payload: res.prompt,
} as any)
}
const minHeight = 228
const minHeight = initEditorHeight || 228
const [editorHeight, setEditorHeight] = useState(minHeight)
return (
<div className={cn(!readonly ? `${s.gradientBorder}` : 'bg-gray-50', ' relative shadow-md')}>
<div className={cn((!readonly || gradientBorder) ? `${s.gradientBorder}` : 'bg-gray-50', ' relative shadow-md')}>
<div className='rounded-xl bg-[#EEF4FF]'>
<div className="flex justify-between items-center h-11 px-3">
<div className="flex items-center space-x-1">
<div className='h2'>{mode !== AppType.completion ? t('appDebug.chatSubTitle') : t('appDebug.completionSubTitle')}</div>
{!readonly && (
<Tooltip
htmlContent={<div className='w-[180px]'>
{t('appDebug.promptTip')}
</div>}
selector='config-prompt-tooltip'>
<RiQuestionLine className='w-[14px] h-[14px] text-indigo-400' />
</Tooltip>
)}
{!noTitle && (
<div className="flex justify-between items-center h-11 pl-3 pr-6">
<div className="flex items-center space-x-1">
<div className='h2'>{mode !== AppType.completion ? t('appDebug.chatSubTitle') : t('appDebug.completionSubTitle')}</div>
{!readonly && (
<Tooltip
htmlContent={<div className='w-[180px]'>
{t('appDebug.promptTip')}
</div>}
selector='config-prompt-tooltip'>
<RiQuestionLine className='w-[14px] h-[14px] text-indigo-400' />
</Tooltip>
)}
</div>
<div className='flex items-center'>
{!isAgent && !readonly && !isMobile && (
<AutomaticBtn onClick={showAutomaticTrue} />
)}
</div>
</div>
<div className='flex items-center'>
{!isAgent && !readonly && (
<AutomaticBtn onClick={showAutomaticTrue} />
)}
</div>
</div>
)}
<PromptEditorHeightResizeWrap
className='px-4 pt-2 min-h-[228px] bg-white rounded-t-xl text-sm text-gray-700'
height={editorHeight}
minHeight={minHeight}
onHeightChange={setEditorHeight}
hideResize={noResize}
footer={(
<div className='pl-4 pb-2 flex bg-white rounded-b-xl'>
<div className="h-[18px] leading-[18px] px-1 rounded-md bg-gray-100 text-xs text-gray-500">{promptTemplate.length}</div>
@ -216,6 +235,7 @@ const Prompt: FC<ISimplePromptInput> = ({
onBlur={() => {
handleChange(promptTemplate, getVars(promptTemplate))
}}
editable={!readonly}
/>
</PromptEditorHeightResizeWrap>
</div>
@ -232,6 +252,14 @@ const Prompt: FC<ISimplePromptInput> = ({
{showAutomatic && (
<GetAutomaticResModal
mode={mode as AppType}
model={
{
provider: modelConfig.provider,
name: modelConfig.model_id,
mode: modelConfig.mode,
completion_params: completionParams as CompletionParams,
}
}
isShow={showAutomatic}
onClose={showAutomaticFalse}
onFinished={handleAutomaticRes}

View File

@ -2,29 +2,21 @@
import type { FC } from 'react'
import React from 'react'
import { useTranslation } from 'react-i18next'
import { Generator } from '@/app/components/base/icons/src/vender/other'
export type IAutomaticBtnProps = {
onClick: () => void
}
const leftIcon = (
<svg width="14" height="14" viewBox="0 0 14 14" fill="none" xmlns="http://www.w3.org/2000/svg">
<path d="M4.31346 0.905711C4.21464 0.708087 4.01266 0.583252 3.79171 0.583252C3.57076 0.583252 3.36877 0.708087 3.26996 0.905711L2.81236 1.82091C2.64757 2.15048 2.59736 2.24532 2.53635 2.32447C2.47515 2.40386 2.40398 2.47503 2.32459 2.53623C2.24544 2.59724 2.1506 2.64745 1.82103 2.81224L0.905833 3.26984C0.708209 3.36865 0.583374 3.57064 0.583374 3.79159C0.583374 4.01254 0.708209 4.21452 0.905833 4.31333L1.82103 4.77094C2.1506 4.93572 2.24544 4.98593 2.32459 5.04694C2.40398 5.10814 2.47515 5.17931 2.53635 5.2587C2.59736 5.33785 2.64758 5.43269 2.81236 5.76226L3.26996 6.67746C3.36877 6.87508 3.57076 6.99992 3.79171 6.99992C4.01266 6.99992 4.21465 6.87508 4.31346 6.67746L4.77106 5.76226C4.93584 5.43269 4.98605 5.33786 5.04707 5.2587C5.10826 5.17931 5.17943 5.10814 5.25883 5.04694C5.33798 4.98593 5.43282 4.93572 5.76238 4.77094L6.67758 4.31333C6.87521 4.21452 7.00004 4.01254 7.00004 3.79159C7.00004 3.57064 6.87521 3.36865 6.67758 3.26984L5.76238 2.81224C5.43282 2.64745 5.33798 2.59724 5.25883 2.53623C5.17943 2.47503 5.10826 2.40386 5.04707 2.32447C4.98605 2.24532 4.93584 2.15048 4.77106 1.82091L4.31346 0.905711Z" fill="#444CE7" />
<path d="M11.375 1.74992C11.375 1.42775 11.1139 1.16659 10.7917 1.16659C10.4695 1.16659 10.2084 1.42775 10.2084 1.74992V2.62492H9.33337C9.01121 2.62492 8.75004 2.88609 8.75004 3.20825C8.75004 3.53042 9.01121 3.79159 9.33337 3.79159H10.2084V4.66659C10.2084 4.98875 10.4695 5.24992 10.7917 5.24992C11.1139 5.24992 11.375 4.98875 11.375 4.66659V3.79159H12.25C12.5722 3.79159 12.8334 3.53042 12.8334 3.20825C12.8334 2.88609 12.5722 2.62492 12.25 2.62492H11.375V1.74992Z" fill="#444CE7" />
<path d="M3.79171 9.33325C3.79171 9.01109 3.53054 8.74992 3.20837 8.74992C2.88621 8.74992 2.62504 9.01109 2.62504 9.33325V10.2083H1.75004C1.42787 10.2083 1.16671 10.4694 1.16671 10.7916C1.16671 11.1138 1.42787 11.3749 1.75004 11.3749H2.62504V12.2499C2.62504 12.5721 2.88621 12.8333 3.20837 12.8333C3.53054 12.8333 3.79171 12.5721 3.79171 12.2499V11.3749H4.66671C4.98887 11.3749 5.25004 11.1138 5.25004 10.7916C5.25004 10.4694 4.98887 10.2083 4.66671 10.2083H3.79171V9.33325Z" fill="#444CE7" />
<path d="M10.4385 6.73904C10.3396 6.54142 10.1377 6.41659 9.91671 6.41659C9.69576 6.41659 9.49377 6.54142 9.39496 6.73904L8.84014 7.84869C8.67535 8.17826 8.62514 8.27309 8.56413 8.35225C8.50293 8.43164 8.43176 8.50281 8.35237 8.56401C8.27322 8.62502 8.17838 8.67523 7.84881 8.84001L6.73917 9.39484C6.54154 9.49365 6.41671 9.69564 6.41671 9.91659C6.41671 10.1375 6.54154 10.3395 6.73917 10.4383L7.84881 10.9932C8.17838 11.1579 8.27322 11.2082 8.35237 11.2692C8.43176 11.3304 8.50293 11.4015 8.56413 11.4809C8.62514 11.5601 8.67535 11.6549 8.84014 11.9845L9.39496 13.0941C9.49377 13.2918 9.69576 13.4166 9.91671 13.4166C10.1377 13.4166 10.3396 13.2918 10.4385 13.0941L10.9933 11.9845C11.1581 11.6549 11.2083 11.5601 11.2693 11.4809C11.3305 11.4015 11.4017 11.3304 11.481 11.2692C11.5602 11.2082 11.655 11.1579 11.9846 10.9932L13.0942 10.4383C13.2919 10.3395 13.4167 10.1375 13.4167 9.91659C13.4167 9.69564 13.2919 9.49365 13.0942 9.39484L11.9846 8.84001C11.655 8.67523 11.5602 8.62502 11.481 8.56401C11.4017 8.50281 11.3305 8.43164 11.2693 8.35225C11.2083 8.27309 11.1581 8.17826 10.9933 7.84869L10.4385 6.73904Z" fill="#444CE7" />
</svg>
)
const AutomaticBtn: FC<IAutomaticBtnProps> = ({
onClick,
}) => {
const { t } = useTranslation()
return (
<div className='flex px-3 space-x-2 items-center !h-8 cursor-pointer'
<div className='flex space-x-1 items-center !h-8 cursor-pointer'
onClick={onClick}
>
{leftIcon}
<Generator className='w-3.5 h-3.5 text-indigo-600' />
<span className='text-xs font-semibold text-indigo-600'>{t('appDebug.operation.automatic')}</span>
</div>
)

View File

@ -1,70 +1,126 @@
'use client'
import type { FC } from 'react'
import React from 'react'
import React, { useCallback } from 'react'
import { useTranslation } from 'react-i18next'
import { useBoolean } from 'ahooks'
import {
RiDatabase2Line,
RiFileExcel2Line,
RiGitCommitLine,
RiNewspaperLine,
RiPresentationLine,
RiRoadMapLine,
RiTerminalBoxLine,
RiTranslate,
RiUser2Line,
} from '@remixicon/react'
import cn from 'classnames'
import s from './style.module.css'
import Modal from '@/app/components/base/modal'
import Button from '@/app/components/base/button'
import Toast from '@/app/components/base/toast'
import { generateRule } from '@/service/debug'
import ConfigPrompt from '@/app/components/app/configuration/config-prompt'
import type { Model } from '@/types/app'
import { AppType } from '@/types/app'
import ConfigVar from '@/app/components/app/configuration/config-var'
import OpeningStatement from '@/app/components/app/configuration/features/chat-group/opening-statement'
import GroupName from '@/app/components/app/configuration/base/group-name'
import Loading from '@/app/components/base/loading'
import Confirm from '@/app/components/base/confirm'
// type
import type { AutomaticRes } from '@/service/debug'
import useBreakpoints, { MediaType } from '@/hooks/use-breakpoints'
const noDataIcon = (
<svg width="56" height="56" viewBox="0 0 56 56" fill="none" xmlns="http://www.w3.org/2000/svg">
<path d="M10.4998 51.3333V39.6666M10.4998 16.3333V4.66663M4.6665 10.5H16.3332M4.6665 45.5H16.3332M30.3332 6.99996L26.2868 17.5206C25.6287 19.2315 25.2997 20.0869 24.7881 20.8065C24.3346 21.4442 23.7774 22.0014 23.1397 22.4549C22.4202 22.9665 21.5647 23.2955 19.8538 23.9535L9.33317 28L19.8539 32.0464C21.5647 32.7044 22.4202 33.0334 23.1397 33.5451C23.7774 33.9985 24.3346 34.5557 24.7881 35.1934C25.2997 35.913 25.6287 36.7684 26.2868 38.4793L30.3332 49L34.3796 38.4793C35.0376 36.7684 35.3666 35.913 35.8783 35.1934C36.3317 34.5557 36.8889 33.9985 37.5266 33.5451C38.2462 33.0334 39.1016 32.7044 40.8125 32.0464L51.3332 28L40.8125 23.9535C39.1016 23.2955 38.2462 22.9665 37.5266 22.4549C36.8889 22.0014 36.3317 21.4442 35.8783 20.8065C35.3666 20.0869 35.0376 19.2315 34.3796 17.5206L30.3332 6.99996Z" stroke="#EAECF0" strokeWidth="3" strokeLinecap="round" strokeLinejoin="round" />
</svg>
)
import { Generator } from '@/app/components/base/icons/src/vender/other'
export type IGetAutomaticResProps = {
mode: AppType
model: Model
isShow: boolean
onClose: () => void
onFinished: (res: AutomaticRes) => void
isInLLMNode?: boolean
}
const genIcon = (
<svg width="16" height="16" viewBox="0 0 16 16" fill="none" xmlns="http://www.w3.org/2000/svg">
<path d="M3.6665 1.33332C3.6665 0.965133 3.36803 0.666656 2.99984 0.666656C2.63165 0.666656 2.33317 0.965133 2.33317 1.33332V2.33332H1.33317C0.964981 2.33332 0.666504 2.6318 0.666504 2.99999C0.666504 3.36818 0.964981 3.66666 1.33317 3.66666H2.33317V4.66666C2.33317 5.03485 2.63165 5.33332 2.99984 5.33332C3.36803 5.33332 3.6665 5.03485 3.6665 4.66666V3.66666H4.6665C5.03469 3.66666 5.33317 3.36818 5.33317 2.99999C5.33317 2.6318 5.03469 2.33332 4.6665 2.33332H3.6665V1.33332Z" fill="white" />
<path d="M3.6665 11.3333C3.6665 10.9651 3.36803 10.6667 2.99984 10.6667C2.63165 10.6667 2.33317 10.9651 2.33317 11.3333V12.3333H1.33317C0.964981 12.3333 0.666504 12.6318 0.666504 13C0.666504 13.3682 0.964981 13.6667 1.33317 13.6667H2.33317V14.6667C2.33317 15.0348 2.63165 15.3333 2.99984 15.3333C3.36803 15.3333 3.6665 15.0348 3.6665 14.6667V13.6667H4.6665C5.03469 13.6667 5.33317 13.3682 5.33317 13C5.33317 12.6318 5.03469 12.3333 4.6665 12.3333H3.6665V11.3333Z" fill="white" />
<path d="M9.28873 1.76067C9.18971 1.50321 8.94235 1.33332 8.6665 1.33332C8.39066 1.33332 8.1433 1.50321 8.04427 1.76067L6.88815 4.76658C6.68789 5.28727 6.62495 5.43732 6.53887 5.55838C6.4525 5.67986 6.34637 5.78599 6.2249 5.87236C6.10384 5.95844 5.95379 6.02137 5.43309 6.22164L2.42718 7.37776C2.16972 7.47678 1.99984 7.72414 1.99984 7.99999C1.99984 8.27584 2.16972 8.5232 2.42718 8.62222L5.43309 9.77834C5.95379 9.97861 6.10384 10.0415 6.2249 10.1276C6.34637 10.214 6.4525 10.3201 6.53887 10.4416C6.62495 10.5627 6.68789 10.7127 6.88816 11.2334L8.04427 14.2393C8.1433 14.4968 8.39066 14.6667 8.6665 14.6667C8.94235 14.6667 9.18971 14.4968 9.28873 14.2393L10.4449 11.2334C10.6451 10.7127 10.7081 10.5627 10.7941 10.4416C10.8805 10.3201 10.9866 10.214 11.1081 10.1276C11.2292 10.0415 11.3792 9.97861 11.8999 9.77834L14.9058 8.62222C15.1633 8.5232 15.3332 8.27584 15.3332 7.99999C15.3332 7.72414 15.1633 7.47678 14.9058 7.37776L11.8999 6.22164C11.3792 6.02137 11.2292 5.95844 11.1081 5.87236C10.9866 5.78599 10.8805 5.67986 10.7941 5.55838C10.7081 5.43732 10.6451 5.28727 10.4449 4.76658L9.28873 1.76067Z" fill="white" />
</svg>
)
const TryLabel: FC<{
Icon: any
text: string
onClick: () => void
}> = ({ Icon, text, onClick }) => {
return (
<div
className='mt-2 mr-1 shrink-0 flex h-7 items-center px-2 bg-gray-100 rounded-lg cursor-pointer'
onClick={onClick}
>
<Icon className='w-4 h-4 text-gray-500'></Icon>
<div className='ml-1 text-xs font-medium text-gray-700'>{text}</div>
</div>
)
}
const GetAutomaticRes: FC<IGetAutomaticResProps> = ({
mode,
model,
isShow,
onClose,
// appId,
isInLLMNode,
onFinished,
}) => {
const { t } = useTranslation()
const media = useBreakpoints()
const isMobile = media === MediaType.mobile
const tryList = [
{
icon: RiTerminalBoxLine,
key: 'pythonDebugger',
},
{
icon: RiTranslate,
key: 'translation',
},
{
icon: RiPresentationLine,
key: 'meetingTakeaways',
},
{
icon: RiNewspaperLine,
key: 'writingsPolisher',
},
{
icon: RiUser2Line,
key: 'professionalAnalyst',
},
{
icon: RiFileExcel2Line,
key: 'excelFormulaExpert',
},
{
icon: RiRoadMapLine,
key: 'travelPlanning',
},
{
icon: RiDatabase2Line,
key: 'SQLSorcerer',
},
{
icon: RiGitCommitLine,
key: 'GitGud',
},
]
const [audiences, setAudiences] = React.useState<string>('')
const [hopingToSolve, setHopingToSolve] = React.useState<string>('')
const isValid = () => {
if (audiences.trim() === '') {
Toast.notify({
type: 'error',
message: t('appDebug.automatic.audiencesRequired'),
})
return false
const [instruction, setInstruction] = React.useState<string>('')
const handleChooseTemplate = useCallback((key: string) => {
return () => {
const template = t(`appDebug.generate.template.${key}.instruction`)
setInstruction(template)
}
if (hopingToSolve.trim() === '') {
}, [t])
const isValid = () => {
if (instruction.trim() === '') {
Toast.notify({
type: 'error',
message: t('appDebug.automatic.problemRequired'),
message: t('common.errorMsg.fieldRequired', {
field: t('appDebug.generate.instruction'),
}),
})
return false
}
@ -76,14 +132,17 @@ const GetAutomaticRes: FC<IGetAutomaticResProps> = ({
const renderLoading = (
<div className='w-0 grow flex flex-col items-center justify-center h-full space-y-3'>
<Loading />
<div className='text-[13px] text-gray-400'>{t('appDebug.automatic.loading')}</div>
<div className='text-[13px] text-gray-400'>{t('appDebug.generate.loading')}</div>
</div>
)
const renderNoData = (
<div className='w-0 grow flex flex-col items-center px-8 justify-center h-full space-y-3'>
{noDataIcon}
<div className='text-[13px] text-gray-400'>{t('appDebug.automatic.noData')}</div>
<Generator className='w-14 h-14 text-gray-300' />
<div className='leading-5 text-center text-[13px] font-normal text-gray-400'>
<div>{t('appDebug.generate.noDataLine1')}</div>
<div>{t('appDebug.generate.noDataLine2')}</div>
</div>
</div>
)
@ -94,11 +153,18 @@ const GetAutomaticRes: FC<IGetAutomaticResProps> = ({
return
setLoadingTrue()
try {
const res = await generateRule({
audiences,
hoping_to_solve: hopingToSolve,
const { error, ...res } = await generateRule({
instruction,
model_config: model,
no_variable: !!isInLLMNode,
})
setRes(res)
if (error) {
Toast.notify({
type: 'error',
message: error,
})
}
}
finally {
setLoadingFalse()
@ -107,24 +173,7 @@ const GetAutomaticRes: FC<IGetAutomaticResProps> = ({
const [showConfirmOverwrite, setShowConfirmOverwrite] = React.useState(false)
const isShowAutoPromptInput = () => {
if (isMobile) {
// hide prompt panel on mobile if it is loading or has had result
if (isLoading || res)
return false
return true
}
// always display prompt panel on desktop mode
return true
}
const isShowAutoPromptResPlaceholder = () => {
if (isMobile) {
// hide placeholder panel on mobile
return false
}
return !isLoading && !res
}
@ -132,75 +181,97 @@ const GetAutomaticRes: FC<IGetAutomaticResProps> = ({
<Modal
isShow={isShow}
onClose={onClose}
className='!p-0 sm:min-w-[768px] xl:min-w-[1120px]'
className='!p-0 min-w-[1140px]'
closable
>
<div className='flex h-[680px] flex-wrap gap-y-4 overflow-y-auto'>
{isShowAutoPromptInput() && <div className='w-full sm:w-[360px] xl:w-[480px] shrink-0 px-8 py-6 h-full overflow-y-auto border-r border-gray-100'>
<div>
<div className='mb-1 text-xl font-semibold text-primary-600'>{t('appDebug.automatic.title')}</div>
<div className='text-[13px] font-normal text-gray-500'>{t('appDebug.automatic.description')}</div>
<div className='flex h-[680px] flex-wrap'>
<div className='w-[570px] shrink-0 p-6 h-full overflow-y-auto border-r border-gray-100'>
<div className='mb-8'>
<div className={`leading-[28px] text-lg font-bold ${s.textGradient}`}>{t('appDebug.generate.title')}</div>
<div className='mt-1 text-[13px] font-normal text-gray-500'>{t('appDebug.generate.description')}</div>
</div>
<div >
<div className='flex items-center'>
<div className='mr-3 shrink-0 leading-[18px] text-xs font-semibold text-gray-500 uppercase'>{t('appDebug.generate.tryIt')}</div>
<div className='grow h-px' style={{
background: 'linear-gradient(to right, rgba(243, 244, 246, 1), rgba(243, 244, 246, 0))',
}}></div>
</div>
<div className='flex flex-wrap'>
{tryList.map(item => (
<TryLabel
key={item.key}
Icon={item.icon}
text={t(`appDebug.generate.template.${item.key}.name`)}
onClick={handleChooseTemplate(item.key)}
/>
))}
</div>
</div>
{/* inputs */}
<div className='mt-2 space-y-5'>
<div className='space-y-2'>
<div className='text-[13px] font-medium text-gray-900'>{t('appDebug.automatic.intendedAudience')}</div>
<input className="w-full h-8 px-3 text-[13px] font-normal bg-gray-50 rounded-lg" placeholder={t('appDebug.automatic.intendedAudiencePlaceHolder') as string} value={audiences} onChange={e => setAudiences(e.target.value)} />
</div>
<div className='space-y-2'>
<div className='text-[13px] font-medium text-gray-900'>{t('appDebug.automatic.solveProblem')}</div>
<textarea className="w-full h-[200px] overflow-y-auto p-3 text-[13px] font-normal bg-gray-50 rounded-lg" placeholder={t('appDebug.automatic.solveProblemPlaceHolder') as string} value={hopingToSolve} onChange={e => setHopingToSolve(e.target.value)} />
<div className='mt-6'>
<div className='text-[0px]'>
<div className='mb-2 leading-5 text-sm font-medium text-gray-900'>{t('appDebug.generate.instruction')}</div>
<textarea className="w-full h-[200px] overflow-y-auto px-3 py-2 text-sm bg-gray-50 rounded-lg" placeholder={t('appDebug.generate.instructionPlaceHolder') as string} value={instruction} onChange={e => setInstruction(e.target.value)} />
</div>
<div className='mt-6 flex justify-end'>
<div className='mt-5 flex justify-end'>
<Button
className='flex space-x-2'
className='flex space-x-1'
variant='primary'
onClick={onGenerate}
disabled={isLoading}
>
{genIcon}
<span className='text-xs font-semibold text-white uppercase'>{t('appDebug.automatic.generate')}</span>
<Generator className='w-4 h-4 text-white' />
<span className='text-xs font-semibold text-white'>{t('appDebug.generate.generate')}</span>
</Button>
</div>
</div>
</div>}
</div>
{(!isLoading && res) && (
<div className='w-0 grow px-8 pt-6 h-full overflow-y-auto'>
<div className='mb-4 text-lg font-medium text-gray-900'>{t('appDebug.automatic.resTitle')}</div>
<div className='w-0 grow p-6 pb-0 h-full'>
<div className='shrink-0 mb-3 leading-[160%] text-base font-semibold text-gray-800'>{t('appDebug.generate.resTitle')}</div>
<div className={cn('max-h-[555px] overflow-y-auto', !isInLLMNode && 'pb-2')}>
<ConfigPrompt
mode={mode}
promptTemplate={res?.prompt || ''}
promptVariables={[]}
readonly
noTitle={isInLLMNode}
gradientBorder
editorHeight={isInLLMNode ? 524 : 0}
noResize={isInLLMNode}
/>
{!isInLLMNode && (
<>
{(res?.variables?.length && res?.variables?.length > 0)
? (
<ConfigVar
promptVariables={res?.variables.map(key => ({ key, name: key, type: 'string', required: true })) || []}
readonly
/>
)
: ''}
<ConfigPrompt
mode={mode}
promptTemplate={res?.prompt || ''}
promptVariables={[]}
readonly
/>
{(mode !== AppType.completion && res?.opening_statement) && (
<div className='mt-7'>
<GroupName name={t('appDebug.feature.groupChat.title')} />
<OpeningStatement
value={res?.opening_statement || ''}
readonly
/>
</div>
)}
</>
)}
</div>
{(res?.variables?.length && res?.variables?.length > 0)
? (
<ConfigVar
promptVariables={res?.variables.map(key => ({ key, name: key, type: 'string', required: true })) || []}
readonly
/>
)
: ''}
{(mode !== AppType.completion && res?.opening_statement) && (
<div className='mt-7'>
<GroupName name={t('appDebug.feature.groupChat.title')} />
<OpeningStatement
value={res?.opening_statement || ''}
readonly
/>
</div>
)}
<div className='sticky bottom-0 flex justify-end right-0 py-4 bg-white'>
<div className='flex justify-end py-4 bg-white'>
<Button onClick={onClose}>{t('common.operation.cancel')}</Button>
<Button variant='primary' className='ml-2' onClick={() => {
setShowConfirmOverwrite(true)
}}>{t('appDebug.automatic.apply')}</Button>
}}>{t('appDebug.generate.apply')}</Button>
</div>
</div>
)}
@ -208,8 +279,8 @@ const GetAutomaticRes: FC<IGetAutomaticResProps> = ({
{isShowAutoPromptResPlaceholder() && renderNoData}
{showConfirmOverwrite && (
<Confirm
title={t('appDebug.automatic.overwriteTitle')}
content={t('appDebug.automatic.overwriteMessage')}
title={t('appDebug.generate.overwriteTitle')}
content={t('appDebug.generate.overwriteMessage')}
isShow={showConfirmOverwrite}
onClose={() => setShowConfirmOverwrite(false)}
onConfirm={() => {

View File

@ -0,0 +1,7 @@
.textGradient {
background: linear-gradient(92deg, #2250F2 -29.55%, #0EBCF3 75.22%);
-webkit-background-clip: text;
-webkit-text-fill-color: transparent;
background-clip: text;
text-fill-color: transparent;
}

View File

@ -1,18 +1,20 @@
'use client'
import type { FC } from 'react'
import React, { useState } from 'react'
import { useTranslation } from 'react-i18next'
import { RiDeleteBinLine } from '@remixicon/react'
import {
RiDeleteBinLine,
RiEditLine,
} from '@remixicon/react'
import SettingsModal from '../settings-modal'
import type { DataSet } from '@/models/datasets'
import { DataSourceType } from '@/models/datasets'
import { formatNumber } from '@/utils/format'
import FileIcon from '@/app/components/base/file-icon'
import { Settings01 } from '@/app/components/base/icons/src/vender/line/general'
import { Folder } from '@/app/components/base/icons/src/vender/solid/files'
import { Globe06 } from '@/app/components/base/icons/src/vender/solid/mapsAndTravel'
import Drawer from '@/app/components/base/drawer'
import useBreakpoints, { MediaType } from '@/hooks/use-breakpoints'
import Badge from '@/app/components/base/badge'
import { useKnowledge } from '@/hooks/use-knowledge'
type ItemProps = {
className?: string
@ -27,12 +29,10 @@ const Item: FC<ItemProps> = ({
onSave,
onRemove,
}) => {
const { t } = useTranslation()
const media = useBreakpoints()
const isMobile = media === MediaType.mobile
const [showSettingsModal, setShowSettingsModal] = useState(false)
const { formatIndexingTechniqueAndMethod } = useKnowledge()
const handleSave = (newDataset: DataSet) => {
onSave(newDataset)
@ -65,22 +65,17 @@ const Item: FC<ItemProps> = ({
<div className='grow'>
<div className='flex items-center h-[18px]'>
<div className='grow text-[13px] font-medium text-gray-800 truncate' title={config.name}>{config.name}</div>
<div className='shrink-0 text-xs text-gray-500'>
{formatNumber(config.word_count)} {t('appDebug.feature.dataSet.words')} · {formatNumber(config.document_count)} {t('appDebug.feature.dataSet.textBlocks')}
</div>
<Badge
text={formatIndexingTechniqueAndMethod(config.indexing_technique, config.retrieval_model_dict?.search_method)}
/>
</div>
{/* {
config.description && (
<div className='text-xs text-gray-500'>{config.description}</div>
)
} */}
</div>
<div className='hidden rounded-lg group-hover:flex items-center justify-end absolute right-0 top-0 bottom-0 pr-2 w-[124px] bg-gradient-to-r from-white/50 to-white to-50%'>
<div
className='flex items-center justify-center mr-1 w-6 h-6 hover:bg-black/5 rounded-md cursor-pointer'
onClick={() => setShowSettingsModal(true)}
>
<Settings01 className='w-4 h-4 text-gray-500' />
<RiEditLine className='w-4 h-4 text-gray-500' />
</div>
<div
className='group/action flex items-center justify-center w-6 h-6 hover:bg-[#FEE4E2] rounded-md cursor-pointer'

View File

@ -44,7 +44,8 @@ const DatasetConfig: FC = () => {
const handleSave = (newDataset: DataSet) => {
const index = dataSet.findIndex(item => item.id === newDataset.id)
setDataSet([...dataSet.slice(0, index), newDataset, ...dataSet.slice(index + 1)])
const newDatasets = [...dataSet.slice(0, index), newDataset, ...dataSet.slice(index + 1)]
setDataSet(newDatasets)
formattingChangedDispatcher()
}
@ -74,7 +75,7 @@ const DatasetConfig: FC = () => {
title={t('appDebug.feature.dataSet.title')}
headerRight={
<div className='flex items-center gap-1'>
{!isAgent && <ParamsConfig />}
{!isAgent && <ParamsConfig disabled={!hasData} selectedDatasets={dataSet} />}
<OperationBtn type="add" onClick={showSelectDataSet} />
</div>
}

View File

@ -1,10 +1,12 @@
'use client'
import React from 'react'
import { memo, useMemo } from 'react'
import type { FC } from 'react'
import { useTranslation } from 'react-i18next'
import {
RiQuestionLine,
} from '@remixicon/react'
import WeightedScore from './weighted-score'
import TopKItem from '@/app/components/base/param-item/top-k-item'
import ScoreThresholdItem from '@/app/components/base/param-item/score-threshold-item'
import RadioCard from '@/app/components/base/radio-card/simple'
@ -16,13 +18,20 @@ import {
import type {
DatasetConfigs,
} from '@/models/debug'
import ModelSelector from '@/app/components/header/account-setting/model-provider-page/model-selector'
import { useModelListAndDefaultModelAndCurrentProviderAndModel } from '@/app/components/header/account-setting/model-provider-page/hooks'
import type { ModelConfig } from '@/app/components/workflow/types'
import ModelParameterModal from '@/app/components/header/account-setting/model-provider-page/model-parameter-modal'
import TooltipPlus from '@/app/components/base/tooltip-plus'
import { ModelTypeEnum } from '@/app/components/header/account-setting/model-provider-page/declarations'
import type {
DataSet,
WeightedScoreEnum,
} from '@/models/datasets'
import { RerankingModeEnum } from '@/models/datasets'
import cn from '@/utils/classnames'
import { useSelectedDatasetsMode } from '@/app/components/workflow/nodes/knowledge-retrieval/hooks'
import Switch from '@/app/components/base/switch'
type Props = {
datasetConfigs: DatasetConfigs
@ -31,6 +40,7 @@ type Props = {
singleRetrievalModelConfig?: ModelConfig
onSingleRetrievalModelChange?: (config: ModelConfig) => void
onSingleRetrievalModelParamsChange?: (config: ModelConfig) => void
selectedDatasets?: DataSet[]
}
const ConfigContent: FC<Props> = ({
@ -40,8 +50,10 @@ const ConfigContent: FC<Props> = ({
singleRetrievalModelConfig: singleRetrievalConfig = {} as ModelConfig,
onSingleRetrievalModelChange = () => { },
onSingleRetrievalModelParamsChange = () => { },
selectedDatasets = [],
}) => {
const { t } = useTranslation()
const selectedDatasetsMode = useSelectedDatasetsMode(selectedDatasets)
const type = datasetConfigs.retrieval_model
const setType = (value: RETRIEVE_TYPE) => {
onChange({
@ -54,7 +66,7 @@ const ConfigContent: FC<Props> = ({
defaultModel: rerankDefaultModel,
} = useModelListAndDefaultModelAndCurrentProviderAndModel(ModelTypeEnum.rerank)
const rerankModel = (() => {
if (datasetConfigs.reranking_model) {
if (datasetConfigs.reranking_model?.reranking_provider_name) {
return {
provider_name: datasetConfigs.reranking_model.reranking_provider_name,
model_name: datasetConfigs.reranking_model.reranking_model_name,
@ -93,14 +105,73 @@ const ConfigContent: FC<Props> = ({
})
}
const handleWeightedScoreChange = (value: { type: WeightedScoreEnum; value: number[] }) => {
const configs = {
...datasetConfigs,
weights: {
...datasetConfigs.weights!,
weight_type: value.type,
vector_setting: {
...datasetConfigs.weights!.vector_setting!,
vector_weight: value.value[0],
},
keyword_setting: {
keyword_weight: value.value[1],
},
},
}
onChange(configs)
}
const handleRerankModeChange = (mode: RerankingModeEnum) => {
onChange({
...datasetConfigs,
reranking_mode: mode,
})
}
const model = singleRetrievalConfig
const rerankingModeOptions = [
{
value: RerankingModeEnum.WeightedScore,
label: t('dataset.weightedScore.title'),
tips: t('dataset.weightedScore.description'),
},
{
value: RerankingModeEnum.RerankingModel,
label: t('common.modelProvider.rerankModel.key'),
tips: t('common.modelProvider.rerankModel.tip'),
},
]
const showWeightedScore = selectedDatasetsMode.allHighQuality
&& !selectedDatasetsMode.inconsistentEmbeddingModel
const showWeightedScorePanel = showWeightedScore && datasetConfigs.reranking_mode === RerankingModeEnum.WeightedScore && datasetConfigs.weights
const selectedRerankMode = datasetConfigs.reranking_mode || RerankingModeEnum.RerankingModel
const showRerankModel = useMemo(() => {
if (datasetConfigs.reranking_enable === false && selectedDatasetsMode.allEconomic)
return false
return true
}, [datasetConfigs.reranking_enable, selectedDatasetsMode.allEconomic])
return (
<div>
<div className='system-xl-semibold text-text-primary'>{t('dataset.retrievalSettings')}</div>
<div className='mt-2 space-y-3'>
<RadioCard
icon={<NTo1Retrieval className='shrink-0 mr-3 w-9 h-9 rounded-lg' />}
title={t('appDebug.datasetConfig.retrieveOneWay.title')}
title={(
<div className='flex items-center'>
{t('appDebug.datasetConfig.retrieveOneWay.title')}
<TooltipPlus popupContent={<div className='w-[320px]'>{t('dataset.nTo1RetrievalLegacy')}</div>}>
<div className='ml-1 flex items-center px-[5px] h-[18px] rounded-[5px] border border-text-accent-secondary system-2xs-medium-uppercase text-text-accent-secondary'>legacy</div>
</TooltipPlus>
</div>
)}
description={t('appDebug.datasetConfig.retrieveOneWay.description')}
isChosen={type === RETRIEVE_TYPE.oneWay}
onChosen={() => { setType(RETRIEVE_TYPE.oneWay) }}
@ -115,43 +186,152 @@ const ConfigContent: FC<Props> = ({
</div>
{type === RETRIEVE_TYPE.multiWay && (
<>
<div className='mt-6'>
<div className='leading-[32px] text-[13px] font-medium text-gray-900'>{t('common.modelProvider.rerankModel.key')}</div>
<div>
<ModelSelector
defaultModel={rerankModel && { provider: rerankModel?.provider_name, model: rerankModel?.model_name }}
onSelect={(v) => {
onChange({
...datasetConfigs,
reranking_model: {
reranking_provider_name: v.provider,
reranking_model_name: v.model,
},
})
}}
modelList={rerankModelList}
/>
</div>
</div>
<div className='mt-4 space-y-4'>
<TopKItem
value={datasetConfigs.top_k}
onChange={handleParamChange}
enable={true}
/>
<ScoreThresholdItem
value={datasetConfigs.score_threshold as number}
onChange={handleParamChange}
enable={datasetConfigs.score_threshold_enabled}
hasSwitch={true}
onSwitchChange={handleSwitch}
/>
<div className='mb-2 mt-4 h-[1px] bg-divider-subtle'></div>
<div
className='flex items-center mb-2 h-6 system-md-semibold text-text-secondary'
>
{t('dataset.rerankSettings')}
</div>
{
selectedDatasetsMode.inconsistentEmbeddingModel
&& (
<div className='mt-4 system-xs-regular text-text-warning'>
{t('dataset.inconsistentEmbeddingModelTip')}
</div>
)
}
{
selectedDatasetsMode.mixtureHighQualityAndEconomic
&& (
<div className='mt-4 system-xs-regular text-text-warning'>
{t('dataset.mixtureHighQualityAndEconomicTip')}
</div>
)
}
{
showWeightedScore && (
<div className='flex items-center justify-between'>
{
rerankingModeOptions.map(option => (
<div
key={option.value}
className={cn(
'flex items-center justify-center w-[calc((100%-8px)/2)] h-8 rounded-lg border border-components-option-card-option-border bg-components-option-card-option-bg cursor-pointer system-sm-medium text-text-secondary',
selectedRerankMode === option.value && 'border-[1.5px] border-components-option-card-option-selected-border bg-components-option-card-option-selected-bg text-text-primary',
)}
onClick={() => handleRerankModeChange(option.value)}
>
<div className='truncate'>{option.label}</div>
<TooltipPlus
popupContent={<div className='w-[200px]'>{option.tips}</div>}
hideArrow
>
<RiQuestionLine className='ml-0.5 w-3.5 h-4.5 text-text-quaternary' />
</TooltipPlus>
</div>
))
}
</div>
)
}
{
!showWeightedScorePanel && (
<div className='mt-2'>
<div className='flex items-center'>
{
selectedDatasetsMode.allEconomic && (
<Switch
size='md'
defaultValue={showRerankModel}
onChange={(v) => {
onChange({
...datasetConfigs,
reranking_enable: v,
})
}}
/>
)
}
<div className='ml-2 leading-[32px] text-[13px] font-medium text-gray-900'>{t('common.modelProvider.rerankModel.key')}</div>
<TooltipPlus popupContent={<div className="w-[200px]">{t('common.modelProvider.rerankModel.tip')}</div>}>
<RiQuestionLine className='ml-0.5 w-[14px] h-[14px] text-gray-400' />
</TooltipPlus>
</div>
<div>
<ModelSelector
defaultModel={rerankModel && { provider: rerankModel?.provider_name, model: rerankModel?.model_name }}
onSelect={(v) => {
onChange({
...datasetConfigs,
reranking_model: {
reranking_provider_name: v.provider,
reranking_model_name: v.model,
},
})
}}
modelList={rerankModelList}
/>
</div>
</div>
)
}
{
showWeightedScorePanel
&& (
<div className='mt-2 space-y-4'>
<WeightedScore
value={{
type: datasetConfigs.weights!.weight_type,
value: [
datasetConfigs.weights!.vector_setting.vector_weight,
datasetConfigs.weights!.keyword_setting.keyword_weight,
],
}}
onChange={handleWeightedScoreChange}
/>
<TopKItem
value={datasetConfigs.top_k}
onChange={handleParamChange}
enable={true}
/>
<ScoreThresholdItem
value={datasetConfigs.score_threshold as number}
onChange={handleParamChange}
enable={datasetConfigs.score_threshold_enabled}
hasSwitch={true}
onSwitchChange={handleSwitch}
/>
</div>
)
}
{
!showWeightedScorePanel
&& (
<div className='mt-4 space-y-4'>
<TopKItem
value={datasetConfigs.top_k}
onChange={handleParamChange}
enable={true}
/>
{
showRerankModel && (
<ScoreThresholdItem
value={datasetConfigs.score_threshold as number}
onChange={handleParamChange}
enable={datasetConfigs.score_threshold_enabled}
hasSwitch={true}
onSwitchChange={handleSwitch}
/>
)
}
</div>
)
}
</>
)}
{isInWorkflow && type === RETRIEVE_TYPE.oneWay && (
<div className='mt-6'>
<div className='mt-4'>
<div className='flex items-center space-x-0.5'>
<div className='leading-[32px] text-[13px] font-medium text-gray-900'>{t('common.modelProvider.systemReasoningModel.key')}</div>
<TooltipPlus
@ -180,4 +360,4 @@ const ConfigContent: FC<Props> = ({
</div >
)
}
export default React.memo(ConfigContent)
export default memo(ConfigContent)

View File

@ -1,29 +1,73 @@
'use client'
import type { FC } from 'react'
import { memo, useState } from 'react'
import { memo, useEffect, useState } from 'react'
import { useTranslation } from 'react-i18next'
import { useContext } from 'use-context-selector'
import { RiEqualizer2Line } from '@remixicon/react'
import ConfigContent from './config-content'
import cn from '@/utils/classnames'
import { Settings04 } from '@/app/components/base/icons/src/vender/line/general'
import ConfigContext from '@/context/debug-configuration'
import Modal from '@/app/components/base/modal'
import Button from '@/app/components/base/button'
import { RETRIEVE_TYPE } from '@/types/app'
import Toast from '@/app/components/base/toast'
import { DATASET_DEFAULT } from '@/config'
import { useModelListAndDefaultModelAndCurrentProviderAndModel } from '@/app/components/header/account-setting/model-provider-page/hooks'
import { ModelTypeEnum } from '@/app/components/header/account-setting/model-provider-page/declarations'
import type { DataSet } from '@/models/datasets'
import type { DatasetConfigs } from '@/models/debug'
import {
getMultipleRetrievalConfig,
getSelectedDatasetsMode,
} from '@/app/components/workflow/nodes/knowledge-retrieval/utils'
const ParamsConfig: FC = () => {
type ParamsConfigProps = {
disabled?: boolean
selectedDatasets: DataSet[]
}
const ParamsConfig = ({
disabled,
selectedDatasets,
}: ParamsConfigProps) => {
const { t } = useTranslation()
const [open, setOpen] = useState(false)
const {
datasetConfigs,
setDatasetConfigs,
rerankSettingModalOpen,
setRerankSettingModalOpen,
} = useContext(ConfigContext)
const [tempDataSetConfigs, setTempDataSetConfigs] = useState(datasetConfigs)
useEffect(() => {
const {
allEconomic,
} = getSelectedDatasetsMode(selectedDatasets)
const { datasets, retrieval_model, score_threshold_enabled, ...restConfigs } = datasetConfigs
let rerankEnable = restConfigs.reranking_enable
if (allEconomic && !restConfigs.reranking_model?.reranking_provider_name && rerankEnable === undefined)
rerankEnable = false
setTempDataSetConfigs({
...getMultipleRetrievalConfig({
top_k: restConfigs.top_k,
score_threshold: restConfigs.score_threshold,
reranking_model: restConfigs.reranking_model && {
provider: restConfigs.reranking_model.reranking_provider_name,
model: restConfigs.reranking_model.reranking_model_name,
},
reranking_mode: restConfigs.reranking_mode,
weights: restConfigs.weights,
reranking_enable: rerankEnable,
}, selectedDatasets),
reranking_model: restConfigs.reranking_model && {
reranking_provider_name: restConfigs.reranking_model.reranking_provider_name,
reranking_model_name: restConfigs.reranking_model.reranking_model_name,
},
retrieval_model,
score_threshold_enabled,
datasets,
})
}, [selectedDatasets, datasetConfigs])
const {
defaultModel: rerankDefaultModel,
currentModel: isRerankDefaultModelVaild,
@ -55,45 +99,68 @@ const ParamsConfig: FC = () => {
} as any
}
setDatasetConfigs(config)
setOpen(false)
setRerankSettingModalOpen(false)
}
const handleSetTempDataSetConfigs = (newDatasetConfigs: DatasetConfigs) => {
const { datasets, retrieval_model, score_threshold_enabled, ...restConfigs } = newDatasetConfigs
const retrievalConfig = getMultipleRetrievalConfig({
top_k: restConfigs.top_k,
score_threshold: restConfigs.score_threshold,
reranking_model: restConfigs.reranking_model && {
provider: restConfigs.reranking_model.reranking_provider_name,
model: restConfigs.reranking_model.reranking_model_name,
},
reranking_mode: restConfigs.reranking_mode,
weights: restConfigs.weights,
reranking_enable: restConfigs.reranking_enable,
}, selectedDatasets)
setTempDataSetConfigs({
...retrievalConfig,
reranking_model: restConfigs.reranking_model && {
reranking_provider_name: restConfigs.reranking_model.reranking_provider_name,
reranking_model_name: restConfigs.reranking_model.reranking_model_name,
},
retrieval_model,
score_threshold_enabled,
datasets,
})
}
return (
<div>
<div
className={cn('flex items-center rounded-md h-7 px-3 space-x-1 text-gray-700 cursor-pointer hover:bg-gray-200', open && 'bg-gray-200')}
<Button
variant='ghost'
size='small'
className={cn('h-7', rerankSettingModalOpen && 'bg-components-button-ghost-bg-hover')}
onClick={() => {
setTempDataSetConfigs({
...datasetConfigs,
top_k: datasetConfigs.top_k || DATASET_DEFAULT.top_k,
score_threshold: datasetConfigs.score_threshold || DATASET_DEFAULT.score_threshold,
})
setOpen(true)
setRerankSettingModalOpen(true)
}}
disabled={disabled}
>
<Settings04 className="w-[14px] h-[14px]" />
<div className='text-xs font-medium'>
{t('appDebug.datasetConfig.params')}
</div>
</div>
<RiEqualizer2Line className='mr-1 w-3.5 h-3.5' />
{t('dataset.retrievalSettings')}
</Button>
{
open && (
rerankSettingModalOpen && (
<Modal
isShow={open}
isShow={rerankSettingModalOpen}
onClose={() => {
setOpen(false)
setRerankSettingModalOpen(false)
}}
className='sm:min-w-[528px]'
title={t('appDebug.datasetConfig.settingTitle')}
>
<ConfigContent
datasetConfigs={tempDataSetConfigs}
onChange={setTempDataSetConfigs}
onChange={handleSetTempDataSetConfigs}
selectedDatasets={selectedDatasets}
/>
<div className='mt-6 flex justify-end'>
<Button className='mr-2 flex-shrink-0' onClick={() => {
setOpen(false)
setRerankSettingModalOpen(false)
}}>{t('common.operation.cancel')}</Button>
<Button variant='primary' className='flex-shrink-0' onClick={handleSave} >{t('common.operation.save')}</Button>
</div>

View File

@ -0,0 +1,112 @@
import { memo, useCallback } from 'react'
import { useTranslation } from 'react-i18next'
import {
DEFAULT_WEIGHTED_SCORE,
WeightedScoreEnum,
} from '@/models/datasets'
import Slider from '@/app/components/base/slider'
import cn from '@/utils/classnames'
const formatNumber = (value: number) => {
if (value > 0 && value < 1)
return `0.${value * 10}`
else if (value === 1)
return '1.0'
return value
}
type Value = {
type: WeightedScoreEnum
value: number[]
}
type WeightedScoreProps = {
value: Value
onChange: (value: Value) => void
}
const WeightedScore = ({
value,
onChange = () => {},
}: WeightedScoreProps) => {
const { t } = useTranslation()
const options = [
{
value: WeightedScoreEnum.SemanticFirst,
label: t('dataset.weightedScore.semanticFirst'),
},
{
value: WeightedScoreEnum.KeywordFirst,
label: t('dataset.weightedScore.keywordFirst'),
},
{
value: WeightedScoreEnum.Customized,
label: t('dataset.weightedScore.customized'),
},
]
const disabled = value.type !== WeightedScoreEnum.Customized
const handleTypeChange = useCallback((type: WeightedScoreEnum) => {
const result = { ...value, type }
if (type === WeightedScoreEnum.SemanticFirst)
result.value = [DEFAULT_WEIGHTED_SCORE.semanticFirst.semantic, DEFAULT_WEIGHTED_SCORE.semanticFirst.keyword]
if (type === WeightedScoreEnum.KeywordFirst)
result.value = [DEFAULT_WEIGHTED_SCORE.keywordFirst.semantic, DEFAULT_WEIGHTED_SCORE.keywordFirst.keyword]
onChange(result)
}, [value, onChange])
return (
<div>
<div className='flex items-center mb-1 space-x-4'>
{
options.map(option => (
<div
key={option.value}
className='flex py-1.5 max-w-[calc((100%-32px)/3)] system-sm-regular text-text-secondary cursor-pointer'
onClick={() => handleTypeChange(option.value)}
>
<div
className={cn(
'shrink-0 mr-2 w-4 h-4 bg-components-radio-bg border border-components-radio-border rounded-full shadow-xs',
value.type === option.value && 'border-[5px] border-components-radio-border-checked',
)}
></div>
<div className='truncate' title={option.label}>{option.label}</div>
</div>
))
}
</div>
<div className='flex items-center px-3 h-9 space-x-3 rounded-lg border border-components-panel-border'>
<div className='shrink-0 flex items-center w-[90px] system-xs-semibold-uppercase text-util-colors-blue-blue-500'>
<div className='mr-1 truncate uppercase' title={t('dataset.weightedScore.semantic') || ''}>
{t('dataset.weightedScore.semantic')}
</div>
{formatNumber(value.value[0])}
</div>
<Slider
className={cn('grow h-0.5 bg-gradient-to-r from-[#53B1FD] to-[#2ED3B7]', disabled && 'cursor-not-allowed')}
max={1.0}
min={0}
step={0.1}
value={value.value[0]}
onChange={v => onChange({ type: value.type, value: [v, (10 - v * 10) / 10] })}
disabled={disabled}
thumbClassName={cn(disabled && '!cursor-not-allowed')}
trackClassName='!bg-transparent'
/>
<div className='shrink-0 flex items-center justify-end w-[90px] system-xs-semibold-uppercase text-util-colors-cyan-cyan-500'>
{formatNumber(value.value[1])}
<div className='ml-1 truncate uppercase' title={t('dataset.weightedScore.keyword') || ''}>
{t('dataset.weightedScore.keyword')}
</div>
</div>
</div>
</div>
)
}
export default memo(WeightedScore)

View File

@ -13,7 +13,8 @@ import type { DataSet } from '@/models/datasets'
import Button from '@/app/components/base/button'
import { fetchDatasets } from '@/service/datasets'
import Loading from '@/app/components/base/loading'
import { formatNumber } from '@/utils/format'
import Badge from '@/app/components/base/badge'
import { useKnowledge } from '@/hooks/use-knowledge'
export type ISelectDataSetProps = {
isShow: boolean
@ -38,6 +39,7 @@ const SelectDataSet: FC<ISelectDataSetProps> = ({
const listRef = useRef<HTMLDivElement>(null)
const [page, setPage, getPage] = useGetState(1)
const [isNoMore, setIsNoMore] = useState(false)
const { formatIndexingTechniqueAndMethod } = useKnowledge()
useInfiniteScroll(
async () => {
@ -45,7 +47,7 @@ const SelectDataSet: FC<ISelectDataSetProps> = ({
const { data, has_more } = await fetchDatasets({ url: '/datasets', params: { page } })
setPage(getPage() + 1)
setIsNoMore(!has_more)
const newList = [...(datasets || []), ...data]
const newList = [...(datasets || []), ...data.filter(item => item.indexing_technique)]
setDataSets(newList)
setLoaded(true)
if (!selected.find(item => !item.name))
@ -136,14 +138,13 @@ const SelectDataSet: FC<ISelectDataSetProps> = ({
<span className='ml-1 shrink-0 px-1 border boder-gray-200 rounded-md text-gray-500 text-xs font-normal leading-[18px]'>{t('dataset.unavailable')}</span>
)}
</div>
<div className={cn('shrink-0 flex text-xs text-gray-500 overflow-hidden whitespace-nowrap', !item.embedding_available && 'opacity-50')}>
<span className='max-w-[100px] overflow-hidden text-ellipsis whitespace-nowrap'>{formatNumber(item.word_count)}</span>
{t('appDebug.feature.dataSet.words')}
<span className='px-0.5'>·</span>
<span className='max-w-[100px] min-w-[8px] overflow-hidden text-ellipsis whitespace-nowrap'>{formatNumber(item.document_count)} </span>
{t('appDebug.feature.dataSet.textBlocks')}
</div>
{
item.indexing_technique && (
<Badge
text={formatIndexingTechniqueAndMethod(item.indexing_technique, item.retrieval_model_dict?.search_method)}
/>
)
}
</div>
))}
</div>

View File

@ -259,7 +259,7 @@ const SettingsModal: FC<SettingsModalProps> = ({
{/* Retrieval Method Config */}
<div className={rowClass}>
<div className={labelClass}>
<div className={cn(labelClass, 'w-auto min-w-[168px]')}>
<div>
<div>{t('datasetSettings.form.retrievalSetting.title')}</div>
<div className='leading-[18px] text-xs font-normal text-gray-500'>
@ -268,7 +268,7 @@ const SettingsModal: FC<SettingsModalProps> = ({
</div>
</div>
</div>
<div className='w-[480px]'>
<div>
{indexMethod === 'high_quality'
? (
<RetrievalMethodConfig

View File

@ -46,7 +46,7 @@ import { fetchDatasets } from '@/service/datasets'
import { useProviderContext } from '@/context/provider-context'
import { AgentStrategy, AppType, ModelModeType, RETRIEVE_TYPE, Resolution, TransferMethod } from '@/types/app'
import { PromptMode } from '@/models/debug'
import { ANNOTATION_DEFAULT, DEFAULT_AGENT_SETTING, DEFAULT_CHAT_PROMPT_CONFIG, DEFAULT_COMPLETION_PROMPT_CONFIG } from '@/config'
import { ANNOTATION_DEFAULT, DATASET_DEFAULT, DEFAULT_AGENT_SETTING, DEFAULT_CHAT_PROMPT_CONFIG, DEFAULT_COMPLETION_PROMPT_CONFIG } from '@/config'
import SelectDataSet from '@/app/components/app/configuration/dataset-config/select-dataset'
import { useModalContext } from '@/context/modal-context'
import useBreakpoints, { MediaType } from '@/hooks/use-breakpoints'
@ -57,6 +57,10 @@ import { useTextGenerationCurrentProviderAndModelAndModelList } from '@/app/comp
import { fetchCollectionList } from '@/service/tools'
import { type Collection } from '@/app/components/tools/types'
import { useStore as useAppStore } from '@/app/components/app/store'
import {
getMultipleRetrievalConfig,
getSelectedDatasetsMode,
} from '@/app/components/workflow/nodes/knowledge-retrieval/utils'
type PublishConfig = {
modelConfig: ModelConfig
@ -174,14 +178,14 @@ const Configuration: FC = () => {
}, [])
const [datasetConfigs, setDatasetConfigs] = useState<DatasetConfigs>({
retrieval_model: RETRIEVE_TYPE.oneWay,
retrieval_model: RETRIEVE_TYPE.multiWay,
reranking_model: {
reranking_provider_name: '',
reranking_model_name: '',
},
top_k: 2,
top_k: DATASET_DEFAULT.top_k,
score_threshold_enabled: false,
score_threshold: 0.7,
score_threshold: DATASET_DEFAULT.score_threshold,
datasets: {
datasets: [],
},
@ -202,6 +206,7 @@ const Configuration: FC = () => {
const hasSetContextVar = !!contextVar
const [isShowSelectDataSet, { setTrue: showSelectDataSet, setFalse: hideSelectDataSet }] = useBoolean(false)
const selectedIds = dataSets.map(item => item.id)
const [rerankSettingModalOpen, setRerankSettingModalOpen] = useState(false)
const handleSelect = (data: DataSet[]) => {
if (isEqual(data.map(item => item.id), dataSets.map(item => item.id))) {
hideSelectDataSet()
@ -209,6 +214,7 @@ const Configuration: FC = () => {
}
formattingChangedDispatcher()
let newDatasets = data
if (data.find(item => !item.name)) { // has not loaded selected dataset
const newSelected = produce(data, (draft: any) => {
data.forEach((item, index) => {
@ -220,11 +226,45 @@ const Configuration: FC = () => {
})
})
setDataSets(newSelected)
newDatasets = newSelected
}
else {
setDataSets(data)
}
hideSelectDataSet()
const {
allEconomic,
mixtureHighQualityAndEconomic,
inconsistentEmbeddingModel,
} = getSelectedDatasetsMode(newDatasets)
if (allEconomic || mixtureHighQualityAndEconomic || inconsistentEmbeddingModel)
setRerankSettingModalOpen(true)
const { datasets, retrieval_model, score_threshold_enabled, ...restConfigs } = datasetConfigs
const retrievalConfig = getMultipleRetrievalConfig({
top_k: restConfigs.top_k,
score_threshold: restConfigs.score_threshold,
reranking_model: restConfigs.reranking_model && {
provider: restConfigs.reranking_model.reranking_provider_name,
model: restConfigs.reranking_model.reranking_model_name,
},
reranking_mode: restConfigs.reranking_mode,
weights: restConfigs.weights,
reranking_enable: restConfigs.reranking_enable,
}, newDatasets)
setDatasetConfigs({
...retrievalConfig,
reranking_model: restConfigs.reranking_model && {
reranking_provider_name: restConfigs.reranking_model.reranking_provider_name,
reranking_model_name: restConfigs.reranking_model.reranking_model_name,
},
retrieval_model,
score_threshold_enabled,
datasets,
})
}
const [isShowHistoryModal, { setTrue: showHistoryModal, setFalse: hideHistoryModal }] = useBoolean(false)
@ -509,7 +549,7 @@ const Configuration: FC = () => {
syncToPublishedConfig(config)
setPublishedConfig(config)
setDatasetConfigs({
retrieval_model: RETRIEVE_TYPE.oneWay,
retrieval_model: RETRIEVE_TYPE.multiWay,
...modelConfig.dataset_configs,
})
setHasFetchedDetail(true)
@ -744,6 +784,8 @@ const Configuration: FC = () => {
isShowVisionConfig,
visionConfig,
setVisionConfig: handleSetVisionConfig,
rerankSettingModalOpen,
setRerankSettingModalOpen,
}}
>
<>

View File

@ -0,0 +1,49 @@
@tailwind components;
/*
 * Action button component classes.
 *
 * `.action-btn` is the base: an icon-sized, clickable flex container using the
 * design-system text/state tokens. Size is picked by exactly one of the
 * `.action-btn-{xs,m,l,xl}` modifiers, and an optional state modifier
 * (`-active`, `-disabled`, `-destructive`) is combined with the base class.
 */
@layer components {
/* Base: centered icon container; tertiary text that darkens on hover. */
.action-btn {
@apply inline-flex justify-center items-center cursor-pointer text-text-tertiary
hover:text-text-secondary
hover:bg-state-base-hover
}
/* Standalone disabled cursor (also usable without the base class). */
.action-btn-disabled {
@apply cursor-not-allowed
}
/* Size modifiers — padding, fixed square dimensions, corner radius. */
.action-btn-xl {
@apply p-2 w-9 h-9 rounded-lg
}
.action-btn-l {
@apply p-1.5 w-[34px] h-[34px] rounded-lg
}
/* m is for the regular button */
.action-btn-m {
@apply p-0.5 w-6 h-6 rounded-lg
}
.action-btn-xs {
@apply p-0 w-5 h-5 rounded
}
/* State: selected/toggled-on appearance using accent tokens. */
.action-btn.action-btn-active {
@apply
text-text-accent
bg-state-accent-active
hover:bg-state-accent-active-alt
}
/* State: disabled colors (combine with .action-btn-disabled for the cursor). */
.action-btn.action-btn-disabled {
@apply
text-text-disabled
}
/* State: dangerous action (e.g. delete) styling. */
.action-btn.action-btn-destructive {
@apply
text-text-destructive
bg-state-destructive-hover
}
}

View File

@ -0,0 +1,70 @@
import type { CSSProperties } from 'react'
import React from 'react'
import { type VariantProps, cva } from 'class-variance-authority'
import classNames from '@/utils/classnames'

// Visual states an action button can be in; values double as semantic names.
enum ActionButtonState {
  Destructive = 'destructive',
  Active = 'active',
  Disabled = 'disabled',
  Default = '',
}

// Size variants map onto the `.action-btn-*` component classes; `m` is the default.
const actionButtonVariants = cva(
  'action-btn',
  {
    variants: {
      size: {
        xs: 'action-btn-xs',
        m: 'action-btn-m',
        l: 'action-btn-l',
        xl: 'action-btn-xl',
      },
    },
    defaultVariants: {
      size: 'm',
    },
  },
)

export type ActionButtonProps = {
  size?: 'xs' | 'm' | 'l' | 'xl'
  state?: ActionButtonState
  styleCss?: CSSProperties
} & React.ButtonHTMLAttributes<HTMLButtonElement> & VariantProps<typeof actionButtonVariants>

// State → modifier-class lookup; Default maps to the empty string (no modifier).
const stateClassMap: Record<ActionButtonState, string> = {
  [ActionButtonState.Destructive]: 'action-btn-destructive',
  [ActionButtonState.Active]: 'action-btn-active',
  [ActionButtonState.Disabled]: 'action-btn-disabled',
  [ActionButtonState.Default]: '',
}

function getActionButtonState(state: ActionButtonState) {
  return stateClassMap[state]
}

/**
 * Icon-sized button styled via the `.action-btn` component classes.
 * Forwards its ref to the underlying <button> and spreads any extra
 * button attributes (onClick, aria-*, disabled, …) onto it.
 */
const ActionButton = React.forwardRef<HTMLButtonElement, ActionButtonProps>(
  ({ className, size, state = ActionButtonState.Default, styleCss, children, ...props }, ref) => {
    const mergedClassName = classNames(
      actionButtonVariants({ className, size }),
      getActionButtonState(state),
    )
    return (
      <button
        type='button'
        className={mergedClassName}
        ref={ref}
        style={styleCss}
        {...props}
      >
        {children}
      </button>
    )
  },
)
ActionButton.displayName = 'ActionButton'

export default ActionButton
export { ActionButton, ActionButtonState, actionButtonVariants }

View File

@ -0,0 +1,25 @@
import { memo } from 'react'
import cn from '@/utils/classnames'

// Static classes shared by every badge: compact pill with uppercase micro-text.
const baseClasses = 'inline-flex items-center px-[5px] h-5 rounded-[5px] border border-divider-deep system-2xs-medium-uppercase leading-3 text-text-tertiary'

type BadgeProps = {
  className?: string
  text: string
}

/**
 * Small read-only label pill. `className` is appended after the base
 * classes so callers can override individual utilities.
 */
function Badge(props: BadgeProps) {
  const { className, text } = props
  return (
    <div className={cn(baseClasses, className)}>
      {text}
    </div>
  )
}

export default memo(Badge)

View File

@ -2,46 +2,185 @@
@layer components {
.btn {
@apply inline-flex justify-center items-center border-[0.5px] font-medium cursor-pointer whitespace-nowrap shadow;
@apply inline-flex justify-center items-center cursor-pointer whitespace-nowrap;
}
.btn-disabled {
@apply opacity-60 cursor-not-allowed;
@apply cursor-not-allowed;
}
.btn-small {
@apply px-2 h-6 rounded-md text-xs
@apply px-2 h-6 rounded-md text-xs font-medium;
}
.btn-medium {
@apply px-3.5 h-8 rounded-lg text-[13px]
@apply px-3.5 h-8 rounded-lg text-[13px] leading-4 font-medium;
}
.btn-large {
@apply px-4 h-9 rounded-[10px] text-sm font-semibold
}
.btn-secondary {
@apply bg-white hover:bg-white/80 border-gray-200 hover:border-gray-300 text-gray-700;
}
.btn-secondary-accent {
@apply bg-white hover:bg-white/80 border-gray-200 hover:border-gray-300 text-primary-600;
@apply px-4 h-9 rounded-[10px] text-sm font-semibold;
}
.btn-primary {
@apply bg-primary-600 hover:bg-primary-700 text-white;
@apply
shadow
bg-components-button-primary-bg
border-components-button-primary-border
hover:bg-components-button-primary-bg-hover
hover:border-components-button-primary-border-hover
text-components-button-primary-text;
}
.btn-primary.btn-destructive {
@apply
bg-components-button-destructive-primary-bg
border-components-button-destructive-primary-border
hover:bg-components-button-destructive-primary-bg-hover
hover:border-components-button-destructive-primary-border-hover
text-components-button-destructive-primary-text;
}
.btn-primary.btn-disabled {
@apply
shadow-none
bg-components-button-primary-bg-disabled
border-components-button-primary-border-disabled
text-components-button-primary-text-disabled;
}
.btn-primary.btn-destructive.btn-disabled {
@apply
shadow-none
bg-components-button-destructive-primary-bg-disabled
border-components-button-destructive-primary-border-disabled
text-components-button-destructive-primary-text-disabled;
}
.btn-secondary {
@apply
border-[0.5px]
shadow-xs
bg-components-button-secondary-bg
border-components-button-secondary-border
hover:bg-components-button-secondary-bg-hover
hover:border-components-button-secondary-border-hover
text-components-button-secondary-text;
}
.btn-secondary.btn-disabled {
@apply
bg-components-button-secondary-bg-disabled
border-components-button-secondary-border-disabled
text-components-button-secondary-text-disabled;
}
.btn-secondary.btn-destructive {
@apply
bg-components-button-destructive-secondary-bg
border-components-button-destructive-secondary-border
hover:bg-components-button-destructive-secondary-bg-hover
hover:border-components-button-destructive-secondary-border-hover
text-components-button-destructive-secondary-text;
}
.btn-secondary.btn-destructive.btn-disabled {
@apply
bg-components-button-destructive-secondary-bg-disabled
border-components-button-destructive-secondary-border-disabled
text-components-button-destructive-secondary-text-disabled;
}
.btn-secondary-accent {
@apply
border-[0.5px]
shadow-xs
bg-components-button-secondary-bg
border-components-button-secondary-border
hover:bg-components-button-secondary-bg-hover
hover:border-components-button-secondary-border-hover
text-components-button-secondary-accent-text;
}
.btn-secondary-accent.btn-disabled {
@apply
bg-components-button-secondary-bg-disabled
border-components-button-secondary-border-disabled
text-components-button-secondary-accent-text-disabled;
}
.btn-warning {
@apply bg-red-600 hover:bg-red-700 text-white;
@apply
bg-components-button-destructive-primary-bg
border-components-button-destructive-primary-border
hover:bg-components-button-destructive-primary-bg-hover
hover:border-components-button-destructive-primary-border-hover
text-components-button-destructive-primary-text;
}
.btn-ghost {
@apply bg-transparent hover:bg-gray-200 border-transparent shadow-none text-gray-700;
.btn-warning.btn-disabled {
@apply
bg-components-button-destructive-primary-bg-disabled
border-components-button-destructive-primary-border-disabled
text-components-button-destructive-primary-text-disabled;
}
.btn-tertiary {
@apply bg-[#F2F4F7] hover:bg-[#E9EBF0] border-transparent shadow-none text-gray-700;
@apply
bg-components-button-tertiary-bg
hover:bg-components-button-tertiary-bg-hover
text-components-button-tertiary-text;
}
.btn-tertiary.btn-disabled {
@apply
bg-components-button-tertiary-bg-disabled
text-components-button-tertiary-text-disabled;
}
.btn-tertiary.btn-destructive {
@apply
bg-components-button-destructive-tertiary-bg
hover:bg-components-button-destructive-tertiary-bg-hover
text-components-button-destructive-tertiary-text;
}
.btn-tertiary.btn-destructive.btn-disabled {
@apply
bg-components-button-destructive-tertiary-bg-disabled
text-components-button-destructive-tertiary-text-disabled;
}
.btn-ghost {
@apply
hover:bg-components-button-ghost-bg-hover
text-components-button-ghost-text;
}
.btn-ghost.btn-disabled {
@apply
text-components-button-ghost-text-disabled;
}
.btn-ghost.btn-destructive {
@apply
hover:bg-components-button-destructive-ghost-bg-hover
text-components-button-destructive-ghost-text;
}
.btn-ghost.btn-destructive.btn-disabled {
@apply
text-components-button-destructive-ghost-text-disabled;
}
.btn-ghost-accent {
@apply
hover:bg-state-accent-hover
text-components-button-secondary-accent-text;
}
.btn-ghost-accent.btn-disabled {
@apply
text-components-button-secondary-accent-text-disabled;
}
}

View File

@ -14,6 +14,7 @@ const buttonVariants = cva(
'secondary': 'btn-secondary',
'secondary-accent': 'btn-secondary-accent',
'ghost': 'btn-ghost',
'ghost-accent': 'btn-ghost-accent',
'tertiary': 'btn-tertiary',
},
size: {
@ -30,16 +31,20 @@ const buttonVariants = cva(
)
export type ButtonProps = {
destructive?: boolean
loading?: boolean
styleCss?: CSSProperties
} & React.ButtonHTMLAttributes<HTMLButtonElement> & VariantProps<typeof buttonVariants>
const Button = React.forwardRef<HTMLButtonElement, ButtonProps>(
({ className, variant, size, loading, styleCss, children, ...props }, ref) => {
({ className, variant, size, destructive, loading, styleCss, children, ...props }, ref) => {
return (
<button
type='button'
className={classNames(buttonVariants({ variant, size, className }))}
className={classNames(
buttonVariants({ variant, size, className }),
destructive && 'btn-destructive',
)}
ref={ref}
style={styleCss}
{...props}

View File

@ -111,7 +111,7 @@ const ChatWrapper = () => {
)
}
return <div className='mb-6' />
return null
}, [currentConversationId, inputsForms, isMobile])
return (

View File

@ -3,6 +3,8 @@ import {
useState,
} from 'react'
import { useAsyncEffect } from 'ahooks'
import { useTranslation } from 'react-i18next'
import { RiLoopLeftLine } from '@remixicon/react'
import {
EmbeddedChatbotContext,
useEmbeddedChatbotContext,
@ -19,8 +21,10 @@ import LogoHeader from '@/app/components/base/logo/logo-embeded-chat-header'
import Header from '@/app/components/base/chat/embedded-chatbot/header'
import ConfigPanel from '@/app/components/base/chat/embedded-chatbot/config-panel'
import ChatWrapper from '@/app/components/base/chat/embedded-chatbot/chat-wrapper'
import Tooltip from '@/app/components/base/tooltip'
const Chatbot = () => {
const { t } = useTranslation()
const {
isMobile,
appInfoError,
@ -80,7 +84,22 @@ const Chatbot = () => {
<Loading type='app' />
)}
{chatReady && !appChatListDataLoading && (
<ChatWrapper />
<div className='relative h-full pt-8 mx-auto w-full max-w-[720px]'>
{!isMobile && (
<div className='absolute top-2.5 right-3 z-20'>
<Tooltip
selector={'embed-scene-restart-button'}
htmlContent={t('share.chat.resetChat')}
position='top'
>
<div className='p-1.5 bg-white border-[0.5px] border-gray-100 rounded-lg shadow-md cursor-pointer' onClick={handleNewConversation}>
<RiLoopLeftLine className="h-4 w-4 text-gray-500"/>
</div>
</Tooltip>
</div>
)}
<ChatWrapper />
</div>
)}
</div>
</div>

View File

@ -0,0 +1,4 @@
<svg width="14" height="14" viewBox="0 0 14 14" fill="none" xmlns="http://www.w3.org/2000/svg">
<path opacity="0.5" d="M10.5402 2.95679L10.5402 2.95685C10.4455 3.05146 10.3424 3.13459 10.2314 3.2072C10.3429 3.27923 10.4468 3.36165 10.5422 3.45535L10.5402 2.95679ZM10.5402 2.95679C10.6348 2.86217 10.718 2.75907 10.7906 2.64807C10.8626 2.75955 10.945 2.86339 11.0387 2.95881L11.0388 2.95888C11.1304 3.05224 11.2302 3.13482 11.3377 3.20717C11.2297 3.27895 11.1292 3.36081 11.0367 3.45327L11.0366 3.45333C10.9442 3.5458 10.8623 3.64635 10.7905 3.75431M10.5402 2.95679L10.7905 3.75431M10.7905 3.75431C10.7182 3.64686 10.6356 3.54707 10.5422 3.45538L10.7905 3.75431Z" stroke="#155EEF" stroke-width="1.25"/>
<path d="M6.99659 2.85105C6.96323 2.55641 6.71414 2.33368 6.41758 2.33337C6.12107 2.33307 5.87146 2.55529 5.83751 2.84987C5.67932 4.2213 5.27205 5.16213 4.6339 5.80028C3.99575 6.43841 3.05492 6.84569 1.68349 7.00389C1.3889 7.03784 1.16669 7.28745 1.16699 7.58396C1.1673 7.88052 1.39002 8.12961 1.68467 8.16297C3.03291 8.31569 3.99517 8.72292 4.64954 9.36546C5.30035 10.0045 5.71535 10.944 5.83593 12.3017C5.86271 12.6029 6.11523 12.8337 6.41763 12.8334C6.72009 12.833 6.97209 12.6016 6.99817 12.3003C7.11367 10.9656 7.52836 10.005 8.18344 9.34982C8.83858 8.69474 9.79922 8.28005 11.1339 8.16455C11.4352 8.13847 11.6666 7.88647 11.667 7.58402C11.6673 7.28162 11.4365 7.02909 11.1353 7.00232C9.77758 6.88174 8.83812 6.46676 8.19908 5.81592C7.55653 5.16155 7.14931 4.19929 6.99659 2.85105Z" fill="#155EEF"/>
</svg>

After

Width:  |  Height:  |  Size: 1.5 KiB

View File

@ -0,0 +1,37 @@
{
"icon": {
"type": "element",
"isRootNode": true,
"name": "svg",
"attributes": {
"width": "14",
"height": "14",
"viewBox": "0 0 14 14",
"fill": "none",
"xmlns": "http://www.w3.org/2000/svg"
},
"children": [
{
"type": "element",
"name": "path",
"attributes": {
"opacity": "0.5",
"d": "M10.5402 2.95679L10.5402 2.95685C10.4455 3.05146 10.3424 3.13459 10.2314 3.2072C10.3429 3.27923 10.4468 3.36165 10.5422 3.45535L10.5402 2.95679ZM10.5402 2.95679C10.6348 2.86217 10.718 2.75907 10.7906 2.64807C10.8626 2.75955 10.945 2.86339 11.0387 2.95881L11.0388 2.95888C11.1304 3.05224 11.2302 3.13482 11.3377 3.20717C11.2297 3.27895 11.1292 3.36081 11.0367 3.45327L11.0366 3.45333C10.9442 3.5458 10.8623 3.64635 10.7905 3.75431M10.5402 2.95679L10.7905 3.75431M10.7905 3.75431C10.7182 3.64686 10.6356 3.54707 10.5422 3.45538L10.7905 3.75431Z",
"stroke": "currentColor",
"stroke-width": "1.25"
},
"children": []
},
{
"type": "element",
"name": "path",
"attributes": {
"d": "M6.99659 2.85105C6.96323 2.55641 6.71414 2.33368 6.41758 2.33337C6.12107 2.33307 5.87146 2.55529 5.83751 2.84987C5.67932 4.2213 5.27205 5.16213 4.6339 5.80028C3.99575 6.43841 3.05492 6.84569 1.68349 7.00389C1.3889 7.03784 1.16669 7.28745 1.16699 7.58396C1.1673 7.88052 1.39002 8.12961 1.68467 8.16297C3.03291 8.31569 3.99517 8.72292 4.64954 9.36546C5.30035 10.0045 5.71535 10.944 5.83593 12.3017C5.86271 12.6029 6.11523 12.8337 6.41763 12.8334C6.72009 12.833 6.97209 12.6016 6.99817 12.3003C7.11367 10.9656 7.52836 10.005 8.18344 9.34982C8.83858 8.69474 9.79922 8.28005 11.1339 8.16455C11.4352 8.13847 11.6666 7.88647 11.667 7.58402C11.6673 7.28162 11.4365 7.02909 11.1353 7.00232C9.77758 6.88174 8.83812 6.46676 8.19908 5.81592C7.55653 5.16155 7.14931 4.19929 6.99659 2.85105Z",
"fill": "currentColor"
},
"children": []
}
]
},
"name": "Generator"
}

View File

@ -0,0 +1,16 @@
// GENERATE BY script
// DON NOT EDIT IT MANUALLY
import * as React from 'react'
import data from './Generator.json'
import IconBase from '@/app/components/base/icons/IconBase'
import type { IconBaseProps, IconData } from '@/app/components/base/icons/IconBase'

// Thin wrapper that renders the "Generator" icon JSON through IconBase,
// forwarding the ref and all remaining IconBase props.
const Icon = React.forwardRef<React.MutableRefObject<SVGElement>, Omit<IconBaseProps, 'data'>>((
  props,
  ref,
) => <IconBase {...props} ref={ref} data={data as IconData} />)

Icon.displayName = 'Generator'

export default Icon

View File

@ -0,0 +1 @@
// Barrel file: re-export the Generator icon from this directory's root.
export { default as Generator } from './Generator'

View File

@ -33,7 +33,7 @@
"attributes": {
"d": "M6.96353 1.5547C7.40657 0.890144 6.93018 0 6.13148 0H0V12L6.96353 1.5547Z",
"fill": "currentColor",
"fill-opacity": "0"
"fill-opacity": "0.5"
},
"children": []
}

View File

@ -1,7 +1,6 @@
'use client'
import type { FC } from 'react'
import React from 'react'
import s from './style.module.css'
import cn from '@/utils/classnames'
type Props = {
@ -29,7 +28,12 @@ const RadioCard: FC<Props> = ({
chosenConfigWrapClassName,
}) => {
return (
<div className={cn(s.item, isChosen && s.active)}>
<div
className={cn(
'border border-components-option-card-option-border bg-components-option-card-option-bg rounded-xl hover:shadow-xs cursor-pointer',
isChosen && 'bg-components-option-card-option-selected-bg border-components-panel-border shadow-xs',
)}
>
<div className='flex py-3 pl-3 pr-4' onClick={onChosen}>
<div className={cn(iconBgClassName, 'mr-3 shrink-0 flex w-8 justify-center h-8 items-center rounded-lg')}>
{icon}
@ -40,12 +44,15 @@ const RadioCard: FC<Props> = ({
</div>
{!noRadio && (
<div className='shrink-0 flex items-center h-8'>
<div className={s.radio}></div>
<div className={cn(
'w-4 h-4 border border-components-radio-border bg-components-radio-bg shadow-xs rounded-full',
isChosen && 'border-[5px] border-components-radio-border-checked',
)}></div>
</div>
)}
</div>
{((isChosen && chosenConfig) || noRadio) && (
<div className={cn(chosenConfigWrapClassName, 'pt-2 px-14 pb-6 border-t border-gray-200')}>
<div className={cn(chosenConfigWrapClassName, 'p-3 border-t border-gray-200')}>
{chosenConfig}
</div>
)}

View File

@ -6,7 +6,7 @@ import cn from '@/utils/classnames'
type Props = {
className?: string
title: string
title: string | JSX.Element | null
description: string
isChosen: boolean
onChosen: () => void

View File

@ -1,25 +0,0 @@
.item {
@apply relative rounded-xl border border-gray-100 cursor-pointer;
background-color: #fcfcfd;
}
.item.active {
border-width: 1.5px;
border-color: #528BFF;
box-shadow: 0px 1px 3px rgba(16, 24, 40, 0.1), 0px 1px 2px rgba(16, 24, 40, 0.06);
}
.item:hover {
background-color: #ffffff;
border-color: #B2CCFF;
box-shadow: 0px 12px 16px -4px rgba(16, 24, 40, 0.08), 0px 4px 6px -2px rgba(16, 24, 40, 0.03);
}
.radio {
@apply w-4 h-4 border-[2px] border-gray-200 rounded-full;
}
.item.active .radio {
border-width: 5px;
border-color: #155EEF;
}

View File

@ -4,6 +4,8 @@ import './style.css'
type ISliderProps = {
className?: string
thumbClassName?: string
trackClassName?: string
value: number
max?: number
min?: number
@ -12,16 +14,26 @@ type ISliderProps = {
onChange: (value: number) => void
}
const Slider: React.FC<ISliderProps> = ({ className, max, min, step, value, disabled, onChange }) => {
const Slider: React.FC<ISliderProps> = ({
className,
thumbClassName,
trackClassName,
max,
min,
step,
value,
disabled,
onChange,
}) => {
return <ReactSlider
disabled={disabled}
value={isNaN(value) ? 0 : value}
min={min || 0}
max={max || 100}
step={step || 1}
className={cn(className, 'slider')}
thumbClassName="slider-thumb"
trackClassName="slider-track"
className={cn('slider', className)}
thumbClassName={cn('slider-thumb', thumbClassName)}
trackClassName={cn('slider-track', trackClassName)}
onChange={onChange}
/>
}

View File

@ -13,6 +13,7 @@ export type TooltipProps = {
hideArrow?: boolean
popupClassName?: string
offset?: OffsetOptions
asChild?: boolean
}
const arrow = (
@ -27,6 +28,7 @@ const Tooltip: FC<TooltipProps> = ({
hideArrow,
popupClassName,
offset,
asChild,
}) => {
const [open, setOpen] = useState(false)
const [isHoverPopup, {
@ -79,6 +81,7 @@ const Tooltip: FC<TooltipProps> = ({
}
}}
onMouseLeave={() => triggerMethod === 'hover' && handleLeave(true)}
asChild={asChild}
>
{children}
</PortalToFollowElemTrigger>

View File

@ -3,6 +3,7 @@ import type {
DefaultModelResponse,
Model,
} from '@/app/components/header/account-setting/model-provider-page/declarations'
import { RerankingModeEnum } from '@/models/datasets'
export const isReRankModelSelected = ({
rerankDefaultModel,
@ -32,7 +33,7 @@ export const isReRankModelSelected = ({
if (
indexMethod === 'high_quality'
&& (retrievalConfig.reranking_enable || retrievalConfig.search_method === RETRIEVE_METHOD.hybrid)
&& (retrievalConfig.search_method === RETRIEVE_METHOD.hybrid && retrievalConfig.reranking_mode !== RerankingModeEnum.WeightedScore)
&& !rerankModelSelected
)
return false

View File

@ -15,6 +15,11 @@ import type { RetrievalConfig } from '@/types/app'
import ModelSelector from '@/app/components/header/account-setting/model-provider-page/model-selector'
import { useModelListAndDefaultModel } from '@/app/components/header/account-setting/model-provider-page/hooks'
import { ModelTypeEnum } from '@/app/components/header/account-setting/model-provider-page/declarations'
import {
RerankingModeEnum,
WeightedScoreEnum,
} from '@/models/datasets'
import WeightedScore from '@/app/components/app/configuration/dataset-config/params-config/weighted-score'
type Props = {
type: RETRIEVE_METHOD
@ -34,6 +39,7 @@ const RetrievalParamConfig: FC<Props> = ({
defaultModel: rerankDefaultModel,
modelList: rerankModelList,
} = useModelListAndDefaultModel(ModelTypeEnum.rerank)
const isHybridSearch = type === RETRIEVE_METHOD.hybrid
const rerankModel = (() => {
if (value.reranking_model) {
@ -50,9 +56,47 @@ const RetrievalParamConfig: FC<Props> = ({
}
})()
const handleChangeRerankMode = (v: RerankingModeEnum) => {
if (v === value.reranking_mode)
return
const result = {
...value,
reranking_mode: v,
}
if (!result.weights && v === RerankingModeEnum.WeightedScore) {
result.weights = {
weight_type: WeightedScoreEnum.Customized,
vector_setting: {
vector_weight: 0.5,
embedding_provider_name: '',
embedding_model_name: '',
},
keyword_setting: {
keyword_weight: 0.5,
},
}
}
onChange(result)
}
const rerankingModeOptions = [
{
value: RerankingModeEnum.WeightedScore,
label: t('dataset.weightedScore.title'),
tips: t('dataset.weightedScore.description'),
},
{
value: RerankingModeEnum.RerankingModel,
label: t('common.modelProvider.rerankModel.key'),
tips: t('common.modelProvider.rerankModel.tip'),
},
]
return (
<div>
{!isEconomical && (
{!isEconomical && !isHybridSearch && (
<div>
<div className='flex h-8 items-center text-[13px] font-medium text-gray-900 space-x-2'>
{canToggleRerankModalEnable && (
@ -75,10 +119,10 @@ const RetrievalParamConfig: FC<Props> = ({
</div>
</div>
<ModelSelector
triggerClassName={`${!value.reranking_enable && type !== RETRIEVE_METHOD.hybrid && '!opacity-60 !cursor-not-allowed'}`}
triggerClassName={`${!value.reranking_enable && '!opacity-60 !cursor-not-allowed'}`}
defaultModel={rerankModel && { provider: rerankModel.provider_name, model: rerankModel.model_name }}
modelList={rerankModelList}
readonly={!value.reranking_enable && type !== RETRIEVE_METHOD.hybrid}
readonly={!value.reranking_enable}
onSelect={(v) => {
onChange({
...value,
@ -91,40 +135,152 @@ const RetrievalParamConfig: FC<Props> = ({
/>
</div>
)}
<div className={cn(!isEconomical && 'mt-4', 'flex space-between space-x-6')}>
<TopKItem
className='grow'
value={value.top_k}
onChange={(_key, v) => {
onChange({
...value,
top_k: v,
})
}}
enable={true}
/>
{(!isEconomical && !(value.search_method === RETRIEVE_METHOD.fullText && !value.reranking_enable)) && (
<ScoreThresholdItem
className='grow'
value={value.score_threshold}
onChange={(_key, v) => {
onChange({
...value,
score_threshold: v,
})
}}
enable={value.score_threshold_enabled}
hasSwitch={true}
onSwitchChange={(_key, v) => {
onChange({
...value,
score_threshold_enabled: v,
})
}}
/>
)}
</div>
{
!isHybridSearch && (
<div className={cn(!isEconomical && 'mt-4', 'flex space-between space-x-6')}>
<TopKItem
className='grow'
value={value.top_k}
onChange={(_key, v) => {
onChange({
...value,
top_k: v,
})
}}
enable={true}
/>
{(!isEconomical && !(value.search_method === RETRIEVE_METHOD.fullText && !value.reranking_enable)) && (
<ScoreThresholdItem
className='grow'
value={value.score_threshold}
onChange={(_key, v) => {
onChange({
...value,
score_threshold: v,
})
}}
enable={value.score_threshold_enabled}
hasSwitch={true}
onSwitchChange={(_key, v) => {
onChange({
...value,
score_threshold_enabled: v,
})
}}
/>
)}
</div>
)
}
{
isHybridSearch && (
<>
<div className='flex items-center justify-between'>
{
rerankingModeOptions.map(option => (
<div
key={option.value}
className={cn(
'flex items-center justify-center mb-4 w-[calc((100%-8px)/2)] h-8 rounded-lg border border-components-option-card-option-border bg-components-option-card-option-bg cursor-pointer system-sm-medium text-text-secondary',
value.reranking_mode === RerankingModeEnum.WeightedScore && option.value === RerankingModeEnum.WeightedScore && 'border-[1.5px] border-components-option-card-option-selected-border bg-components-option-card-option-selected-bg text-text-primary',
value.reranking_mode !== RerankingModeEnum.WeightedScore && option.value !== RerankingModeEnum.WeightedScore && 'border-[1.5px] border-components-option-card-option-selected-border bg-components-option-card-option-selected-bg text-text-primary',
)}
onClick={() => handleChangeRerankMode(option.value)}
>
<div className='truncate'>{option.label}</div>
<Tooltip
popupContent={<div className='w-[200px]'>{option.tips}</div>}
hideArrow
>
<RiQuestionLine className='ml-0.5 w-3.5 h-4.5 text-text-quaternary' />
</Tooltip>
</div>
))
}
</div>
{
value.reranking_mode === RerankingModeEnum.WeightedScore && (
<WeightedScore
value={{
type: value.weights!.weight_type,
value: [
value.weights!.vector_setting.vector_weight,
value.weights!.keyword_setting.keyword_weight,
],
}}
onChange={(v) => {
onChange({
...value,
weights: {
...value.weights!,
weight_type: v.type,
vector_setting: {
...value.weights!.vector_setting,
vector_weight: v.value[0],
},
keyword_setting: {
...value.weights!.keyword_setting,
keyword_weight: v.value[1],
},
},
})
}}
/>
)
}
{
value.reranking_mode !== RerankingModeEnum.WeightedScore && (
<ModelSelector
triggerClassName={`${!value.reranking_enable && '!opacity-60 !cursor-not-allowed'}`}
defaultModel={rerankModel && { provider: rerankModel.provider_name, model: rerankModel.model_name }}
modelList={rerankModelList}
readonly={!value.reranking_enable}
onSelect={(v) => {
onChange({
...value,
reranking_model: {
reranking_provider_name: v.provider,
reranking_model_name: v.model,
},
})
}}
/>
)
}
<div className={cn(!isEconomical && 'mt-4', 'flex space-between space-x-6')}>
<TopKItem
className='grow'
value={value.top_k}
onChange={(_key, v) => {
onChange({
...value,
top_k: v,
})
}}
enable={true}
/>
<ScoreThresholdItem
className='grow'
value={value.score_threshold}
onChange={(_key, v) => {
onChange({
...value,
score_threshold: v,
})
}}
enable={value.score_threshold_enabled}
hasSwitch={true}
onSwitchChange={(_key, v) => {
onChange({
...value,
score_threshold_enabled: v,
})
}}
/>
</div>
</>
)
}
</div>
)
}

View File

@ -113,6 +113,10 @@ const Form = () => {
retrievalConfig,
indexMethod,
})
if (postRetrievalConfig.weights) {
postRetrievalConfig.weights.vector_setting.embedding_provider_name = currentDataset?.embedding_model_provider || ''
postRetrievalConfig.weights.vector_setting.embedding_model_name = currentDataset?.embedding_model || ''
}
try {
setLoading(true)
const requestParams = {

View File

@ -45,7 +45,7 @@ export default function AccountAbout({
IS_CE_EDITION
? <Link href={'https://github.com/langgenius/dify/blob/main/LICENSE'} target='_blank' rel='noopener noreferrer'>Open Source License</Link>
: <>
<Link href='https://dify.ai/privacy' target='_blank' rel='noopener noreferrer'>Privacy Policy</Link>,
<Link href='https://dify.ai/privacy' target='_blank' rel='noopener noreferrer'>Privacy Policy</Link>,<span> </span>
<Link href='https://dify.ai/terms' target='_blank' rel='noopener noreferrer'>Terms of Service</Link>
</>
}

View File

@ -22,7 +22,6 @@ const Icon = ({ svgString, active }: { svgString: string; active: boolean }) =>
return null
const parser = new DOMParser()
const doc = parser.parseFromString(svg, 'image/svg+xml')
console.log(doc.documentElement)
return doc.documentElement
}
useMount(() => {

View File

@ -248,6 +248,7 @@ const Workflow: FC<WorkflowProps> = memo(({
} = useWorkflow()
const { handleStartWorkflowRun } = useWorkflowStartRun()
const {
exportCheck,
handleExportDSL,
} = useDSL()
@ -334,7 +335,7 @@ const Workflow: FC<WorkflowProps> = memo(({
showImportDSLModal && (
<UpdateDSLModal
onCancel={() => setShowImportDSLModal(false)}
onBackup={handleExportDSL}
onBackup={exportCheck}
onImport={handlePaneContextmenuCancel}
/>
)

View File

@ -9,6 +9,7 @@ import { useTranslation } from 'react-i18next'
import { useBoolean } from 'ahooks'
import { BlockEnum, EditionType } from '../../../../types'
import type {
ModelConfig,
Node,
NodeOutPutVar,
Variable,
@ -16,6 +17,7 @@ import type {
import Wrap from '../editor/wrap'
import { CodeLanguage } from '../../../code/types'
import PromptGeneratorBtn from '../../../llm/components/prompt-generator-btn'
import cn from '@/utils/classnames'
import ToggleExpandBtn from '@/app/components/workflow/nodes/_base/components/toggle-expand-btn'
import useToggleExpend from '@/app/components/workflow/nodes/_base/hooks/use-toggle-expend'
@ -28,6 +30,7 @@ import s from '@/app/components/app/configuration/config-prompt/style.module.css
import { useEventEmitterContextContext } from '@/context/event-emitter'
import { PROMPT_EDITOR_INSERT_QUICKLY } from '@/app/components/base/prompt-editor/plugins/update-block'
import { Variable02 } from '@/app/components/base/icons/src/vender/solid/development'
import ActionButton from '@/app/components/base/action-button'
import TooltipPlus from '@/app/components/base/tooltip-plus'
import CodeEditor from '@/app/components/workflow/nodes/_base/components/editor/code-editor/editor-support-vars'
import Switch from '@/app/components/base/switch'
@ -55,6 +58,9 @@ type Props = {
}
nodesOutputVars?: NodeOutPutVar[]
availableNodes?: Node[]
isSupportPromptGenerator?: boolean
onGenerated?: (prompt: string) => void
modelConfig?: ModelConfig
// for jinja
isSupportJinja?: boolean
editionType?: EditionType
@ -80,11 +86,14 @@ const Editor: FC<Props> = ({
hasSetBlockStatus,
nodesOutputVars,
availableNodes = [],
isSupportPromptGenerator,
isSupportJinja,
editionType,
onEditionTypeChange,
varList = [],
handleAddVariable,
onGenerated,
modelConfig,
}) => {
const { t } = useTranslation()
const { eventEmitter } = useEventEmitterContextContext()
@ -120,13 +129,17 @@ const Editor: FC<Props> = ({
<Wrap className={cn(className, wrapClassName)} style={wrapStyle} isInNode isExpand={isExpand}>
<div ref={ref} className={cn(isFocus ? s.gradientBorder : 'bg-gray-100', isExpand && 'h-full', '!rounded-[9px] p-0.5')}>
<div className={cn(isFocus ? 'bg-gray-50' : 'bg-gray-100', isExpand && 'h-full flex flex-col', 'rounded-lg')}>
<div className={cn(headerClassName, 'pt-1 pl-3 pr-2 flex justify-between h-6 items-center')}>
<div className={cn(headerClassName, 'pt-1 pl-3 pr-2 flex justify-between items-center')}>
<div className='leading-4 text-xs font-semibold text-gray-700 uppercase'>{title}</div>
<div className='flex items-center'>
<div className='leading-[18px] text-xs font-medium text-gray-500'>{value?.length || 0}</div>
{isSupportPromptGenerator && (
<PromptGeneratorBtn className='ml-[5px]' onGenerated={onGenerated} modelConfig={modelConfig} />
)}
<div className='w-px h-3 ml-2 mr-2 bg-gray-200'></div>
{/* Operations */}
<div className='flex items-center space-x-2'>
<div className='flex items-center space-x-[2px]'>
{isSupportJinja && (
<TooltipPlus
popupContent={
@ -153,19 +166,28 @@ const Editor: FC<Props> = ({
{!readOnly && (
<TooltipPlus
popupContent={`${t('workflow.common.insertVarTip')}`}
asChild
>
<Variable02 className='w-3.5 h-3.5 text-gray-500 cursor-pointer' onClick={handleInsertVariable} />
<ActionButton onClick={handleInsertVariable}>
<Variable02 className='w-4 h-4' />
</ActionButton>
</TooltipPlus>
)}
{showRemove && (
<RiDeleteBinLine className='w-3.5 h-3.5 text-gray-500 cursor-pointer' onClick={onRemove} />
<ActionButton onClick={onRemove}>
<RiDeleteBinLine className='w-4 h-4' />
</ActionButton>
)}
{!isCopied
? (
<Clipboard className='w-3.5 h-3.5 text-gray-500 cursor-pointer' onClick={handleCopy} />
<ActionButton onClick={handleCopy}>
<Clipboard className='w-4 h-4' />
</ActionButton>
)
: (
<ClipboardCheck className='mx-1 w-3.5 h-3.5 text-gray-500' />
<ActionButton>
<ClipboardCheck className='w-4 h-4' />
</ActionButton>
)
}
<ToggleExpandBtn isExpand={isExpand} onExpandChange={setIsExpand} />

View File

@ -63,7 +63,7 @@ const TypeSelector: FC<Props> = ({
<div
onClick={toggleShow}
className={cn(showOption && 'bg-black/5', 'flex items-center h-5 pl-1 pr-0.5 rounded-md text-xs font-semibold text-gray-700 cursor-pointer hover:bg-black/5')}>
<div className={cn(triggerClassName, 'text-sm font-semibold', uppercase && 'uppercase', noValue && 'text-gray-400')}>{!noValue ? item?.label : placeholder}</div>
<div className={cn(triggerClassName, 'text-xs font-semibold', uppercase && 'uppercase', noValue && 'text-gray-400')}>{!noValue ? item?.label : placeholder}</div>
{!readonly && <DropDownIcon className='w-3 h-3 ' />}
</div>
)}

View File

@ -5,6 +5,7 @@ import {
RiCollapseDiagonalLine,
RiExpandDiagonalLine,
} from '@remixicon/react'
import ActionButton from '@/app/components/base/action-button'
type Props = {
isExpand: boolean
@ -21,7 +22,9 @@ const ExpandBtn: FC<Props> = ({
const Icon = isExpand ? RiCollapseDiagonalLine : RiExpandDiagonalLine
return (
<Icon className='w-3.5 h-3.5 text-gray-500 cursor-pointer' onClick={handleToggle} />
<ActionButton onClick={handleToggle}>
<Icon className='w-4 h-4' />
</ActionButton>
)
}
export default React.memo(ExpandBtn)

View File

@ -146,7 +146,10 @@ const VarReferencePicker: FC<Props> = ({
const varName = useMemo(() => {
if (hasValue) {
const isSystem = isSystemVar(value as ValueSelector)
const varName = value.length >= 3 ? (value as ValueSelector).slice(-2).join('.') : value[value.length - 1]
let varName = ''
if (Array.isArray(value))
varName = value.length >= 3 ? (value as ValueSelector).slice(-2).join('.') : value[value.length - 1]
return `${isSystem ? 'sys.' : ''}${varName}`
}
return ''

View File

@ -4,15 +4,17 @@ import React, { useCallback } from 'react'
import { useBoolean } from 'ahooks'
import {
RiDeleteBinLine,
RiEditLine,
} from '@remixicon/react'
import type { DataSet } from '@/models/datasets'
import { DataSourceType } from '@/models/datasets'
import { Settings01 } from '@/app/components/base/icons/src/vender/line/general'
import FileIcon from '@/app/components/base/file-icon'
import { Folder } from '@/app/components/base/icons/src/vender/solid/files'
import SettingsModal from '@/app/components/app/configuration/dataset-config/settings-modal'
import Drawer from '@/app/components/base/drawer'
import useBreakpoints, { MediaType } from '@/hooks/use-breakpoints'
import Badge from '@/app/components/base/badge'
import { useKnowledge } from '@/hooks/use-knowledge'
type Props = {
payload: DataSet
@ -29,6 +31,7 @@ const DatasetItem: FC<Props> = ({
}) => {
const media = useBreakpoints()
const isMobile = media === MediaType.mobile
const { formatIndexingTechniqueAndMethod } = useKnowledge()
const [isShowSettingsModal, {
setTrue: showSettingsModal,
@ -62,7 +65,7 @@ const DatasetItem: FC<Props> = ({
className='flex items-center justify-center w-6 h-6 hover:bg-black/5 rounded-md cursor-pointer'
onClick={showSettingsModal}
>
<Settings01 className='w-4 h-4 text-gray-500' />
<RiEditLine className='w-4 h-4 text-gray-500' />
</div>
<div
className='flex items-center justify-center w-6 h-6 hover:bg-black/5 rounded-md cursor-pointer'
@ -72,6 +75,10 @@ const DatasetItem: FC<Props> = ({
</div>
</div>
)}
<Badge
className='group-hover/dataset-item:hidden shrink-0'
text={formatIndexingTechniqueAndMethod(payload.indexing_technique, payload.retrieval_model_dict?.search_method)}
/>
{isShowSettingsModal && (
<Drawer isOpen={isShowSettingsModal} onClose={hideSettingsModal} footer={null} mask={isMobile} panelClassname='mt-16 mx-2 sm:mr-2 mb-3 !p-0 !max-w-[640px] rounded-xl'>

Some files were not shown because too many files have changed in this diff Show More