mirror of https://github.com/langgenius/dify.git

commit 31490417d1
Merge branch 'main' into feat/workflow

@@ -38,10 +38,11 @@ from extensions import (
 from extensions.ext_database import db
 from extensions.ext_login import login_manager
 from libs.passport import PassportService

 # DO NOT REMOVE BELOW
 from services.account_service import AccountService

 # DO NOT REMOVE BELOW
 from events import event_handlers
 from models import account, dataset, model, source, task, tool, tools, web
 # DO NOT REMOVE ABOVE

@@ -86,7 +86,7 @@ class Config:
         # ------------------------
         # General Configurations.
         # ------------------------
-        self.CURRENT_VERSION = "0.5.5"
+        self.CURRENT_VERSION = "0.5.6"
         self.COMMIT_SHA = get_env('COMMIT_SHA')
         self.EDITION = "SELF_HOSTED"
         self.DEPLOY_ENV = get_env('DEPLOY_ENV')

@@ -47,7 +47,6 @@ class ChatMessageAudioApi(Resource):
            tenant_id=app_model.tenant_id,
            file=file,
            end_user=None,
-           promot=app_model.app_model_config.pre_prompt
        )

        return response

@@ -69,7 +69,7 @@ class FileExtractor:
                    else MarkdownLoader(file_path, autodetect_encoding=True)
            elif file_extension in ['.htm', '.html']:
                loader = HTMLLoader(file_path)
-           elif file_extension in ['.docx', '.doc']:
+           elif file_extension in ['.docx']:
                loader = Docx2txtLoader(file_path)
            elif file_extension == '.csv':
                loader = CSVLoader(file_path, autodetect_encoding=True)

@@ -96,7 +96,7 @@ class FileExtractor:
            loader = MarkdownLoader(file_path, autodetect_encoding=True)
        elif file_extension in ['.htm', '.html']:
            loader = HTMLLoader(file_path)
-       elif file_extension in ['.docx', '.doc']:
+       elif file_extension in ['.docx']:
            loader = Docx2txtLoader(file_path)
        elif file_extension == '.csv':
            loader = CSVLoader(file_path, autodetect_encoding=True)

@@ -1,5 +1,6 @@
 import json
 import logging
+import uuid
 from datetime import datetime
 from mimetypes import guess_extension
 from typing import Optional, Union, cast

@@ -20,7 +21,14 @@ from core.file.message_file_parser import FileTransferMethod
 from core.memory.token_buffer_memory import TokenBufferMemory
 from core.model_manager import ModelInstance
 from core.model_runtime.entities.llm_entities import LLMUsage
-from core.model_runtime.entities.message_entities import PromptMessage, PromptMessageTool
+from core.model_runtime.entities.message_entities import (
+    AssistantPromptMessage,
+    PromptMessage,
+    PromptMessageTool,
+    SystemPromptMessage,
+    ToolPromptMessage,
+    UserPromptMessage,
+)
 from core.model_runtime.entities.model_entities import ModelFeature
 from core.model_runtime.model_providers.__base.large_language_model import LargeLanguageModel
 from core.model_runtime.utils.encoders import jsonable_encoder

@@ -77,7 +85,9 @@ class BaseAssistantApplicationRunner(AppRunner):
         self.message = message
         self.user_id = user_id
         self.memory = memory
-        self.history_prompt_messages = prompt_messages
+        self.history_prompt_messages = self.organize_agent_history(
+            prompt_messages=prompt_messages or []
+        )
         self.variables_pool = variables_pool
         self.db_variables_pool = db_variables
         self.model_instance = model_instance

@@ -504,17 +514,6 @@ class BaseAssistantApplicationRunner(AppRunner):
             agent_thought.tool_labels_str = json.dumps(labels)

             db.session.commit()
-
-    def get_history_prompt_messages(self) -> list[PromptMessage]:
-        """
-        Get history prompt messages
-        """
-        if self.history_prompt_messages is None:
-            self.history_prompt_messages = db.session.query(PromptMessage).filter(
-                PromptMessage.message_id == self.message.id,
-            ).order_by(PromptMessage.position.asc()).all()
-
-        return self.history_prompt_messages

     def transform_tool_invoke_messages(self, messages: list[ToolInvokeMessage]) -> list[ToolInvokeMessage]:
         """

@@ -589,4 +588,54 @@ class BaseAssistantApplicationRunner(AppRunner):
         """
         db_variables.updated_at = datetime.utcnow()
         db_variables.variables_str = json.dumps(jsonable_encoder(tool_variables.pool))
-        db.session.commit()
+        db.session.commit()
+
+    def organize_agent_history(self, prompt_messages: list[PromptMessage]) -> list[PromptMessage]:
+        """
+        Organize agent history
+        """
+        result = []
+        # check if there is a system message in the beginning of the conversation
+        if prompt_messages and isinstance(prompt_messages[0], SystemPromptMessage):
+            result.append(prompt_messages[0])
+
+        messages: list[Message] = db.session.query(Message).filter(
+            Message.conversation_id == self.message.conversation_id,
+        ).order_by(Message.created_at.asc()).all()
+
+        for message in messages:
+            result.append(UserPromptMessage(content=message.query))
+            agent_thoughts: list[MessageAgentThought] = message.agent_thoughts
+            for agent_thought in agent_thoughts:
+                tools = agent_thought.tool
+                if tools:
+                    tools = tools.split(';')
+                    tool_calls: list[AssistantPromptMessage.ToolCall] = []
+                    tool_call_response: list[ToolPromptMessage] = []
+                    tool_inputs = json.loads(agent_thought.tool_input)
+                    for tool in tools:
+                        # generate a uuid for tool call
+                        tool_call_id = str(uuid.uuid4())
+                        tool_calls.append(AssistantPromptMessage.ToolCall(
+                            id=tool_call_id,
+                            type='function',
+                            function=AssistantPromptMessage.ToolCall.ToolCallFunction(
+                                name=tool,
+                                arguments=json.dumps(tool_inputs.get(tool, {})),
+                            )
+                        ))
+                        tool_call_response.append(ToolPromptMessage(
+                            content=agent_thought.observation,
+                            name=tool,
+                            tool_call_id=tool_call_id,
+                        ))
+
+                    result.extend([
+                        AssistantPromptMessage(
+                            content=agent_thought.thought,
+                            tool_calls=tool_calls,
+                        ),
+                        *tool_call_response
+                    ])
+
+        return result

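Shape of the history the new organize_agent_history rebuilds: per stored turn, one UserPromptMessage, then an AssistantPromptMessage carrying the turn's tool calls, then one ToolPromptMessage per call, correlated by a freshly minted uuid. A small standalone sketch with invented field values (mirroring the MessageAgentThought columns the method reads), not dify code:

```python
import json
import uuid

# Hypothetical stored agent thought; tool names are joined by ';' and
# tool_input maps each name to its arguments, as the method above assumes.
thought = {
    "tool": "weather;calculator",
    "tool_input": json.dumps({"weather": {"city": "Berlin"}, "calculator": {"expr": "1+1"}}),
    "thought": "I should check the weather first.",
    "observation": "Sunny, 21C",
}

# Each call gets a fresh uuid; the matching tool message echoes it back as
# tool_call_id so the model can pair calls with their observations.
for name in thought["tool"].split(';'):
    call_id = str(uuid.uuid4())
    args = json.loads(thought["tool_input"]).get(name, {})
    print(name, call_id[:8], args)
```
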
@@ -12,6 +12,7 @@ from core.model_runtime.entities.message_entities import (
     PromptMessage,
     PromptMessageTool,
     SystemPromptMessage,
+    ToolPromptMessage,
     UserPromptMessage,
 )
 from core.model_runtime.utils.encoders import jsonable_encoder

@@ -39,6 +40,7 @@ class AssistantCotApplicationRunner(BaseAssistantApplicationRunner):
         self._repack_app_orchestration_config(app_orchestration_config)

         agent_scratchpad: list[AgentScratchpadUnit] = []
+        self._init_agent_scratchpad(agent_scratchpad, self.history_prompt_messages)

         # check model mode
         if self.app_orchestration_config.model_config.mode == "completion":

@@ -131,61 +133,95 @@ class AssistantCotApplicationRunner(BaseAssistantApplicationRunner):
             # recale llm max tokens
             self.recale_llm_max_tokens(self.model_config, prompt_messages)
             # invoke model
-            llm_result: LLMResult = model_instance.invoke_llm(
+            chunks: Generator[LLMResultChunk, None, None] = model_instance.invoke_llm(
                 prompt_messages=prompt_messages,
                 model_parameters=app_orchestration_config.model_config.parameters,
                 tools=[],
                 stop=app_orchestration_config.model_config.stop,
-                stream=False,
+                stream=True,
                 user=self.user_id,
                 callbacks=[],
             )

             # check llm result
-            if not llm_result:
+            if not chunks:
                 raise ValueError("failed to invoke llm")

-            # get scratchpad
-            scratchpad = self._extract_response_scratchpad(llm_result.message.content)
-            agent_scratchpad.append(scratchpad)
-
-            # get llm usage
-            if llm_result.usage:
-                increase_usage(llm_usage, llm_result.usage)
+            usage_dict = {}
+            react_chunks = self._handle_stream_react(chunks, usage_dict)
+            scratchpad = AgentScratchpadUnit(
+                agent_response='',
+                thought='',
+                action_str='',
+                observation='',
+                action=None
+            )
+
+            # publish agent thought if it's first iteration
+            if iteration_step == 1:
+                self.queue_manager.publish_agent_thought(agent_thought, PublishFrom.APPLICATION_MANAGER)
+
+            for chunk in react_chunks:
+                if isinstance(chunk, dict):
+                    scratchpad.agent_response += json.dumps(chunk)
+                    try:
+                        if scratchpad.action:
+                            raise Exception("")
+                        scratchpad.action_str = json.dumps(chunk)
+                        scratchpad.action = AgentScratchpadUnit.Action(
+                            action_name=chunk['action'],
+                            action_input=chunk['action_input']
+                        )
+                    except:
+                        scratchpad.thought += json.dumps(chunk)
+                        yield LLMResultChunk(
+                            model=self.model_config.model,
+                            prompt_messages=prompt_messages,
+                            system_fingerprint='',
+                            delta=LLMResultChunkDelta(
+                                index=0,
+                                message=AssistantPromptMessage(
+                                    content=json.dumps(chunk)
+                                ),
+                                usage=None
+                            )
+                        )
+                else:
+                    scratchpad.agent_response += chunk
+                    scratchpad.thought += chunk
+                    yield LLMResultChunk(
+                        model=self.model_config.model,
+                        prompt_messages=prompt_messages,
+                        system_fingerprint='',
+                        delta=LLMResultChunkDelta(
+                            index=0,
+                            message=AssistantPromptMessage(
+                                content=chunk
+                            ),
+                            usage=None
+                        )
+                    )
+
+            agent_scratchpad.append(scratchpad)
+
+            # get llm usage
+            if 'usage' in usage_dict:
+                increase_usage(llm_usage, usage_dict['usage'])
+            else:
+                usage_dict['usage'] = LLMUsage.empty_usage()

             self.save_agent_thought(agent_thought=agent_thought,
                                     tool_name=scratchpad.action.action_name if scratchpad.action else '',
                                     tool_input=scratchpad.action.action_input if scratchpad.action else '',
                                     thought=scratchpad.thought,
                                     observation='',
-                                    answer=llm_result.message.content,
+                                    answer=scratchpad.agent_response,
                                     messages_ids=[],
-                                    llm_usage=llm_result.usage)
+                                    llm_usage=usage_dict['usage'])

+            if scratchpad.action and scratchpad.action.action_name.lower() != "final answer":
                 self.queue_manager.publish_agent_thought(agent_thought, PublishFrom.APPLICATION_MANAGER)

-            # publish agent thought if it's not empty and there is a action
-            if scratchpad.thought and scratchpad.action:
-                # check if final answer
-                if not scratchpad.action.action_name.lower() == "final answer":
-                    yield LLMResultChunk(
-                        model=model_instance.model,
-                        prompt_messages=prompt_messages,
-                        delta=LLMResultChunkDelta(
-                            index=0,
-                            message=AssistantPromptMessage(
-                                content=scratchpad.thought
-                            ),
-                            usage=llm_result.usage,
-                        ),
-                        system_fingerprint=''
-                    )
-
             if not scratchpad.action:
                 # failed to extract action, return final answer directly
                 final_answer = scratchpad.agent_response or ''

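The hunk above swaps the blocking invoke_llm call for a streamed one. Because _handle_stream_react is itself a generator, it cannot return the token usage, so the caller hands in a mutable usage_dict that the handler fills in as a side channel. A minimal, self-contained sketch of that pattern (plain strings instead of LLMResultChunk objects; the "usage:" chunk format is invented for illustration):

```python
from typing import Generator

# The handler yields content pieces and records usage into the dict the
# caller passed in, since a generator's return value is awkward to reach.
def handle_stream(chunks: Generator[str, None, None], usage: dict) -> Generator[str, None, None]:
    for chunk in chunks:
        if chunk.startswith("usage:"):   # stand-in for a final chunk that carries usage
            usage['usage'] = chunk.removeprefix("usage:")
            continue
        yield chunk

usage_dict = {}
for piece in handle_stream(iter(["Hello", " world", "usage:42 tokens"]), usage_dict):
    print(piece, end='')
print("\n", usage_dict)   # usage is available once the stream is exhausted
```
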
@@ -260,7 +296,6 @@ class AssistantCotApplicationRunner(BaseAssistantApplicationRunner):

             # save scratchpad
             scratchpad.observation = observation
-            scratchpad.agent_response = llm_result.message.content

             # save agent thought
             self.save_agent_thought(

@@ -269,7 +304,7 @@ class AssistantCotApplicationRunner(BaseAssistantApplicationRunner):
                 tool_input=tool_call_args,
                 thought=None,
                 observation=observation,
-                answer=llm_result.message.content,
+                answer=scratchpad.agent_response,
                 messages_ids=message_file_ids,
             )
             self.queue_manager.publish_agent_thought(agent_thought, PublishFrom.APPLICATION_MANAGER)

@@ -316,6 +351,97 @@ class AssistantCotApplicationRunner(BaseAssistantApplicationRunner):
                 system_fingerprint=''
             ), PublishFrom.APPLICATION_MANAGER)

+    def _handle_stream_react(self, llm_response: Generator[LLMResultChunk, None, None], usage: dict) \
+        -> Generator[Union[str, dict], None, None]:
+        def parse_json(json_str):
+            try:
+                return json.loads(json_str.strip())
+            except:
+                return json_str
+
+        def extra_json_from_code_block(code_block) -> Generator[Union[dict, str], None, None]:
+            code_blocks = re.findall(r'```(.*?)```', code_block, re.DOTALL)
+            if not code_blocks:
+                return
+            for block in code_blocks:
+                json_text = re.sub(r'^[a-zA-Z]+\n', '', block.strip(), flags=re.MULTILINE)
+                yield parse_json(json_text)
+
+        code_block_cache = ''
+        code_block_delimiter_count = 0
+        in_code_block = False
+        json_cache = ''
+        json_quote_count = 0
+        in_json = False
+        got_json = False
+
+        for response in llm_response:
+            response = response.delta.message.content
+            if not isinstance(response, str):
+                continue
+
+            # stream
+            index = 0
+            while index < len(response):
+                steps = 1
+                delta = response[index:index+steps]
+                if delta == '`':
+                    code_block_cache += delta
+                    code_block_delimiter_count += 1
+                else:
+                    if not in_code_block:
+                        if code_block_delimiter_count > 0:
+                            yield code_block_cache
+                            code_block_cache = ''
+                    else:
+                        code_block_cache += delta
+                    code_block_delimiter_count = 0
+
+                if code_block_delimiter_count == 3:
+                    if in_code_block:
+                        yield from extra_json_from_code_block(code_block_cache)
+                        code_block_cache = ''
+
+                    in_code_block = not in_code_block
+                    code_block_delimiter_count = 0
+
+                if not in_code_block:
+                    # handle single json
+                    if delta == '{':
+                        json_quote_count += 1
+                        in_json = True
+                        json_cache += delta
+                    elif delta == '}':
+                        json_cache += delta
+                        if json_quote_count > 0:
+                            json_quote_count -= 1
+                            if json_quote_count == 0:
+                                in_json = False
+                                got_json = True
+                                index += steps
+                                continue
+                    else:
+                        if in_json:
+                            json_cache += delta
+
+                    if got_json:
+                        got_json = False
+                        yield parse_json(json_cache)
+                        json_cache = ''
+                        json_quote_count = 0
+                        in_json = False
+
+                if not in_code_block and not in_json:
+                    yield delta.replace('`', '')
+
+                index += steps
+
+        if code_block_cache:
+            yield code_block_cache
+
+        if json_cache:
+            yield parse_json(json_cache)
+
     def _fill_in_inputs_from_external_data_tools(self, instruction: str, inputs: dict) -> str:
         """
         fill in inputs from external data tools

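For readers skimming the new _handle_stream_react: it scans the stream character by character, routing fenced ``` blocks and bare {...} objects through json.loads while passing everything else along as plain text. A standalone sketch of the same idea over a complete string (regex-based, so unlike the method above it handles neither incremental input nor nested bare objects outside fences):

```python
import json
import re

# Split a ReAct-style LLM reply into plain-text segments and parsed action
# dicts. The bare-object alternative (\{.*?\}) is naive about nesting; the
# method above does the character-level bracket counting instead.
def split_react_output(text: str):
    pattern = re.compile(r'```(?:[a-zA-Z]+\n)?(.*?)```|(\{.*?\})', re.DOTALL)
    pos = 0
    for m in pattern.finditer(text):
        if m.start() > pos:
            yield text[pos:m.start()]          # plain text between matches
        raw = m.group(1) if m.group(1) is not None else m.group(2)
        try:
            yield json.loads(raw.strip())      # parsed action dict
        except json.JSONDecodeError:
            yield raw                          # leave unparseable chunks as text
        pos = m.end()
    if pos < len(text):
        yield text[pos:]

sample = 'Thought: need a tool.\n```json\n{"action": "weather", "action_input": {"city": "Berlin"}}\n```'
for part in split_react_output(sample):
    print(type(part).__name__, part)
```
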
@@ -327,122 +453,40 @@ class AssistantCotApplicationRunner(BaseAssistantApplicationRunner):
                 continue

         return instruction

-    def _extract_response_scratchpad(self, content: str) -> AgentScratchpadUnit:
+    def _init_agent_scratchpad(self,
+                               agent_scratchpad: list[AgentScratchpadUnit],
+                               messages: list[PromptMessage]
+                               ) -> list[AgentScratchpadUnit]:
         """
-        extract response from llm response
+        init agent scratchpad
         """
-        def extra_quotes() -> AgentScratchpadUnit:
-            agent_response = content
-            # try to extract all quotes
-            pattern = re.compile(r'```(.*?)```', re.DOTALL)
-            quotes = pattern.findall(content)
-
-            # try to extract action from end to start
-            for i in range(len(quotes) - 1, 0, -1):
-                """
-                1. use json load to parse action
-                2. use plain text `Action: xxx` to parse action
-                """
-                try:
-                    action = json.loads(quotes[i].replace('```', ''))
-                    action_name = action.get("action")
-                    action_input = action.get("action_input")
-                    agent_thought = agent_response.replace(quotes[i], '')
-
-                    if action_name and action_input:
-                        return AgentScratchpadUnit(
-                            agent_response=content,
-                            thought=agent_thought,
-                            action_str=quotes[i],
-                            action=AgentScratchpadUnit.Action(
-                                action_name=action_name,
-                                action_input=action_input,
-                            )
-                        )
-                except:
-                    # try to parse action from plain text
-                    action_name = re.findall(r'action: (.*)', quotes[i], re.IGNORECASE)
-                    action_input = re.findall(r'action input: (.*)', quotes[i], re.IGNORECASE)
-                    # delete action from agent response
-                    agent_thought = agent_response.replace(quotes[i], '')
-                    # remove extra quotes
-                    agent_thought = re.sub(r'```(json)*\n*```', '', agent_thought, flags=re.DOTALL)
-                    # remove Action: xxx from agent thought
-                    agent_thought = re.sub(r'Action:.*', '', agent_thought, flags=re.IGNORECASE)
-
-                    if action_name and action_input:
-                        return AgentScratchpadUnit(
-                            agent_response=content,
-                            thought=agent_thought,
-                            action_str=quotes[i],
-                            action=AgentScratchpadUnit.Action(
-                                action_name=action_name[0],
-                                action_input=action_input[0],
-                            )
-                        )
-
-        def extra_json():
-            agent_response = content
-            # try to extract all json
-            structures, pair_match_stack = [], []
-            started_at, end_at = 0, 0
-            for i in range(len(content)):
-                if content[i] == '{':
-                    pair_match_stack.append(i)
-                    if len(pair_match_stack) == 1:
-                        started_at = i
-                elif content[i] == '}':
-                    begin = pair_match_stack.pop()
-                    if not pair_match_stack:
-                        end_at = i + 1
-                        structures.append((content[begin:i+1], (started_at, end_at)))
-
-            # handle the last character
-            if pair_match_stack:
-                end_at = len(content)
-                structures.append((content[pair_match_stack[0]:], (started_at, end_at)))
-
-            for i in range(len(structures), 0, -1):
-                try:
-                    json_content, (started_at, end_at) = structures[i - 1]
-                    action = json.loads(json_content)
-                    action_name = action.get("action")
-                    action_input = action.get("action_input")
-                    # delete json content from agent response
-                    agent_thought = agent_response[:started_at] + agent_response[end_at:]
-                    # remove extra quotes like ```(json)*\n\n```
-                    agent_thought = re.sub(r'```(json)*\n*```', '', agent_thought, flags=re.DOTALL)
-                    # remove Action: xxx from agent thought
-                    agent_thought = re.sub(r'Action:.*', '', agent_thought, flags=re.IGNORECASE)
-
-                    if action_name and action_input is not None:
-                        return AgentScratchpadUnit(
-                            agent_response=content,
-                            thought=agent_thought,
-                            action_str=json_content,
-                            action=AgentScratchpadUnit.Action(
-                                action_name=action_name,
-                                action_input=action_input,
-                            )
-                        )
-                except:
-                    pass
-
-        agent_scratchpad = extra_quotes()
-        if agent_scratchpad:
-            return agent_scratchpad
-        agent_scratchpad = extra_json()
-        if agent_scratchpad:
-            return agent_scratchpad
-
-        return AgentScratchpadUnit(
-            agent_response=content,
-            thought=content,
-            action_str='',
-            action=None
-        )
+        current_scratchpad: AgentScratchpadUnit = None
+        for message in messages:
+            if isinstance(message, AssistantPromptMessage):
+                current_scratchpad = AgentScratchpadUnit(
+                    agent_response=message.content,
+                    thought=message.content,
+                    action_str='',
+                    action=None,
+                    observation=None
+                )
+                if message.tool_calls:
+                    try:
+                        current_scratchpad.action = AgentScratchpadUnit.Action(
+                            action_name=message.tool_calls[0].function.name,
+                            action_input=json.loads(message.tool_calls[0].function.arguments)
+                        )
+                    except:
+                        pass
+
+                agent_scratchpad.append(current_scratchpad)
+            elif isinstance(message, ToolPromptMessage):
+                if current_scratchpad:
+                    current_scratchpad.observation = message.content
+
+        return agent_scratchpad

     def _check_cot_prompt_messages(self, mode: Literal["completion", "chat"],
                                    agent_prompt_message: AgentPromptEntity,
                                    ):

@@ -556,15 +600,15 @@ class AssistantCotApplicationRunner(BaseAssistantApplicationRunner):
         # organize prompt messages
         if mode == "chat":
             # override system message
-            overrided = False
+            overridden = False
             prompt_messages = prompt_messages.copy()
             for prompt_message in prompt_messages:
                 if isinstance(prompt_message, SystemPromptMessage):
                     prompt_message.content = system_message
-                    overrided = True
+                    overridden = True
                     break

-            if not overrided:
+            if not overridden:
                 prompt_messages.insert(0, SystemPromptMessage(
                     content=system_message,
                 ))

@@ -37,9 +37,6 @@ parameter_rules:
        the same result. Determinism is not guaranteed, and you should refer to the
        system_fingerprint response parameter to monitor changes in the backend.
    required: false
-   precision: 2
-   min: 0
-   max: 1
  - name: response_format
    label:
      zh_Hans: 回复格式

@@ -37,9 +37,6 @@ parameter_rules:
        the same result. Determinism is not guaranteed, and you should refer to the
        system_fingerprint response parameter to monitor changes in the backend.
    required: false
-   precision: 2
-   min: 0
-   max: 1
  - name: response_format
    label:
      zh_Hans: 回复格式

@@ -37,9 +37,6 @@ parameter_rules:
        the same result. Determinism is not guaranteed, and you should refer to the
        system_fingerprint response parameter to monitor changes in the backend.
    required: false
-   precision: 2
-   min: 0
-   max: 1
  - name: response_format
    label:
      zh_Hans: 回复格式

@@ -37,9 +37,6 @@ parameter_rules:
        the same result. Determinism is not guaranteed, and you should refer to the
        system_fingerprint response parameter to monitor changes in the backend.
    required: false
-   precision: 2
-   min: 0
-   max: 1
  - name: response_format
    label:
      zh_Hans: 回复格式

@@ -35,9 +35,6 @@ parameter_rules:
        the same result. Determinism is not guaranteed, and you should refer to the
        system_fingerprint response parameter to monitor changes in the backend.
    required: false
-   precision: 2
-   min: 0
-   max: 1
  - name: response_format
    label:
      zh_Hans: 回复格式

@@ -37,9 +37,6 @@ parameter_rules:
        the same result. Determinism is not guaranteed, and you should refer to the
        system_fingerprint response parameter to monitor changes in the backend.
    required: false
-   precision: 2
-   min: 0
-   max: 1
  - name: response_format
    label:
      zh_Hans: 回复格式

@@ -376,7 +376,6 @@ class OAIAPICompatLargeLanguageModel(_CommonOAI_API_Compat, LargeLanguageModel):
                 chunk_json = json.loads(decoded_chunk)
             # stream ended
             except json.JSONDecodeError as e:
-                logger.error(f"decoded_chunk error: {e}, delimiter={delimiter}, decoded_chunk={decoded_chunk}")
                 yield create_final_llm_result_chunk(
                     index=chunk_index + 1,
                     message=AssistantPromptMessage(content=""),

@@ -0,0 +1 @@
+<svg xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" width="64" height="64" viewBox="0 0 32 32"><g transform="matrix(.266667 0 0 .266667 -17.954934 -5.057333)"><circle cx="127.332" cy="78.966" r="51.15" fill="#de5833"/><defs><path id="A" d="M178.684 78.824c0 28.316-23.035 51.354-51.354 51.354-28.313 0-51.348-23.04-51.348-51.354s23.036-51.35 51.348-51.35c28.318 0 51.354 23.036 51.354 51.35z"/></defs><clipPath id="B"><use xlink:href="#A"/></clipPath><g clip-path="url(#B)"><path d="M148.293 155.158c-1.8-8.285-12.262-27.04-16.23-34.97s-7.938-19.1-6.13-26.322c.328-1.312-3.436-11.308-2.354-12.015 8.416-5.5 10.632.6 14.002-1.862 1.734-1.273 4.1 1.047 4.7-1.06 2.158-7.567-3.006-20.76-8.77-26.526-1.885-1.88-4.77-3.06-8.03-3.687-1.254-1.713-3.275-3.36-6.138-4.88-3.188-1.697-10.12-3.938-13.717-4.535-2.492-.4-3.055.287-4.12.46.992.088 5.7 2.414 6.615 2.55-.916.62-3.607-.028-5.324.742-.865.392-1.512 1.877-1.506 2.58 4.9-.496 12.574-.016 17.1 2-3.602.4-9.08.867-11.436 2.105-6.848 3.608-9.873 12.035-8.07 22.133 1.804 10.075 9.738 46.85 12.262 59.13 2.525 12.264-5.408 20.2-10.455 22.354l5.408.363-1.8 3.967c6.484.72 13.695-1.44 13.695-1.44-1.438 3.965-11.176 5.412-11.176 5.412s4.7 1.438 12.258-1.447l12.263-4.688 3.604 9.373 6.854-6.847 2.885 7.2c.014-.001 5.424-1.808 3.62-10.103z" fill="#d5d7d8"/><path d="M150.47 153.477c-1.795-8.3-12.256-27.043-16.228-34.98s-7.935-19.112-6.13-26.32c.335-1.3.34-6.668 1.43-7.38 8.4-5.494 7.812-.184 11.187-2.645 1.74-1.27 3.133-2.806 3.738-4.912 2.164-7.572-3.006-20.76-8.773-26.53-1.88-1.88-4.768-3.062-8.023-3.686-1.252-1.718-3.27-3.36-6.13-4.882-5.4-2.862-12.074-4.006-18.266-2.883 1 .1 3.256 2.138 4.168 2.273-1.38.936-5.053.815-5.03 2.896 4.916-.492 10.303.285 14.834 2.297-3.602.4-6.955 1.3-9.3 2.543-6.854 3.603-8.656 10.812-6.854 20.914 1.807 10.097 9.742 46.873 12.256 59.126 2.527 12.26-5.402 20.188-10.45 22.354l5.408.36-1.8 3.973c6.484.72 13.695-1.44 13.695-1.44-1.438 3.974-11.176 5.406-11.176 5.406s4.686 1.44 12.258-1.445l12.27-4.688 3.604 9.373 6.852-6.85 2.9 7.215c-.016.007 5.388-1.797 3.58-10.088z" fill="#fff"/><path d="M109.02 70.69c0-2.093 1.693-3.787 3.79-3.787 2.1 0 3.785 1.694 3.785 3.787s-1.695 3.786-3.785 3.786c-2.096.001-3.79-1.692-3.79-3.786z" fill="#2d4f8e"/><path d="M113.507 69.43a.98.98 0 0 1 .98-.983c.543 0 .984.438.984.983s-.44.984-.984.984c-.538.001-.98-.44-.98-.984z" fill="#fff"/><path d="M134.867 68.445c0-1.793 1.46-3.25 3.252-3.25 1.8 0 3.256 1.457 3.256 3.25 0 1.8-1.455 3.258-3.256 3.258a3.26 3.26 0 0 1-3.252-3.258z" fill="#2d4f8e"/><path d="M138.725 67.363c0-.463.38-.843.838-.843a.84.84 0 0 1 .846.843c0 .47-.367.842-.846.842a.84.84 0 0 1-.838-.842z" fill="#fff"/><linearGradient id="C" gradientUnits="userSpaceOnUse" x1="105.318" y1="60.979" x2="113.887" y2="60.979"><stop offset=".006" stop-color="#6176b9"/><stop offset=".691" stop-color="#394a9f"/></linearGradient><path d="M113.886 59.718s-2.854-1.3-5.63.453-2.668 3.523-2.668 3.523-1.473-3.283 2.453-4.892 5.844.916 5.844.916z" fill="url(#C)"/><linearGradient id="D" gradientUnits="userSpaceOnUse" x1="132.273" y1="58.371" x2="140.078" y2="58.371"><stop offset=".006" stop-color="#6176b9"/><stop offset=".691" stop-color="#394a9f"/></linearGradient><path d="M140.078 59.458s-2.05-1.172-3.643-1.152c-3.27.043-4.162 1.488-4.162 1.488s.55-3.445 4.732-2.754c2.268.377 3.073 2.418 3.073 2.418z" fill="url(#D)"/></g><path d="M124.4 85.295c.38-2.3 6.3-6.625 10.5-6.887 4.2-.265 5.5-.205 9-1.043s12.535-3.088 15.033-4.242c2.504-1.156 13.104.572 5.63 4.738-3.232 1.8-11.943 5.13 18.172 6.987-6.22 1.86-10-1.776-12.06 1.28-1.646 2.432-.334 5.762 7.1 6.453 10.037.93 19.66-4.52 20.72-1.625s-8.625 6.508-14.525 6.623c-5.893.1-17.77-3.896-19.555-5.137s-4.165-4.13-3.67-7.148z" fill="#fdd20a"/><path d="M128.943 115.592s-14.102-7.52-14.332-4.47c-.238 3.056 0 15.5 1.643 16.45s13.396-6.108 13.396-6.108zm5.403-.474s9.635-7.285 11.754-6.815c2.1.48 2.582 15.5.7 16.23-1.88.7-12.908-3.813-12.908-3.813z" fill="#65bc46"/><path d="M125.53 116.4c0 4.932-.7 7.05 1.4 7.52s6.104 0 7.518-.938.232-7.28-.232-8.465c-.477-1.174-8.696-.232-8.696 1.884z" fill="#43a244"/><path d="M126.426 115.292c0 4.933-.707 7.05 1.4 7.52 2.106.48 6.104 0 7.52-.938 1.4-.94.23-7.28-.236-8.466-.473-1.173-8.692-.227-8.692 1.885z" fill="#65bc46"/><circle cx="127.331" cy="78.965" r="57.5" fill="none" stroke="#de5833" stroke-width="5"/></g></svg>

@@ -0,0 +1,20 @@
+from core.tools.errors import ToolProviderCredentialValidationError
+from core.tools.provider.builtin.duckduckgo.tools.duckduckgo_search import DuckDuckGoSearchTool
+from core.tools.provider.builtin_tool_provider import BuiltinToolProviderController
+
+
+class DuckDuckGoProvider(BuiltinToolProviderController):
+    def _validate_credentials(self, credentials: dict) -> None:
+        try:
+            DuckDuckGoSearchTool().fork_tool_runtime(
+                meta={
+                    "credentials": credentials,
+                }
+            ).invoke(
+                user_id='',
+                tool_parameters={
+                    "query": "John Doe",
+                },
+            )
+        except Exception as e:
+            raise ToolProviderCredentialValidationError(str(e))

@@ -0,0 +1,10 @@
+identity:
+  author: Yash Parmar
+  name: duckduckgo
+  label:
+    en_US: DuckDuckGo
+    zh_Hans: DuckDuckGo
+  description:
+    en_US: A privacy-focused search engine.
+    zh_Hans: 一个注重隐私的搜索引擎。
+  icon: icon.svg

@@ -0,0 +1,40 @@
+from typing import Any
+
+from langchain.tools import DuckDuckGoSearchRun
+from pydantic import BaseModel, Field
+
+from core.tools.entities.tool_entities import ToolInvokeMessage
+from core.tools.tool.builtin_tool import BuiltinTool
+
+
+class DuckDuckGoInput(BaseModel):
+    query: str = Field(..., description="Search query.")
+
+
+class DuckDuckGoSearchTool(BuiltinTool):
+    """
+    Tool for performing a search using DuckDuckGo search engine.
+    """
+
+    def _invoke(self, user_id: str, tool_parameters: dict[str, Any]) -> ToolInvokeMessage | list[ToolInvokeMessage]:
+        """
+        Invoke the DuckDuckGo search tool.
+
+        Args:
+            user_id (str): The ID of the user invoking the tool.
+            tool_parameters (dict[str, Any]): The parameters for the tool invocation.
+
+        Returns:
+            ToolInvokeMessage | list[ToolInvokeMessage]: The result of the tool invocation.
+        """
+        query = tool_parameters.get('query', '')
+
+        if not query:
+            return self.create_text_message('Please input query')
+
+        tool = DuckDuckGoSearchRun(args_schema=DuckDuckGoInput)
+
+        result = tool.run(query)
+
+        return self.create_text_message(self.summary(user_id=user_id, content=result))

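A hedged usage sketch for the wrapped tool: DuckDuckGoSearchRun is langchain's wrapper around the duckduckgo-search package (pinned later in this commit as duckduckgo-search==4.4.3). It needs no API key, which is why _validate_credentials above can simply fire a probe query.

```python
from langchain.tools import DuckDuckGoSearchRun

# No credentials required; run() returns a plain-text summary of results.
tool = DuckDuckGoSearchRun()
print(tool.run("Dify LLM application platform"))
```
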
@@ -0,0 +1,23 @@
+identity:
+  name: duckduckgo_search
+  author: Yash Parmar
+  label:
+    en_US: DuckDuckGo Search
+    zh_Hans: DuckDuckGo 搜索
+description:
+  human:
+    en_US: Perform searches on DuckDuckGo and get results.
+    zh_Hans: 在 DuckDuckGo 上进行搜索并获取结果。
+  llm: Perform searches on DuckDuckGo and get results.
+parameters:
+  - name: query
+    type: string
+    required: true
+    label:
+      en_US: Query string
+      zh_Hans: 查询语句
+    human_description:
+      en_US: The search query.
+      zh_Hans: 搜索查询语句。
+    llm_description: Key words for searching
+    form: llm

@@ -58,7 +58,7 @@ class BuiltinToolProviderController(ToolProviderController):
         tool_files = list(filter(lambda x: x.endswith(".yaml") and not x.startswith("__"), listdir(tool_path)))
         tools = []
         for tool_file in tool_files:
-            with open(path.join(tool_path, tool_file)) as f:
+            with open(path.join(tool_path, tool_file), encoding='utf-8') as f:
                 # get tool name
                 tool_name = tool_file.split(".")[0]
                 tool = load(f.read(), FullLoader)

@@ -287,4 +287,4 @@ class BuiltinToolProviderController(ToolProviderController):
        :param tool_name: the name of the tool, defined in `get_tools`
        :param credentials: the credentials of the tool
        """
-       pass
+       pass

@@ -127,6 +127,8 @@ class ApiTool(Tool):
                    value = parameters[parameter['name']]
+               elif parameter['required']:
+                   raise ToolProviderCredentialValidationError(f"Missing required parameter {parameter['name']}")
                else:
                    value = (parameter.get('schema', {}) or {}).get('default', '')
                path_params[parameter['name']] = value

            elif parameter['in'] == 'query':

@@ -135,6 +137,8 @@ class ApiTool(Tool):
                    value = parameters[parameter['name']]
+               elif parameter['required']:
+                   raise ToolProviderCredentialValidationError(f"Missing required parameter {parameter['name']}")
                else:
                    value = (parameter.get('schema', {}) or {}).get('default', '')
                params[parameter['name']] = value

            elif parameter['in'] == 'cookie':

@@ -143,6 +147,8 @@ class ApiTool(Tool):
                    value = parameters[parameter['name']]
+               elif parameter['required']:
+                   raise ToolProviderCredentialValidationError(f"Missing required parameter {parameter['name']}")
                else:
                    value = (parameter.get('schema', {}) or {}).get('default', '')
                cookies[parameter['name']] = value

            elif parameter['in'] == 'header':

@@ -151,6 +157,8 @@ class ApiTool(Tool):
                    value = parameters[parameter['name']]
+               elif parameter['required']:
+                   raise ToolProviderCredentialValidationError(f"Missing required parameter {parameter['name']}")
                else:
                    value = (parameter.get('schema', {}) or {}).get('default', '')
                headers[parameter['name']] = value

        # check if there is a request body and handle it

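The four ApiTool hunks above add the same guard for every parameter location (path, query, cookie, header). The lookup order they implement, as a minimal standalone sketch (ValueError standing in for ToolProviderCredentialValidationError):

```python
# Resolution order: caller-supplied value first, then a hard failure for
# required parameters, then the OpenAPI schema default (empty string if none).
def resolve_parameter(parameter: dict, parameters: dict):
    if parameter['name'] in parameters:
        return parameters[parameter['name']]
    if parameter['required']:
        raise ValueError(f"Missing required parameter {parameter['name']}")
    return (parameter.get('schema', {}) or {}).get('default', '')

print(resolve_parameter({'name': 'q', 'required': False, 'schema': {'default': 'all'}}, {}))  # -> 'all'
```
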
@@ -60,7 +60,7 @@ class ApiBasedToolSchemaParser:
                    required=parameter.get('required', False),
                    form=ToolParameter.ToolParameterForm.LLM,
                    llm_description=parameter.get('description'),
-                   default=parameter['default'] if 'default' in parameter else None,
+                   default=parameter['schema']['default'] if 'schema' in parameter and 'default' in parameter['schema'] else None,
                ))
            # create tool bundle
            # check if there is a request body

@@ -115,7 +115,12 @@ class ApiBasedToolSchemaParser:

            # check if there is a operation id, use $path_$method as operation id if not
            if 'operationId' not in interface['operation']:
-               interface['operation']['operationId'] = f'{interface["path"]}_{interface["method"]}'
+               # remove special characters like / to ensure the operation id is valid ^[a-zA-Z0-9_-]{1,64}$
+               path = interface['path']
+               if interface['path'].startswith('/'):
+                   path = interface['path'][1:]
+               path = path.replace('/', '_')
+               interface['operation']['operationId'] = f'{path}_{interface["method"]}'

            bundles.append(ApiBasedToolBundle(
                server_url=server_url + interface['path'],

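A worked example of the operationId fallback added above, for a hypothetical path /search/items with method get:

```python
# Leading '/' dropped, remaining '/' turned into '_', HTTP method appended.
path, method = '/search/items', 'get'
if path.startswith('/'):
    path = path[1:]
path = path.replace('/', '_')
print(f'{path}_{method}')  # -> search_items_get
```
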
@@ -3,6 +3,7 @@ requires-python = ">=3.10"

 [tool.ruff]
 exclude = [
     "app.py",
     "__init__.py",
+    "tests/",
 ]

@@ -65,4 +65,5 @@ matplotlib~=3.8.2
 yfinance~=0.2.35
 pydub~=0.25.1
 gmpy2~=2.1.5
-numexpr~=2.9.0
+numexpr~=2.9.0
+duckduckgo-search==4.4.3

@@ -20,7 +20,7 @@ ALLOWED_EXTENSIONS = ['mp3', 'mp4', 'mpeg', 'mpga', 'm4a', 'wav', 'webm', 'amr']

 class AudioService:
     @classmethod
-    def transcript_asr(cls, tenant_id: str, file: FileStorage, promot: str, end_user: Optional[str] = None):
+    def transcript_asr(cls, tenant_id: str, file: FileStorage, end_user: Optional[str] = None):
         if file is None:
             raise NoAudioUploadedServiceError()

@@ -20,9 +20,9 @@ from services.errors.file import FileTooLargeError, UnsupportedFileTypeError
 IMAGE_EXTENSIONS = ['jpg', 'jpeg', 'png', 'webp', 'gif', 'svg']
 IMAGE_EXTENSIONS.extend([ext.upper() for ext in IMAGE_EXTENSIONS])

-ALLOWED_EXTENSIONS = ['txt', 'markdown', 'md', 'pdf', 'html', 'htm', 'xlsx', 'docx', 'doc', 'csv'] + IMAGE_EXTENSIONS
+ALLOWED_EXTENSIONS = ['txt', 'markdown', 'md', 'pdf', 'html', 'htm', 'xlsx', 'docx', 'csv'] + IMAGE_EXTENSIONS
 UNSTRUSTURED_ALLOWED_EXTENSIONS = ['txt', 'markdown', 'md', 'pdf', 'html', 'htm', 'xlsx',
-                                   'docx', 'doc', 'csv', 'eml', 'msg', 'pptx', 'ppt', 'xml'] + IMAGE_EXTENSIONS
+                                   'docx', 'csv', 'eml', 'msg', 'pptx', 'ppt', 'xml'] + IMAGE_EXTENSIONS
 PREVIEW_WORDS_LIMIT = 3000

@@ -162,7 +162,7 @@ class FileService:
         generator = storage.load(upload_file.key, stream=True)

         return generator, upload_file.mime_type
-
+
     @staticmethod
     def get_public_image_preview(file_id: str) -> str:
         upload_file = db.session.query(UploadFile) \

@@ -2,7 +2,7 @@ version: '3.1'
 services:
   # API service
   api:
-    image: langgenius/dify-api:0.5.5
+    image: langgenius/dify-api:0.5.6
     restart: always
     environment:
       # Startup mode, 'api' starts the API server.

@@ -135,7 +135,7 @@ services:
   # worker service
   # The Celery worker for processing the queue.
   worker:
-    image: langgenius/dify-api:0.5.5
+    image: langgenius/dify-api:0.5.6
     restart: always
     environment:
       # Startup mode, 'worker' starts the Celery worker for processing the queue.

@@ -206,7 +206,7 @@ services:

   # Frontend web application.
   web:
-    image: langgenius/dify-web:0.5.5
+    image: langgenius/dify-web:0.5.6
     restart: always
     environment:
       EDITION: SELF_HOSTED

@@ -147,6 +147,7 @@ const ConfigVar: FC<IConfigVarProps> = ({ promptVariables, readonly, onPromptVar
  ) => {
    setShowExternalDataToolModal({
      payload: {
+       type,
        variable: key,
        label: name,
        config,

@@ -245,7 +246,7 @@ const ConfigVar: FC<IConfigVarProps> = ({ promptVariables, readonly, onPromptVar

  const handleConfig = ({ key, type, index, name, config, icon, icon_background }: ExternalDataToolParams) => {
    setCurrKey(key)
-   if (type === 'api') {
+   if (type !== 'string' && type !== 'paragraph' && type !== 'select') {
      handleOpenExternalDataToolModal({ key, type, index, name, config, icon, icon_background }, promptVariables)
      return
    }

@@ -149,7 +149,7 @@ const Debug: FC<IDebug> = ({
    }
    let hasEmptyInput = ''
    const requiredVars = modelConfig.configs.prompt_variables.filter(({ key, name, required, type }) => {
-     if (type === 'api')
+     if (type !== 'string' && type !== 'paragraph' && type !== 'select')
        return false
      const res = (!key || !key.trim()) || (!name || !name.trim()) || (required || required === undefined || required === null)
      return res

@@ -65,7 +65,7 @@ const FormGeneration: FC<FormGenerationProps> = ({
          }
        })}
        onSelect={item => handleFormChange(form.variable, item.value as string)}
-       popupClassName='w-[576px]'
+       popupClassName='w-[576px] !z-[102]'
      />
    )
  }

@@ -42,7 +42,7 @@ export const useCheckPromptVariables = () => {
  } = promptVariablesConfig
  let hasEmptyInput = ''
  const requiredVars = promptVariables.filter(({ key, name, required, type }) => {
-   if (type === 'api')
+   if (type !== 'string' && type !== 'paragraph' && type !== 'select')
      return false
    const res = (!key || !key.trim()) || (!name || !name.trim()) || (required || required === undefined || required === null)
    return res

@@ -146,8 +146,9 @@ export const useChat = (
  }, [stopChat, handleResponsing])

  const handleRestart = useCallback(() => {
+   handleStop()
    connversationId.current = ''
    taskIdRef.current = ''
-   handleStop()
    const newChatList = config?.opening_statement
      ? [{
        id: `${Date.now()}`,

@@ -167,7 +167,7 @@ Chat applications support session persistence, allowing previous chat history to
    </Col>
    <Col sticky>

-   <CodeGroup title="Request" tag="POST" abel="/chat-messages" targetCode={`curl -X POST '${props.appDetail.api_base_url}/chat-messages' \\\n--header 'Authorization: Bearer {api_key}' \\\n--header 'Content-Type: application/json' \\\n--data-raw '{\n "inputs": ${JSON.stringify(props.inputs)},\n "query": "What are the specs of the iPhone 13 Pro Max?",\n "response_mode": "streaming",\n "conversation_id": "",\n "user": "abc-123",\n "files": [\n {\n "type": "images",\n "transfer_method": "remote_url",\n "url": "https://cloud.dify.ai/logo/logo-site.png"\n }\n ]\n}'`}>
+   <CodeGroup title="Request" tag="POST" abel="/chat-messages" targetCode={`curl -X POST '${props.appDetail.api_base_url}/chat-messages' \\\n--header 'Authorization: Bearer {api_key}' \\\n--header 'Content-Type: application/json' \\\n--data-raw '{\n "inputs": ${JSON.stringify(props.inputs)},\n "query": "What are the specs of the iPhone 13 Pro Max?",\n "response_mode": "streaming",\n "conversation_id": "",\n "user": "abc-123",\n "files": [\n {\n "type": "image",\n "transfer_method": "remote_url",\n "url": "https://cloud.dify.ai/logo/logo-site.png"\n }\n ]\n}'`}>

    ```bash {{ title: 'cURL' }}
    curl -X POST '${props.appDetail.api_base_url}/chat-messages' \

@@ -170,7 +170,7 @@ import { Row, Col, Properties, Property, Heading, SubProperty } from '../md.tsx'
    </Col>
    <Col sticky>

-   <CodeGroup title="Request" tag="POST" label="/chat-messages" targetCode={`curl -X POST '${props.appDetail.api_base_url}/chat-messages' \\\n--header 'Authorization: Bearer {api_key}' \\\n--header 'Content-Type: application/json' \\\n--data-raw '{\n "inputs": ${JSON.stringify(props.inputs)},\n "query": "What are the specs of the iPhone 13 Pro Max?",\n "response_mode": "streaming",\n "conversation_id": "",\n "user": "abc-123"\n "files": [\n {\n "type": "images",\n "transfer_method": "remote_url"\n "url": "https://cloud.dify.ai/logo/logo-site.png"\n }\n ]\n},'`}>
+   <CodeGroup title="Request" tag="POST" label="/chat-messages" targetCode={`curl -X POST '${props.appDetail.api_base_url}/chat-messages' \\\n--header 'Authorization: Bearer {api_key}' \\\n--header 'Content-Type: application/json' \\\n--data-raw '{\n "inputs": ${JSON.stringify(props.inputs)},\n "query": "What are the specs of the iPhone 13 Pro Max?",\n "response_mode": "streaming",\n "conversation_id": "",\n "user": "abc-123",\n "files": [\n {\n "type": "image",\n "transfer_method": "remote_url"\n "url": "https://cloud.dify.ai/logo/logo-site.png"\n }\n ]\n}'`}>

    ```bash {{ title: 'cURL' }}
    curl -X POST '${props.appDetail.api_base_url}/chat-messages' \

@@ -130,7 +130,7 @@ export const ANNOTATION_DEFAULT = {
   score_threshold: 0.9,
 }

-export const MAX_TOOLS_NUM = 5
+export const MAX_TOOLS_NUM = 10

 export const DEFAULT_AGENT_SETTING = {
   enabled: false,

@@ -1,6 +1,6 @@
 {
   "name": "dify-web",
-  "version": "0.5.5",
+  "version": "0.5.6",
   "private": true,
   "scripts": {
     "dev": "next dev",

@@ -16,7 +16,7 @@ export const userInputsFormToPromptVariables = (useInputs: UserInputFormItem[] |
      return ['string', item['text-input']]

    if (item.external_data_tool)
-     return ['api', item.external_data_tool]
+     return [item.external_data_tool.type, item.external_data_tool]

    return ['select', item.select]
  })()

@@ -33,7 +33,17 @@ export const userInputsFormToPromptVariables = (useInputs: UserInputFormItem[] |
      is_context_var,
    })
  }
- else if (type === 'api') {
+ else if (type === 'select') {
+   promptVariables.push({
+     key: content.variable,
+     name: content.label,
+     required: content.required,
+     type: 'select',
+     options: content.options,
+     is_context_var,
+   })
+ }
+ else {
    promptVariables.push({
      key: content.variable,
      name: content.label,

@@ -46,16 +56,6 @@ export const userInputsFormToPromptVariables = (useInputs: UserInputFormItem[] |
      is_context_var,
    })
  }
- else {
-   promptVariables.push({
-     key: content.variable,
-     name: content.label,
-     required: content.required,
-     type: 'select',
-     options: content.options,
-     is_context_var,
-   })
- }
  })
  return promptVariables
 }

@@ -79,7 +79,18 @@ export const promptVariablesToUserInputsForm = (promptVariables: PromptVariable[
      },
    } as any)
  }
- else if (item.type === 'api') {
+ else if (item.type === 'select') {
+   userInputs.push({
+     select: {
+       label: item.name,
+       variable: item.key,
+       required: item.required !== false, // default true
+       options: item.options,
+       default: '',
+     },
+   } as any)
+ }
+ else {
    userInputs.push({
      external_data_tool: {
        label: item.name,

@@ -93,17 +104,6 @@ export const promptVariablesToUserInputsForm = (promptVariables: PromptVariable[
      },
    } as any)
  }
- else {
-   userInputs.push({
-     select: {
-       label: item.name,
-       variable: item.key,
-       required: item.required !== false, // default true
-       options: item.options,
-       default: '',
-     },
-   } as any)
- }
  })
  return userInputs
 }