Merge remote-tracking branch 'origin/feat/workflow-backend' into feat/workflow-backend

jyong 2024-03-19 19:41:33 +08:00
commit 6e600bc0dc
27 changed files with 862 additions and 218 deletions

View File

@@ -12,6 +12,8 @@ Please delete options that are not relevant.
- [ ] New feature (non-breaking change which adds functionality)
- [ ] Breaking change (fix or feature that would cause existing functionality to not work as expected)
- [ ] This change requires a documentation update, included: [Dify Document](https://github.com/langgenius/dify-docs)
- [ ] Improvement, including but not limited to code refactoring, performance optimization, and UI/UX improvement
- [ ] Dependency upgrade
# How Has This Been Tested?

View File

@@ -34,6 +34,9 @@ class BasicVariablesConfigManager:
typ = list(variable.keys())[0]
if typ == 'external_data_tool':
val = variable[typ]
if 'config' not in val:
continue
external_data_variables.append(
ExternalDataVariableEntity(
variable=val['variable'],

View File

@@ -529,31 +529,40 @@ class AdvancedChatAppGenerateTaskPipeline(BasedGenerateTaskPipeline, WorkflowCyc
text = ''
if isinstance(value, str | int | float):
text = str(value)
elif isinstance(value, dict):
# other types
text = json.dumps(value, ensure_ascii=False)
elif isinstance(value, FileVar):
# convert file to markdown
text = value.to_markdown()
elif isinstance(value, dict):
# handle files
file_vars = self._fetch_files_from_variable_value(value)
if file_vars:
file_var = file_vars[0]
try:
file_var_obj = FileVar(**file_var)
# convert file to markdown
text = file_var_obj.to_markdown()
except Exception as e:
logger.error(f'Error creating file var: {e}')
if not text:
# other types
text = json.dumps(value, ensure_ascii=False)
elif isinstance(value, list):
for item in value:
if isinstance(item, FileVar):
text += item.to_markdown() + ' '
# handle files
file_vars = self._fetch_files_from_variable_value(value)
for file_var in file_vars:
try:
file_var_obj = FileVar(**file_var)
except Exception as e:
logger.error(f'Error creating file var: {e}')
continue
# convert file to markdown
text = file_var_obj.to_markdown() + ' '
text = text.strip()
# # handle files
# file_vars = self._fetch_files_from_variable_value(value)
# for file_var in file_vars:
# try:
# file_var_obj = FileVar(**file_var)
# except Exception as e:
# logger.error(f'Error creating file var: {e}')
# continue
#
# # convert file to markdown
# text = file_var_obj.to_markdown()
if not text and value:
# other types
text = json.dumps(value, ensure_ascii=False)
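Condensed, the new logic prefers rendering any file variables as markdown and only falls back to JSON for everything else. A minimal runnable sketch of that shape, where FileVar and fetch_file_dicts are simplified stand-ins for the pipeline's FileVar and _fetch_files_from_variable_value:

import json
import logging
from dataclasses import dataclass

logger = logging.getLogger(__name__)

@dataclass
class FileVar:  # stand-in for the real FileVar
    url: str

    def to_markdown(self) -> str:
        return f'![file]({self.url})'

def fetch_file_dicts(value):  # stand-in for _fetch_files_from_variable_value
    items = value.values() if isinstance(value, dict) else value
    return [v for v in items if isinstance(v, dict) and 'url' in v]

def value_to_text(value) -> str:
    if isinstance(value, (str, int, float)):
        return str(value)
    text = ''
    if isinstance(value, (dict, list)):
        for file_dict in fetch_file_dicts(value):
            try:
                # convert each file to markdown, as the pipeline does
                text += FileVar(**file_dict).to_markdown() + ' '
            except Exception as e:
                logger.error(f'Error creating file var: {e}')
    text = text.strip()
    if not text and value:
        # other types: fall back to JSON
        text = json.dumps(value, ensure_ascii=False)
    return text

print(value_to_text({'image': {'url': 'https://example.com/a.png'}}))  # ![file](https://example.com/a.png)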

View File

@@ -533,5 +533,7 @@ class WorkflowCycleManage:
if isinstance(value, dict):
if '__variant' in value and value['__variant'] == FileVar.__name__:
return value
elif isinstance(value, FileVar):
return value.to_dict()
return None
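The helper above round-trips FileVar values through plain dicts: live FileVar objects are flattened with to_dict(), and dicts already carrying the __variant marker pass through untouched. A toy illustration of that contract (the FileVar stand-in is hypothetical):

from dataclasses import dataclass

@dataclass
class FileVar:  # hypothetical stand-in
    url: str

    def to_dict(self) -> dict:
        # mark the dict so it can be recognized again after serialization
        return {'__variant': FileVar.__name__, 'url': self.url}

def to_serializable(value):
    if isinstance(value, dict) and value.get('__variant') == FileVar.__name__:
        return value            # already in serialized form
    if isinstance(value, FileVar):
        return value.to_dict()  # live object: flatten it
    return None

serialized = to_serializable(FileVar(url='https://example.com/a.png'))
assert to_serializable(serialized) is serialized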

View File

@@ -354,7 +354,6 @@ class AnthropicLargeLanguageModel(LargeLanguageModel):
system += message.content
prompt_message_dicts = []
for message in prompt_messages:
if not isinstance(message, SystemPromptMessage):
prompt_message_dicts.append(self._convert_prompt_message_to_dict(message))

View File

@@ -123,6 +123,65 @@ LLM_BASE_MODELS = [
)
)
),
AzureBaseModel(
base_model_name='gpt-35-turbo-0125',
entity=AIModelEntity(
model='fake-deployment-name',
label=I18nObject(
en_US='fake-deployment-name-label',
),
model_type=ModelType.LLM,
features=[
ModelFeature.AGENT_THOUGHT,
ModelFeature.MULTI_TOOL_CALL,
ModelFeature.STREAM_TOOL_CALL,
],
fetch_from=FetchFrom.CUSTOMIZABLE_MODEL,
model_properties={
ModelPropertyKey.MODE: LLMMode.CHAT.value,
ModelPropertyKey.CONTEXT_SIZE: 16385,
},
parameter_rules=[
ParameterRule(
name='temperature',
**PARAMETER_RULE_TEMPLATE[DefaultParameterName.TEMPERATURE],
),
ParameterRule(
name='top_p',
**PARAMETER_RULE_TEMPLATE[DefaultParameterName.TOP_P],
),
ParameterRule(
name='presence_penalty',
**PARAMETER_RULE_TEMPLATE[DefaultParameterName.PRESENCE_PENALTY],
),
ParameterRule(
name='frequency_penalty',
**PARAMETER_RULE_TEMPLATE[DefaultParameterName.FREQUENCY_PENALTY],
),
_get_max_tokens(default=512, min_val=1, max_val=4096),
ParameterRule(
name='response_format',
label=I18nObject(
zh_Hans='回复格式',
en_US='response_format'
),
type='string',
help=I18nObject(
zh_Hans='指定模型必须输出的格式',
en_US='specifying the format that the model must output'
),
required=False,
options=['text', 'json_object']
),
],
pricing=PriceConfig(
input=0.0005,
output=0.0015,
unit=0.001,
currency='USD',
)
)
),
AzureBaseModel(
base_model_name='gpt-4',
entity=AIModelEntity(
@@ -273,6 +332,81 @@ LLM_BASE_MODELS = [
)
)
),
AzureBaseModel(
base_model_name='gpt-4-0125-preview',
entity=AIModelEntity(
model='fake-deployment-name',
label=I18nObject(
en_US='fake-deployment-name-label',
),
model_type=ModelType.LLM,
features=[
ModelFeature.AGENT_THOUGHT,
ModelFeature.MULTI_TOOL_CALL,
ModelFeature.STREAM_TOOL_CALL,
],
fetch_from=FetchFrom.CUSTOMIZABLE_MODEL,
model_properties={
ModelPropertyKey.MODE: LLMMode.CHAT.value,
ModelPropertyKey.CONTEXT_SIZE: 128000,
},
parameter_rules=[
ParameterRule(
name='temperature',
**PARAMETER_RULE_TEMPLATE[DefaultParameterName.TEMPERATURE],
),
ParameterRule(
name='top_p',
**PARAMETER_RULE_TEMPLATE[DefaultParameterName.TOP_P],
),
ParameterRule(
name='presence_penalty',
**PARAMETER_RULE_TEMPLATE[DefaultParameterName.PRESENCE_PENALTY],
),
ParameterRule(
name='frequency_penalty',
**PARAMETER_RULE_TEMPLATE[DefaultParameterName.FREQUENCY_PENALTY],
),
_get_max_tokens(default=512, min_val=1, max_val=4096),
ParameterRule(
name='seed',
label=I18nObject(
zh_Hans='种子',
en_US='Seed'
),
type='int',
help=I18nObject(
zh_Hans='如果指定,模型将尽最大努力进行确定性采样,使得重复的具有相同种子和参数的请求应该返回相同的结果。不能保证确定性,您应该参考 system_fingerprint 响应参数来监视变化。',
en_US='If specified, model will make a best effort to sample deterministically, such that repeated requests with the same seed and parameters should return the same result. Determinism is not guaranteed, and you should refer to the system_fingerprint response parameter to monitor changes in the backend.'
),
required=False,
precision=2,
min=0,
max=1,
),
ParameterRule(
name='response_format',
label=I18nObject(
zh_Hans='回复格式',
en_US='response_format'
),
type='string',
help=I18nObject(
zh_Hans='指定模型必须输出的格式',
en_US='specifying the format that the model must output'
),
required=False,
options=['text', 'json_object']
),
],
pricing=PriceConfig(
input=0.01,
output=0.03,
unit=0.001,
currency='USD',
)
)
),
AzureBaseModel(
base_model_name='gpt-4-1106-preview',
entity=AIModelEntity(

View File

@@ -75,6 +75,12 @@ model_credential_schema:
show_on:
- variable: __model_type
value: llm
- label:
en_US: gpt-35-turbo-0125
value: gpt-35-turbo-0125
show_on:
- variable: __model_type
value: llm
- label:
en_US: gpt-35-turbo-16k
value: gpt-35-turbo-16k
@@ -93,6 +99,12 @@ model_credential_schema:
show_on:
- variable: __model_type
value: llm
- label:
en_US: gpt-4-0125-preview
value: gpt-4-0125-preview
show_on:
- variable: __model_type
value: llm
- label:
en_US: gpt-4-1106-preview
value: gpt-4-1106-preview

View File

@@ -48,23 +48,23 @@ provider_credential_schema:
- value: us-east-1
label:
en_US: US East (N. Virginia)
zh_Hans: US East (N. Virginia)
zh_Hans: 美国东部 (弗吉尼亚北部)
- value: us-west-2
label:
en_US: US West (Oregon)
zh_Hans: US West (Oregon)
zh_Hans: 美国西部 (俄勒冈州)
- value: ap-southeast-1
label:
en_US: Asia Pacific (Singapore)
zh_Hans: Asia Pacific (Singapore)
zh_Hans: 亚太地区 (新加坡)
- value: ap-northeast-1
label:
en_US: Asia Pacific (Tokyo)
zh_Hans: Asia Pacific (Tokyo)
zh_Hans: 亚太地区 (东京)
- value: eu-central-1
label:
en_US: Europe (Frankfurt)
zh_Hans: Europe (Frankfurt)
zh_Hans: 欧洲 (法兰克福)
- value: us-gov-west-1
label:
en_US: AWS GovCloud (US-West)

View File

@@ -4,6 +4,8 @@
- anthropic.claude-v1
- anthropic.claude-v2
- anthropic.claude-v2:1
- anthropic.claude-3-sonnet-v1:0
- anthropic.claude-3-haiku-v1:0
- cohere.command-light-text-v14
- cohere.command-text-v14
- meta.llama2-13b-chat-v1

View File

@@ -0,0 +1,57 @@
model: anthropic.claude-3-haiku-20240307-v1:0
label:
en_US: Claude 3 Haiku
model_type: llm
features:
- agent-thought
- vision
model_properties:
mode: chat
context_size: 200000
# docs: https://docs.aws.amazon.com/bedrock/latest/userguide/model-parameters-anthropic-claude-messages.html
parameter_rules:
- name: max_tokens
use_template: max_tokens
required: true
type: int
default: 4096
min: 1
max: 4096
help:
zh_Hans: 停止前生成的最大令牌数。请注意,Anthropic Claude 模型可能会在达到 max_tokens 的值之前停止生成令牌。不同的 Anthropic Claude 模型对此参数具有不同的最大值。
en_US: The maximum number of tokens to generate before stopping. Note that Anthropic Claude models might stop generating tokens before reaching the value of max_tokens. Different Anthropic Claude models have different maximum values for this parameter.
# docs: https://docs.anthropic.com/claude/docs/system-prompts
- name: temperature
use_template: temperature
required: false
type: float
default: 1
min: 0.0
max: 1.0
help:
zh_Hans: 生成内容的随机性。
en_US: The amount of randomness injected into the response.
- name: top_p
required: false
type: float
default: 0.999
min: 0.000
max: 1.000
help:
zh_Hans: 在核采样中,Anthropic Claude 按概率递减顺序计算每个后续标记的所有选项的累积分布,并在达到 top_p 指定的特定概率时将其切断。您应该更改温度或top_p,但不能同时更改两者。
en_US: In nucleus sampling, Anthropic Claude computes the cumulative distribution over all the options for each subsequent token in decreasing probability order and cuts it off once it reaches a particular probability specified by top_p. You should alter either temperature or top_p, but not both.
- name: top_k
required: false
type: int
default: 0
min: 0
# tip: the AWS docs have an error here, the max value is 500
max: 500
help:
zh_Hans: 对于每个后续标记,仅从前 K 个选项中进行采样。使用 top_k 删除长尾低概率响应。
en_US: Only sample from the top K options for each subsequent token. Use top_k to remove long tail low probability responses.
pricing:
input: '0.00025'
output: '0.00125'
unit: '0.001'
currency: USD

View File

@@ -0,0 +1,56 @@
model: anthropic.claude-3-sonnet-20240229-v1:0
label:
en_US: Claude 3 Sonnet
model_type: llm
features:
- agent-thought
- vision
model_properties:
mode: chat
context_size: 200000
# docs: https://docs.aws.amazon.com/bedrock/latest/userguide/model-parameters-anthropic-claude-messages.html
parameter_rules:
- name: max_tokens
use_template: max_tokens
required: true
type: int
default: 4096
min: 1
max: 4096
help:
zh_Hans: 停止前生成的最大令牌数。请注意,Anthropic Claude 模型可能会在达到 max_tokens 的值之前停止生成令牌。不同的 Anthropic Claude 模型对此参数具有不同的最大值。
en_US: The maximum number of tokens to generate before stopping. Note that Anthropic Claude models might stop generating tokens before reaching the value of max_tokens. Different Anthropic Claude models have different maximum values for this parameter.
- name: temperature
use_template: temperature
required: false
type: float
default: 1
min: 0.0
max: 1.0
help:
zh_Hans: 生成内容的随机性。
en_US: The amount of randomness injected into the response.
- name: top_p
required: false
type: float
default: 0.999
min: 0.000
max: 1.000
help:
zh_Hans: 在核采样中,Anthropic Claude 按概率递减顺序计算每个后续标记的所有选项的累积分布,并在达到 top_p 指定的特定概率时将其切断。您应该更改温度或top_p,但不能同时更改两者。
en_US: In nucleus sampling, Anthropic Claude computes the cumulative distribution over all the options for each subsequent token in decreasing probability order and cuts it off once it reaches a particular probability specified by top_p. You should alter either temperature or top_p, but not both.
- name: top_k
required: false
type: int
default: 0
min: 0
# tip: the AWS docs have an error here, the max value is 500
max: 500
help:
zh_Hans: 对于每个后续标记,仅从前 K 个选项中进行采样。使用 top_k 删除长尾低概率响应。
en_US: Only sample from the top K options for each subsequent token. Use top_k to remove long tail low probability responses.
pricing:
input: '0.003'
output: '0.015'
unit: '0.001'
currency: USD
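These pricing blocks drive the cost figures in LLMUsage: assuming get_price multiplies tokens by the unit and the unit price (as the usage calculation added to the Bedrock runtime below suggests), 1,000 Claude 3 Sonnet input tokens at input '0.003' with unit '0.001' cost 1000 × 0.001 × 0.003 = 0.003 USD, i.e. about $3 per million input tokens.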

View File

@@ -1,9 +1,22 @@
import base64
import json
import logging
import mimetypes
import time
from collections.abc import Generator
from typing import Optional, Union
from typing import Optional, Union, cast
import boto3
import requests
from anthropic import AnthropicBedrock, Stream
from anthropic.types import (
ContentBlockDeltaEvent,
Message,
MessageDeltaEvent,
MessageStartEvent,
MessageStopEvent,
MessageStreamEvent,
)
from botocore.config import Config
from botocore.exceptions import (
ClientError,
@@ -13,14 +26,18 @@ from botocore.exceptions import (
UnknownServiceError,
)
from core.model_runtime.entities.llm_entities import LLMResult, LLMResultChunk, LLMResultChunkDelta
from core.model_runtime.entities.llm_entities import LLMResult, LLMResultChunk, LLMResultChunkDelta, LLMUsage
from core.model_runtime.entities.message_entities import (
AssistantPromptMessage,
ImagePromptMessageContent,
PromptMessage,
PromptMessageContentType,
PromptMessageTool,
SystemPromptMessage,
TextPromptMessageContent,
UserPromptMessage,
)
from core.model_runtime.entities.model_entities import PriceType
from core.model_runtime.errors.invoke import (
InvokeAuthorizationError,
InvokeBadRequestError,
@@ -54,9 +71,293 @@ class BedrockLargeLanguageModel(LargeLanguageModel):
:param user: unique user id
:return: full response or stream response chunk generator result
"""
# invoke claude 3 models via anthropic official SDK
if "anthropic.claude-3" in model:
return self._invoke_claude3(model, credentials, prompt_messages, model_parameters, stop, stream, user)
# invoke model
return self._generate(model, credentials, prompt_messages, model_parameters, stop, stream, user)
def _invoke_claude3(self, model: str, credentials: dict, prompt_messages: list[PromptMessage], model_parameters: dict,
stop: Optional[list[str]] = None, stream: bool = True, user: Optional[str] = None) -> Union[LLMResult, Generator]:
"""
Invoke Claude3 large language model
:param model: model name
:param credentials: model credentials
:param prompt_messages: prompt messages
:param model_parameters: model parameters
:param stop: stop words
:param stream: is stream response
:return: full response or stream response chunk generator result
"""
# use Anthropic official SDK references
# - https://docs.anthropic.com/claude/reference/claude-on-amazon-bedrock
# - https://github.com/anthropics/anthropic-sdk-python
client = AnthropicBedrock(
aws_access_key=credentials["aws_access_key_id"],
aws_secret_key=credentials["aws_secret_access_key"],
aws_region=credentials["aws_region"],
)
extra_model_kwargs = {}
if stop:
extra_model_kwargs['stop_sequences'] = stop
# Notice: If you request the current version of the SDK to the bedrock server,
# you will get the following error message and you need to wait for the service or SDK to be updated.
# Response: Error code: 400
# {'message': 'Malformed input request: #: subject must not be valid against schema
# {"required":["messages"]}#: extraneous key [metadata] is not permitted, please reformat your input and try again.'}
# TODO: Open in the future when the interface is properly supported
# if user:
# ref: https://github.com/anthropics/anthropic-sdk-python/blob/e84645b07ca5267066700a104b4d8d6a8da1383d/src/anthropic/resources/messages.py#L465
# extra_model_kwargs['metadata'] = message_create_params.Metadata(user_id=user)
system, prompt_message_dicts = self._convert_claude3_prompt_messages(prompt_messages)
if system:
extra_model_kwargs['system'] = system
response = client.messages.create(
model=model,
messages=prompt_message_dicts,
stream=stream,
**model_parameters,
**extra_model_kwargs
)
if stream:
return self._handle_claude3_stream_response(model, credentials, response, prompt_messages)
return self._handle_claude3_response(model, credentials, response, prompt_messages)
def _handle_claude3_response(self, model: str, credentials: dict, response: Message,
prompt_messages: list[PromptMessage]) -> LLMResult:
"""
Handle llm chat response
:param model: model name
:param credentials: credentials
:param response: response
:param prompt_messages: prompt messages
:return: full response chunk generator result
"""
# transform assistant message to prompt message
assistant_prompt_message = AssistantPromptMessage(
content=response.content[0].text
)
# calculate num tokens
if response.usage:
# transform usage
prompt_tokens = response.usage.input_tokens
completion_tokens = response.usage.output_tokens
else:
# calculate num tokens
prompt_tokens = self.get_num_tokens(model, credentials, prompt_messages)
completion_tokens = self.get_num_tokens(model, credentials, [assistant_prompt_message])
# transform usage
usage = self._calc_response_usage(model, credentials, prompt_tokens, completion_tokens)
# transform response
response = LLMResult(
model=response.model,
prompt_messages=prompt_messages,
message=assistant_prompt_message,
usage=usage
)
return response
def _handle_claude3_stream_response(self, model: str, credentials: dict, response: Stream[MessageStreamEvent],
prompt_messages: list[PromptMessage], ) -> Generator:
"""
Handle llm chat stream response
:param model: model name
:param credentials: credentials
:param response: response
:param prompt_messages: prompt messages
:return: full response or stream response chunk generator result
"""
try:
full_assistant_content = ''
return_model = None
input_tokens = 0
output_tokens = 0
finish_reason = None
index = 0
for chunk in response:
if isinstance(chunk, MessageStartEvent):
return_model = chunk.message.model
input_tokens = chunk.message.usage.input_tokens
elif isinstance(chunk, MessageDeltaEvent):
output_tokens = chunk.usage.output_tokens
finish_reason = chunk.delta.stop_reason
elif isinstance(chunk, MessageStopEvent):
usage = self._calc_response_usage(model, credentials, input_tokens, output_tokens)
yield LLMResultChunk(
model=return_model,
prompt_messages=prompt_messages,
delta=LLMResultChunkDelta(
index=index + 1,
message=AssistantPromptMessage(
content=''
),
finish_reason=finish_reason,
usage=usage
)
)
elif isinstance(chunk, ContentBlockDeltaEvent):
chunk_text = chunk.delta.text if chunk.delta.text else ''
full_assistant_content += chunk_text
assistant_prompt_message = AssistantPromptMessage(
content=chunk_text if chunk_text else '',
)
index = chunk.index
yield LLMResultChunk(
model=model,
prompt_messages=prompt_messages,
delta=LLMResultChunkDelta(
index=index,
message=assistant_prompt_message,
)
)
except Exception as ex:
raise InvokeError(str(ex))
def _calc_claude3_response_usage(self, model: str, credentials: dict, prompt_tokens: int, completion_tokens: int) -> LLMUsage:
"""
Calculate response usage
:param model: model name
:param credentials: model credentials
:param prompt_tokens: prompt tokens
:param completion_tokens: completion tokens
:return: usage
"""
# get prompt price info
prompt_price_info = self.get_price(
model=model,
credentials=credentials,
price_type=PriceType.INPUT,
tokens=prompt_tokens,
)
# get completion price info
completion_price_info = self.get_price(
model=model,
credentials=credentials,
price_type=PriceType.OUTPUT,
tokens=completion_tokens
)
# transform usage
usage = LLMUsage(
prompt_tokens=prompt_tokens,
prompt_unit_price=prompt_price_info.unit_price,
prompt_price_unit=prompt_price_info.unit,
prompt_price=prompt_price_info.total_amount,
completion_tokens=completion_tokens,
completion_unit_price=completion_price_info.unit_price,
completion_price_unit=completion_price_info.unit,
completion_price=completion_price_info.total_amount,
total_tokens=prompt_tokens + completion_tokens,
total_price=prompt_price_info.total_amount + completion_price_info.total_amount,
currency=prompt_price_info.currency,
latency=time.perf_counter() - self.started_at
)
return usage
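Putting the pieces together, a minimal direct call against the same AnthropicBedrock API that _invoke_claude3 wraps; the credentials and model ID below are placeholders, and the stream handler above branches on the full set of MessageStartEvent / MessageDeltaEvent / MessageStopEvent / ContentBlockDeltaEvent rather than just text deltas:

from anthropic import AnthropicBedrock
from anthropic.types import ContentBlockDeltaEvent

client = AnthropicBedrock(
    aws_access_key='AKIA...',   # placeholder credentials
    aws_secret_key='***',
    aws_region='us-east-1',
)
stream = client.messages.create(
    model='anthropic.claude-3-sonnet-20240229-v1:0',  # placeholder model choice
    max_tokens=1024,
    system='You are a helpful assistant.',
    messages=[{'role': 'user', 'content': 'ping'}],
    stream=True,
)
for event in stream:
    # only text deltas are printed here; usage and stop reason arrive in other events
    if isinstance(event, ContentBlockDeltaEvent):
        print(event.delta.text, end='')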
def _convert_claude3_prompt_messages(self, prompt_messages: list[PromptMessage]) -> tuple[str, list[dict]]:
"""
Convert prompt messages to dict list and system
"""
system = ""
first_loop = True
for message in prompt_messages:
if isinstance(message, SystemPromptMessage):
message.content=message.content.strip()
if first_loop:
system=message.content
first_loop=False
else:
system+="\n"
system+=message.content
prompt_message_dicts = []
for message in prompt_messages:
if not isinstance(message, SystemPromptMessage):
prompt_message_dicts.append(self._convert_claude3_prompt_message_to_dict(message))
return system, prompt_message_dicts
def _convert_claude3_prompt_message_to_dict(self, message: PromptMessage) -> dict:
"""
Convert PromptMessage to dict
"""
if isinstance(message, UserPromptMessage):
message = cast(UserPromptMessage, message)
if isinstance(message.content, str):
message_dict = {"role": "user", "content": message.content}
else:
sub_messages = []
for message_content in message.content:
if message_content.type == PromptMessageContentType.TEXT:
message_content = cast(TextPromptMessageContent, message_content)
sub_message_dict = {
"type": "text",
"text": message_content.data
}
sub_messages.append(sub_message_dict)
elif message_content.type == PromptMessageContentType.IMAGE:
message_content = cast(ImagePromptMessageContent, message_content)
if not message_content.data.startswith("data:"):
# fetch image data from url
try:
image_content = requests.get(message_content.data).content
mime_type, _ = mimetypes.guess_type(message_content.data)
base64_data = base64.b64encode(image_content).decode('utf-8')
except Exception as ex:
raise ValueError(f"Failed to fetch image data from url {message_content.data}, {ex}")
else:
data_split = message_content.data.split(";base64,")
mime_type = data_split[0].replace("data:", "")
base64_data = data_split[1]
if mime_type not in ["image/jpeg", "image/png", "image/gif", "image/webp"]:
raise ValueError(f"Unsupported image type {mime_type}, "
f"only support image/jpeg, image/png, image/gif, and image/webp")
sub_message_dict = {
"type": "image",
"source": {
"type": "base64",
"media_type": mime_type,
"data": base64_data
}
}
sub_messages.append(sub_message_dict)
message_dict = {"role": "user", "content": sub_messages}
elif isinstance(message, AssistantPromptMessage):
message = cast(AssistantPromptMessage, message)
message_dict = {"role": "assistant", "content": message.content}
elif isinstance(message, SystemPromptMessage):
message = cast(SystemPromptMessage, message)
message_dict = {"role": "system", "content": message.content}
else:
raise ValueError(f"Got unknown type {message}")
return message_dict
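The image branch above is self-contained enough to lift out; a standalone sketch of just that conversion (the function name is illustrative, not part of the commit):

import base64
import mimetypes

import requests

def image_to_claude3_source(data: str) -> dict:
    """Turn a URL or data: URI into the base64 image source block used above."""
    if data.startswith('data:'):
        data_split = data.split(';base64,')
        mime_type = data_split[0].replace('data:', '')
        base64_data = data_split[1]
    else:
        image_content = requests.get(data).content  # fetch image data from url
        mime_type, _ = mimetypes.guess_type(data)
        base64_data = base64.b64encode(image_content).decode('utf-8')
    if mime_type not in ('image/jpeg', 'image/png', 'image/gif', 'image/webp'):
        raise ValueError(f'Unsupported image type {mime_type}, '
                         f'only support image/jpeg, image/png, image/gif, and image/webp')
    return {'type': 'image', 'source': {'type': 'base64', 'media_type': mime_type, 'data': base64_data}}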
def get_num_tokens(self, model: str, credentials: dict, messages: list[PromptMessage] | str,
tools: Optional[list[PromptMessageTool]] = None) -> int:
"""
@@ -101,7 +402,19 @@ class BedrockLargeLanguageModel(LargeLanguageModel):
:param credentials: model credentials
:return:
"""
if "anthropic.claude-3" in model:
try:
self._invoke_claude3(model=model,
credentials=credentials,
prompt_messages=[{"role": "user", "content": "ping"}],
model_parameters={},
stop=None,
stream=False)
except Exception as ex:
raise CredentialsValidateFailedError(str(ex))
try:
ping_message = UserPromptMessage(content="ping")
self._generate(model=model,

View File

@@ -449,7 +449,7 @@ class OllamaLargeLanguageModel(LargeLanguageModel):
help=I18nObject(en_US="The temperature of the model. "
"Increasing the temperature will make the model answer "
"more creatively. (Default: 0.8)"),
default=0.8,
default=0.1,
min=0,
max=2
),
@@ -472,7 +472,6 @@ class OllamaLargeLanguageModel(LargeLanguageModel):
help=I18nObject(en_US="Reduces the probability of generating nonsense. "
"A higher value (e.g. 100) will give more diverse answers, "
"while a lower value (e.g. 10) will be more conservative. (Default: 40)"),
default=40,
min=1,
max=100
),
@@ -483,7 +482,6 @@ class OllamaLargeLanguageModel(LargeLanguageModel):
help=I18nObject(en_US="Sets how strongly to penalize repetitions. "
"A higher value (e.g., 1.5) will penalize repetitions more strongly, "
"while a lower value (e.g., 0.9) will be more lenient. (Default: 1.1)"),
default=1.1,
min=-2,
max=2
),
@@ -494,7 +492,7 @@ class OllamaLargeLanguageModel(LargeLanguageModel):
type=ParameterType.INT,
help=I18nObject(en_US="Maximum number of tokens to predict when generating text. "
"(Default: 128, -1 = infinite generation, -2 = fill context)"),
default=128,
default=512 if int(credentials.get('max_tokens', 4096)) >= 768 else 128,
min=-2,
max=int(credentials.get('max_tokens', 4096)),
),
@@ -504,7 +502,6 @@ class OllamaLargeLanguageModel(LargeLanguageModel):
type=ParameterType.INT,
help=I18nObject(en_US="Enable Mirostat sampling for controlling perplexity. "
"(default: 0, 0 = disabled, 1 = Mirostat, 2 = Mirostat 2.0)"),
default=0,
min=0,
max=2
),
@@ -516,7 +513,6 @@ class OllamaLargeLanguageModel(LargeLanguageModel):
"the generated text. A lower learning rate will result in slower adjustments, "
"while a higher learning rate will make the algorithm more responsive. "
"(Default: 0.1)"),
default=0.1,
precision=1
),
ParameterRule(
@@ -525,7 +521,6 @@ class OllamaLargeLanguageModel(LargeLanguageModel):
type=ParameterType.FLOAT,
help=I18nObject(en_US="Controls the balance between coherence and diversity of the output. "
"A lower value will result in more focused and coherent text. (Default: 5.0)"),
default=5.0,
precision=1
),
ParameterRule(
@@ -543,7 +538,6 @@ class OllamaLargeLanguageModel(LargeLanguageModel):
type=ParameterType.INT,
help=I18nObject(en_US="The number of layers to send to the GPU(s). "
"On macOS it defaults to 1 to enable metal support, 0 to disable."),
default=1,
min=0,
max=1
),
@@ -563,7 +557,6 @@ class OllamaLargeLanguageModel(LargeLanguageModel):
type=ParameterType.INT,
help=I18nObject(en_US="Sets how far back for the model to look back to prevent repetition. "
"(Default: 64, 0 = disabled, -1 = num_ctx)"),
default=64,
min=-1
),
ParameterRule(
@@ -573,7 +566,6 @@ class OllamaLargeLanguageModel(LargeLanguageModel):
help=I18nObject(en_US="Tail free sampling is used to reduce the impact of less probable tokens "
"from the output. A higher value (e.g., 2.0) will reduce the impact more, "
"while a value of 1.0 disables this setting. (default: 1)"),
default=1,
precision=1
),
ParameterRule(
@@ -583,7 +575,6 @@ class OllamaLargeLanguageModel(LargeLanguageModel):
help=I18nObject(en_US="Sets the random number seed to use for generation. Setting this to "
"a specific number will make the model generate the same text for "
"the same prompt. (Default: 0)"),
default=0
),
ParameterRule(
name='format',
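The substantive change in this file is num_predict: its default is now derived from the configured max_tokens credential instead of being fixed at 128. Restated as a tiny helper (the name is hypothetical):

def num_predict_default(credentials: dict) -> int:
    # prefer 512 tokens, but keep the old 128 default when the window is small
    return 512 if int(credentials.get('max_tokens', 4096)) >= 768 else 128

assert num_predict_default({}) == 512
assert num_predict_default({'max_tokens': '512'}) == 128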

View File

@@ -8,54 +8,70 @@ model_properties:
parameter_rules:
- name: temperature
use_template: temperature
default: 1.0
type: float
default: 0.85
min: 0.0
max: 2.0
help:
zh_Hans: 用于控制随机性和多样性的程度。具体来说,temperature值控制了生成文本时对每个候选词的概率分布进行平滑的程度。较高的temperature值会降低概率分布的峰值,使得更多的低概率词被选择,生成结果更加多样化;而较低的temperature值则会增强概率分布的峰值,使得高概率词更容易被选择,生成结果更加确定。
en_US: Used to control the degree of randomness and diversity. Specifically, the temperature value controls the degree to which the probability distribution of each candidate word is smoothed when generating text. A higher temperature value will reduce the peak value of the probability distribution, allowing more low-probability words to be selected, and the generated results will be more diverse; while a lower temperature value will enhance the peak value of the probability distribution, making it easier for high-probability words to be selected, and the generated results are more certain.
- name: max_tokens
use_template: max_tokens
type: int
default: 2000
min: 1
max: 2000
help:
zh_Hans: 用于指定模型在生成内容时token的最大数量,它定义了生成的上限,但不保证每次都会生成到这个数量。
en_US: It is used to specify the maximum number of tokens when the model generates content. It defines the upper limit of generation, but does not guarantee that this number will be generated every time.
- name: top_p
use_template: top_p
type: float
default: 0.8
min: 0.1
max: 0.9
help:
zh_Hans: 生成过程中核采样方法概率阈值,例如取值为0.8时,仅保留概率加起来大于等于0.8的最可能token的最小集合作为候选集。取值范围为(0,1.0),取值越大,生成的随机性越高;取值越低,生成的确定性越高。
en_US: The probability threshold of the kernel sampling method during the generation process. For example, when the value is 0.8, only the smallest set of the most likely tokens with a sum of probabilities greater than or equal to 0.8 is retained as the candidate set. The value range is (0,1.0). The larger the value, the higher the randomness generated; the lower the value, the higher the certainty generated.
- name: max_tokens
use_template: max_tokens
default: 1500
min: 1
max: 6000
help:
zh_Hans: 用于限制模型生成token的数量,max_tokens设置的是生成上限,并不表示一定会生成这么多的token数量。
en_US: It is used to limit the number of tokens generated by the model. max_tokens sets the upper limit of generation, which does not mean that so many tokens will be generated.
- name: top_k
type: int
min: 0
max: 99
label:
zh_Hans: 取样数量
en_US: Top k
type: int
help:
zh_Hans: 生成时采样候选集的大小。例如,取值为50时,仅将单次生成中得分最高的50个token组成随机采样的候选集。取值越大,生成的随机性越高;取值越小,生成的确定性越高。默认不传递该参数,取值为None或当top_k大于100时,表示不启用top_k策略,此时仅有top_p策略生效。
en_US: The size of the sample candidate set when generated. For example, when the value is 50, only the 50 highest-scoring tokens in a single generation form a randomly sampled candidate set. The larger the value, the higher the randomness generated; the smaller the value, the higher the certainty generated. This parameter is not passed by default. The value is None or when top_k is greater than 100, it means that the top_k policy is not enabled. At this time, only the top_p policy takes effect.
required: false
zh_Hans: 生成时采样候选集的大小。例如,取值为50时,仅将单次生成中得分最高的50个token组成随机采样的候选集。取值越大,生成的随机性越高;取值越小,生成的确定性越高。
en_US: The size of the sample candidate set when generated. For example, when the value is 50, only the 50 highest-scoring tokens in a single generation form a randomly sampled candidate set. The larger the value, the higher the randomness generated; the smaller the value, the higher the certainty generated.
- name: seed
required: false
type: int
default: 1234
label:
zh_Hans: 随机种子
en_US: Random seed
type: int
help:
zh_Hans: 生成时随机数的种子,用于控制模型生成的随机性。如果使用相同的种子,每次运行生成的结果都将相同;当需要复现模型的生成结果时,可以使用相同的种子。seed参数支持无符号64位整数类型。
en_US: When generating, the random number seed is used to control the randomness of model generation. If you use the same seed, the results generated by each run will be the same; when you need to reproduce the results of the model, you can use the same seed. The seed parameter supports unsigned 64-bit integer types.
required: false
zh_Hans: 生成时使用的随机数种子,用户控制模型生成内容的随机性。支持无符号64位整数,默认值为 1234。在使用seed时,模型将尽可能生成相同或相似的结果,但目前不保证每次生成的结果完全相同。
en_US: The random number seed used when generating, the user controls the randomness of the content generated by the model. Supports unsigned 64-bit integers, default value is 1234. When using seed, the model will try its best to generate the same or similar results, but there is currently no guarantee that the results will be exactly the same every time.
- name: repetition_penalty
label:
en_US: Repetition penalty
required: false
type: float
default: 1.1
label:
en_US: Repetition penalty
help:
zh_Hans: 用于控制模型生成时的重复度。提高repetition_penalty时可以降低模型生成的重复度。1.0表示不做惩罚。
en_US: Used to control the repetition of model generation. Increasing the repetition_penalty can reduce the repetition of model generation. 1.0 means no punishment.
required: false
en_US: Used to control the repeatability when generating models. Increasing repetition_penalty can reduce the duplication of model generation. 1.0 means no punishment.
- name: enable_search
type: boolean
default: false
help:
zh_Hans: 模型内置了互联网搜索服务,该参数控制模型在生成文本时是否参考使用互联网搜索结果。启用互联网搜索,模型会将搜索结果作为文本生成过程中的参考信息,但模型会基于其内部逻辑“自行判断”是否使用互联网搜索结果。
en_US: The model has a built-in Internet search service. This parameter controls whether the model refers to Internet search results when generating text. When Internet search is enabled, the model will use the search results as reference information in the text generation process, but the model will "judge" whether to use Internet search results based on its internal logic.
- name: response_format
use_template: response_format
pricing:
input: '0.12'
output: '0.12'
unit: '0.001'
currency: RMB

View File

@@ -4,58 +4,74 @@ label:
model_type: llm
model_properties:
mode: chat
context_size: 30000
context_size: 32768
parameter_rules:
- name: temperature
use_template: temperature
default: 1.0
type: float
default: 0.85
min: 0.0
max: 2.0
help:
zh_Hans: 用于控制随机性和多样性的程度。具体来说,temperature值控制了生成文本时对每个候选词的概率分布进行平滑的程度。较高的temperature值会降低概率分布的峰值,使得更多的低概率词被选择,生成结果更加多样化;而较低的temperature值则会增强概率分布的峰值,使得高概率词更容易被选择,生成结果更加确定。
en_US: Used to control the degree of randomness and diversity. Specifically, the temperature value controls the degree to which the probability distribution of each candidate word is smoothed when generating text. A higher temperature value will reduce the peak value of the probability distribution, allowing more low-probability words to be selected, and the generated results will be more diverse; while a lower temperature value will enhance the peak value of the probability distribution, making it easier for high-probability words to be selected, and the generated results are more certain.
- name: max_tokens
use_template: max_tokens
type: int
default: 2000
min: 1
max: 2000
help:
zh_Hans: 用于指定模型在生成内容时token的最大数量,它定义了生成的上限,但不保证每次都会生成到这个数量。
en_US: It is used to specify the maximum number of tokens when the model generates content. It defines the upper limit of generation, but does not guarantee that this number will be generated every time.
- name: top_p
use_template: top_p
type: float
default: 0.8
min: 0.1
max: 0.9
help:
zh_Hans: 生成过程中核采样方法概率阈值,例如取值为0.8时,仅保留概率加起来大于等于0.8的最可能token的最小集合作为候选集。取值范围为(0,1.0),取值越大,生成的随机性越高;取值越低,生成的确定性越高。
en_US: The probability threshold of the kernel sampling method during the generation process. For example, when the value is 0.8, only the smallest set of the most likely tokens with a sum of probabilities greater than or equal to 0.8 is retained as the candidate set. The value range is (0,1.0). The larger the value, the higher the randomness generated; the lower the value, the higher the certainty generated.
- name: max_tokens
use_template: max_tokens
default: 2000
min: 1
max: 28000
help:
zh_Hans: 用于限制模型生成token的数量,max_tokens设置的是生成上限,并不表示一定会生成这么多的token数量。
en_US: It is used to limit the number of tokens generated by the model. max_tokens sets the upper limit of generation, which does not mean that so many tokens will be generated.
- name: top_k
type: int
min: 0
max: 99
label:
zh_Hans: 取样数量
en_US: Top k
type: int
help:
zh_Hans: 生成时采样候选集的大小。例如,取值为50时,仅将单次生成中得分最高的50个token组成随机采样的候选集。取值越大,生成的随机性越高;取值越小,生成的确定性越高。默认不传递该参数,取值为None或当top_k大于100时,表示不启用top_k策略,此时仅有top_p策略生效。
en_US: The size of the sample candidate set when generated. For example, when the value is 50, only the 50 highest-scoring tokens in a single generation form a randomly sampled candidate set. The larger the value, the higher the randomness generated; the smaller the value, the higher the certainty generated. This parameter is not passed by default. The value is None or when top_k is greater than 100, it means that the top_k policy is not enabled. At this time, only the top_p policy takes effect.
required: false
zh_Hans: 生成时采样候选集的大小。例如,取值为50时,仅将单次生成中得分最高的50个token组成随机采样的候选集。取值越大,生成的随机性越高;取值越小,生成的确定性越高。
en_US: The size of the sample candidate set when generated. For example, when the value is 50, only the 50 highest-scoring tokens in a single generation form a randomly sampled candidate set. The larger the value, the higher the randomness generated; the smaller the value, the higher the certainty generated.
- name: seed
required: false
type: int
default: 1234
label:
zh_Hans: 随机种子
en_US: Random seed
type: int
help:
zh_Hans: 生成时随机数的种子,用于控制模型生成的随机性。如果使用相同的种子,每次运行生成的结果都将相同;当需要复现模型的生成结果时,可以使用相同的种子。seed参数支持无符号64位整数类型。
en_US: When generating, the random number seed is used to control the randomness of model generation. If you use the same seed, the results generated by each run will be the same; when you need to reproduce the results of the model, you can use the same seed. The seed parameter supports unsigned 64-bit integer types.
required: false
zh_Hans: 生成时使用的随机数种子,用户控制模型生成内容的随机性。支持无符号64位整数,默认值为 1234。在使用seed时,模型将尽可能生成相同或相似的结果,但目前不保证每次生成的结果完全相同。
en_US: The random number seed used when generating, the user controls the randomness of the content generated by the model. Supports unsigned 64-bit integers, default value is 1234. When using seed, the model will try its best to generate the same or similar results, but there is currently no guarantee that the results will be exactly the same every time.
- name: repetition_penalty
label:
en_US: Repetition penalty
required: false
type: float
default: 1.1
label:
en_US: Repetition penalty
help:
zh_Hans: 用于控制模型生成时的重复度。提高repetition_penalty时可以降低模型生成的重复度。1.0表示不做惩罚。
en_US: Used to control the repetition of model generation. Increasing the repetition_penalty can reduce the repetition of model generation. 1.0 means no punishment.
required: false
en_US: Used to control the repeatability when generating models. Increasing repetition_penalty can reduce the duplication of model generation. 1.0 means no punishment.
- name: enable_search
type: boolean
default: false
help:
zh_Hans: 模型内置了互联网搜索服务,该参数控制模型在生成文本时是否参考使用互联网搜索结果。启用互联网搜索,模型会将搜索结果作为文本生成过程中的参考信息,但模型会基于其内部逻辑“自行判断”是否使用互联网搜索结果。
en_US: The model has a built-in Internet search service. This parameter controls whether the model refers to Internet search results when generating text. When Internet search is enabled, the model will use the search results as reference information in the text generation process, but the model will "judge" whether to use Internet search results based on its internal logic.
- name: response_format
use_template: response_format
pricing:
input: '0.12'
output: '0.12'
unit: '0.001'
currency: RMB

View File

@@ -8,54 +8,70 @@ model_properties:
parameter_rules:
- name: temperature
use_template: temperature
default: 1.0
type: float
default: 0.85
min: 0.0
max: 2.0
help:
zh_Hans: 用于控制随机性和多样性的程度。具体来说,temperature值控制了生成文本时对每个候选词的概率分布进行平滑的程度。较高的temperature值会降低概率分布的峰值,使得更多的低概率词被选择,生成结果更加多样化;而较低的temperature值则会增强概率分布的峰值,使得高概率词更容易被选择,生成结果更加确定。
en_US: Used to control the degree of randomness and diversity. Specifically, the temperature value controls the degree to which the probability distribution of each candidate word is smoothed when generating text. A higher temperature value will reduce the peak value of the probability distribution, allowing more low-probability words to be selected, and the generated results will be more diverse; while a lower temperature value will enhance the peak value of the probability distribution, making it easier for high-probability words to be selected, and the generated results are more certain.
- name: max_tokens
use_template: max_tokens
type: int
default: 2000
min: 1
max: 2000
help:
zh_Hans: 用于指定模型在生成内容时token的最大数量,它定义了生成的上限,但不保证每次都会生成到这个数量。
en_US: It is used to specify the maximum number of tokens when the model generates content. It defines the upper limit of generation, but does not guarantee that this number will be generated every time.
- name: top_p
use_template: top_p
type: float
default: 0.8
min: 0.1
max: 0.9
help:
zh_Hans: 生成过程中核采样方法概率阈值,例如取值为0.8时,仅保留概率加起来大于等于0.8的最可能token的最小集合作为候选集。取值范围为(0,1.0),取值越大,生成的随机性越高;取值越低,生成的确定性越高。
en_US: The probability threshold of the kernel sampling method during the generation process. For example, when the value is 0.8, only the smallest set of the most likely tokens with a sum of probabilities greater than or equal to 0.8 is retained as the candidate set. The value range is (0,1.0). The larger the value, the higher the randomness generated; the lower the value, the higher the certainty generated.
- name: max_tokens
use_template: max_tokens
default: 1500
min: 1
max: 6000
help:
zh_Hans: 用于限制模型生成token的数量,max_tokens设置的是生成上限,并不表示一定会生成这么多的token数量。
en_US: It is used to limit the number of tokens generated by the model. max_tokens sets the upper limit of generation, which does not mean that so many tokens will be generated.
- name: top_k
type: int
min: 0
max: 99
label:
zh_Hans: 取样数量
en_US: Top k
type: int
help:
zh_Hans: 生成时采样候选集的大小。例如,取值为50时,仅将单次生成中得分最高的50个token组成随机采样的候选集。取值越大,生成的随机性越高;取值越小,生成的确定性越高。默认不传递该参数,取值为None或当top_k大于100时,表示不启用top_k策略,此时仅有top_p策略生效。
en_US: The size of the sample candidate set when generated. For example, when the value is 50, only the 50 highest-scoring tokens in a single generation form a randomly sampled candidate set. The larger the value, the higher the randomness generated; the smaller the value, the higher the certainty generated. This parameter is not passed by default. The value is None or when top_k is greater than 100, it means that the top_k policy is not enabled. At this time, only the top_p policy takes effect.
required: false
zh_Hans: 生成时采样候选集的大小。例如,取值为50时,仅将单次生成中得分最高的50个token组成随机采样的候选集。取值越大,生成的随机性越高;取值越小,生成的确定性越高。
en_US: The size of the sample candidate set when generated. For example, when the value is 50, only the 50 highest-scoring tokens in a single generation form a randomly sampled candidate set. The larger the value, the higher the randomness generated; the smaller the value, the higher the certainty generated.
- name: seed
required: false
type: int
default: 1234
label:
zh_Hans: 随机种子
en_US: Random seed
type: int
help:
zh_Hans: 生成时随机数的种子,用于控制模型生成的随机性。如果使用相同的种子,每次运行生成的结果都将相同;当需要复现模型的生成结果时,可以使用相同的种子。seed参数支持无符号64位整数类型。
en_US: When generating, the random number seed is used to control the randomness of model generation. If you use the same seed, the results generated by each run will be the same; when you need to reproduce the results of the model, you can use the same seed. The seed parameter supports unsigned 64-bit integer types.
required: false
zh_Hans: 生成时使用的随机数种子,用户控制模型生成内容的随机性。支持无符号64位整数,默认值为 1234。在使用seed时,模型将尽可能生成相同或相似的结果,但目前不保证每次生成的结果完全相同。
en_US: The random number seed used when generating, the user controls the randomness of the content generated by the model. Supports unsigned 64-bit integers, default value is 1234. When using seed, the model will try its best to generate the same or similar results, but there is currently no guarantee that the results will be exactly the same every time.
- name: repetition_penalty
label:
en_US: Repetition penalty
required: false
type: float
default: 1.1
label:
en_US: Repetition penalty
help:
zh_Hans: 用于控制模型生成时的重复度。提高repetition_penalty时可以降低模型生成的重复度。1.0表示不做惩罚。
en_US: Used to control the repetition of model generation. Increasing the repetition_penalty can reduce the repetition of model generation. 1.0 means no punishment.
required: false
en_US: Used to control the repeatability when generating models. Increasing repetition_penalty can reduce the duplication of model generation. 1.0 means no punishment.
- name: enable_search
type: boolean
default: false
help:
zh_Hans: 模型内置了互联网搜索服务,该参数控制模型在生成文本时是否参考使用互联网搜索结果。启用互联网搜索,模型会将搜索结果作为文本生成过程中的参考信息,但模型会基于其内部逻辑“自行判断”是否使用互联网搜索结果。
en_US: The model has a built-in Internet search service. This parameter controls whether the model refers to Internet search results when generating text. When Internet search is enabled, the model will use the search results as reference information in the text generation process, but the model will "judge" whether to use Internet search results based on its internal logic.
- name: response_format
use_template: response_format
pricing:
input: '0.12'
output: '0.12'
unit: '0.001'
currency: RMB

View File

@@ -4,58 +4,70 @@ label:
model_type: llm
model_properties:
mode: completion
context_size: 32000
context_size: 32768
parameter_rules:
- name: temperature
use_template: temperature
default: 1.0
type: float
default: 0.85
min: 0.0
max: 2.0
help:
zh_Hans: 用于控制随机性和多样性的程度。具体来说,temperature值控制了生成文本时对每个候选词的概率分布进行平滑的程度。较高的temperature值会降低概率分布的峰值,使得更多的低概率词被选择,生成结果更加多样化;而较低的temperature值则会增强概率分布的峰值,使得高概率词更容易被选择,生成结果更加确定。
en_US: Used to control the degree of randomness and diversity. Specifically, the temperature value controls the degree to which the probability distribution of each candidate word is smoothed when generating text. A higher temperature value will reduce the peak value of the probability distribution, allowing more low-probability words to be selected, and the generated results will be more diverse; while a lower temperature value will enhance the peak value of the probability distribution, making it easier for high-probability words to be selected, and the generated results are more certain.
- name: max_tokens
use_template: max_tokens
type: int
default: 1500
min: 1
max: 1500
help:
zh_Hans: 用于指定模型在生成内容时token的最大数量,它定义了生成的上限,但不保证每次都会生成到这个数量。
en_US: It is used to specify the maximum number of tokens when the model generates content. It defines the upper limit of generation, but does not guarantee that this number will be generated every time.
- name: top_p
use_template: top_p
type: float
default: 0.8
min: 0.1
max: 0.9
help:
zh_Hans: 生成过程中核采样方法概率阈值,例如取值为0.8时,仅保留概率加起来大于等于0.8的最可能token的最小集合作为候选集。取值范围为(0,1.0),取值越大,生成的随机性越高;取值越低,生成的确定性越高。
en_US: The probability threshold of the kernel sampling method during the generation process. For example, when the value is 0.8, only the smallest set of the most likely tokens with a sum of probabilities greater than or equal to 0.8 is retained as the candidate set. The value range is (0,1.0). The larger the value, the higher the randomness generated; the lower the value, the higher the certainty generated.
- name: max_tokens
use_template: max_tokens
default: 2000
min: 1
max: 30000
help:
zh_Hans: 用于限制模型生成token的数量,max_tokens设置的是生成上限,并不表示一定会生成这么多的token数量。
en_US: It is used to limit the number of tokens generated by the model. max_tokens sets the upper limit of generation, which does not mean that so many tokens will be generated.
- name: top_k
type: int
min: 0
max: 99
label:
zh_Hans: 取样数量
en_US: Top k
type: int
help:
zh_Hans: 生成时采样候选集的大小。例如,取值为50时,仅将单次生成中得分最高的50个token组成随机采样的候选集。取值越大,生成的随机性越高;取值越小,生成的确定性越高。默认不传递该参数,取值为None或当top_k大于100时,表示不启用top_k策略,此时仅有top_p策略生效。
en_US: The size of the sample candidate set when generated. For example, when the value is 50, only the 50 highest-scoring tokens in a single generation form a randomly sampled candidate set. The larger the value, the higher the randomness generated; the smaller the value, the higher the certainty generated. This parameter is not passed by default. The value is None or when top_k is greater than 100, it means that the top_k policy is not enabled. At this time, only the top_p policy takes effect.
required: false
zh_Hans: 生成时采样候选集的大小。例如,取值为50时,仅将单次生成中得分最高的50个token组成随机采样的候选集。取值越大,生成的随机性越高;取值越小,生成的确定性越高。
en_US: The size of the sample candidate set when generated. For example, when the value is 50, only the 50 highest-scoring tokens in a single generation form a randomly sampled candidate set. The larger the value, the higher the randomness generated; the smaller the value, the higher the certainty generated.
- name: seed
required: false
type: int
default: 1234
label:
zh_Hans: 随机种子
en_US: Random seed
type: int
help:
zh_Hans: 生成时随机数的种子,用于控制模型生成的随机性。如果使用相同的种子,每次运行生成的结果都将相同;当需要复现模型的生成结果时,可以使用相同的种子。seed参数支持无符号64位整数类型。
en_US: When generating, the random number seed is used to control the randomness of model generation. If you use the same seed, the results generated by each run will be the same; when you need to reproduce the results of the model, you can use the same seed. The seed parameter supports unsigned 64-bit integer types.
required: false
zh_Hans: 生成时使用的随机数种子,用户控制模型生成内容的随机性。支持无符号64位整数,默认值为 1234。在使用seed时,模型将尽可能生成相同或相似的结果,但目前不保证每次生成的结果完全相同。
en_US: The random number seed used when generating, the user controls the randomness of the content generated by the model. Supports unsigned 64-bit integers, default value is 1234. When using seed, the model will try its best to generate the same or similar results, but there is currently no guarantee that the results will be exactly the same every time.
- name: repetition_penalty
label:
en_US: Repetition penalty
required: false
type: float
default: 1.1
label:
en_US: Repetition penalty
help:
zh_Hans: 用于控制模型生成时的重复度。提高repetition_penalty时可以降低模型生成的重复度。1.0表示不做惩罚。
en_US: Used to control the repetition of model generation. Increasing the repetition_penalty can reduce the repetition of model generation. 1.0 means no punishment.
en_US: Used to control the repeatability when generating models. Increasing repetition_penalty can reduce the duplication of model generation. 1.0 means no punishment.
- name: enable_search
type: boolean
default: false
help:
zh_Hans: 模型内置了互联网搜索服务,该参数控制模型在生成文本时是否参考使用互联网搜索结果。启用互联网搜索,模型会将搜索结果作为文本生成过程中的参考信息,但模型会基于其内部逻辑“自行判断”是否使用互联网搜索结果。
en_US: The model has a built-in Internet search service. This parameter controls whether the model refers to Internet search results when generating text. When Internet search is enabled, the model will use the search results as reference information in the text generation process, but the model will "judge" whether to use Internet search results based on its internal logic.
- name: response_format
use_template: response_format
pricing:

View File

@@ -8,55 +8,66 @@ model_properties:
parameter_rules:
- name: temperature
use_template: temperature
default: 1.0
type: float
default: 0.85
min: 0.0
max: 2.0
help:
zh_Hans: 用于控制随机性和多样性的程度。具体来说,temperature值控制了生成文本时对每个候选词的概率分布进行平滑的程度。较高的temperature值会降低概率分布的峰值,使得更多的低概率词被选择,生成结果更加多样化;而较低的temperature值则会增强概率分布的峰值,使得高概率词更容易被选择,生成结果更加确定。
en_US: Used to control the degree of randomness and diversity. Specifically, the temperature value controls the degree to which the probability distribution of each candidate word is smoothed when generating text. A higher temperature value will reduce the peak value of the probability distribution, allowing more low-probability words to be selected, and the generated results will be more diverse; while a lower temperature value will enhance the peak value of the probability distribution, making it easier for high-probability words to be selected, and the generated results are more certain.
- name: max_tokens
use_template: max_tokens
type: int
default: 1500
min: 1
max: 1500
help:
zh_Hans: 用于指定模型在生成内容时token的最大数量,它定义了生成的上限,但不保证每次都会生成到这个数量。
en_US: It is used to specify the maximum number of tokens when the model generates content. It defines the upper limit of generation, but does not guarantee that this number will be generated every time.
- name: top_p
use_template: top_p
type: float
default: 0.8
min: 0.1
max: 0.9
help:
zh_Hans: 生成过程中核采样方法概率阈值,例如取值为0.8时,仅保留概率加起来大于等于0.8的最可能token的最小集合作为候选集。取值范围为(0,1.0),取值越大,生成的随机性越高;取值越低,生成的确定性越高。
en_US: The probability threshold of the kernel sampling method during the generation process. For example, when the value is 0.8, only the smallest set of the most likely tokens with a sum of probabilities greater than or equal to 0.8 is retained as the candidate set. The value range is (0,1.0). The larger the value, the higher the randomness generated; the lower the value, the higher the certainty generated.
- name: max_tokens
use_template: max_tokens
default: 1500
min: 1
max: 6000
help:
zh_Hans: 用于限制模型生成token的数量,max_tokens设置的是生成上限,并不表示一定会生成这么多的token数量。
en_US: It is used to limit the number of tokens generated by the model. max_tokens sets the upper limit of generation, which does not mean that so many tokens will be generated.
- name: top_k
type: int
min: 0
max: 99
label:
zh_Hans: 取样数量
en_US: Top k
type: int
help:
zh_Hans: 生成时采样候选集的大小。例如,取值为50时,仅将单次生成中得分最高的50个token组成随机采样的候选集。取值越大,生成的随机性越高;取值越小,生成的确定性越高。默认不传递该参数,取值为None或当top_k大于100时,表示不启用top_k策略,此时仅有top_p策略生效。
en_US: The size of the sample candidate set when generated. For example, when the value is 50, only the 50 highest-scoring tokens in a single generation form a randomly sampled candidate set. The larger the value, the higher the randomness generated; the smaller the value, the higher the certainty generated. This parameter is not passed by default. The value is None or when top_k is greater than 100, it means that the top_k policy is not enabled. At this time, only the top_p policy takes effect.
required: false
zh_Hans: 生成时采样候选集的大小。例如,取值为50时,仅将单次生成中得分最高的50个token组成随机采样的候选集。取值越大,生成的随机性越高;取值越小,生成的确定性越高。
en_US: The size of the sample candidate set when generated. For example, when the value is 50, only the 50 highest-scoring tokens in a single generation form a randomly sampled candidate set. The larger the value, the higher the randomness generated; the smaller the value, the higher the certainty generated.
- name: seed
required: false
type: int
default: 1234
label:
zh_Hans: 随机种子
en_US: Random seed
type: int
help:
zh_Hans: 生成时随机数的种子,用于控制模型生成的随机性。如果使用相同的种子,每次运行生成的结果都将相同;当需要复现模型的生成结果时,可以使用相同的种子。seed参数支持无符号64位整数类型。
en_US: When generating, the random number seed is used to control the randomness of model generation. If you use the same seed, the results generated by each run will be the same; when you need to reproduce the results of the model, you can use the same seed. The seed parameter supports unsigned 64-bit integer types.
required: false
zh_Hans: 生成时使用的随机数种子,用户控制模型生成内容的随机性。支持无符号64位整数,默认值为 1234。在使用seed时,模型将尽可能生成相同或相似的结果,但目前不保证每次生成的结果完全相同。
en_US: The random number seed used when generating, the user controls the randomness of the content generated by the model. Supports unsigned 64-bit integers, default value is 1234. When using seed, the model will try its best to generate the same or similar results, but there is currently no guarantee that the results will be exactly the same every time.
- name: repetition_penalty
label:
en_US: Repetition penalty
required: false
type: float
default: 1.1
label:
en_US: Repetition penalty
help:
zh_Hans: 用于控制模型生成时的重复度。提高repetition_penalty时可以降低模型生成的重复度。1.0表示不做惩罚。
en_US: Used to control the repetition of model generation. Increasing the repetition_penalty can reduce the repetition of model generation. 1.0 means no punishment.
required: false
en_US: Used to control the repeatability when generating models. Increasing repetition_penalty can reduce the duplication of model generation. 1.0 means no punishment.
- name: enable_search
type: boolean
default: false
help:
zh_Hans: 模型内置了互联网搜索服务,该参数控制模型在生成文本时是否参考使用互联网搜索结果。启用互联网搜索,模型会将搜索结果作为文本生成过程中的参考信息,但模型会基于其内部逻辑“自行判断”是否使用互联网搜索结果。
en_US: The model has a built-in Internet search service. This parameter controls whether the model refers to Internet search results when generating text. When Internet search is enabled, the model will use the search results as reference information in the text generation process, but the model will "judge" whether to use Internet search results based on its internal logic.
- name: response_format
use_template: response_format
pricing:

View File

@@ -24,9 +24,9 @@ provider_credential_schema:
credential_form_schemas:
- variable: dashscope_api_key
label:
en_US: APIKey
en_US: API Key
type: secret-input
required: true
placeholder:
zh_Hans: 在此输入您的 APIKey
en_US: Enter your APIKey
zh_Hans: 在此输入您的 API Key
en_US: Enter your API Key

View File

@@ -1,20 +1,12 @@
<svg width="80" height="22" viewBox="0 0 450 120" version="1.1" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" xml:space="preserve" xmlns:serif="http://www.serif.com/" style="fill-rule:evenodd;clip-rule:evenodd;stroke-linejoin:round;stroke-miterlimit:2;">
<g transform="matrix(0.172742,0,0,0.172742,9.60932,8.17741)">
<circle cx="300" cy="300" r="300" style="fill:rgb(0,52,37);"/>
</g>
<g transform="matrix(0.172742,0,0,0.172742,9.60932,8.17741)">
<path d="M452.119,361.224C452.119,349.527 442.623,340.031 430.926,340.031C419.229,340.031 409.733,349.527 409.733,361.224L409.733,470.486C409.733,482.183 419.229,491.679 430.926,491.679C442.623,491.679 452.119,482.183 452.119,470.486L452.119,361.224Z" style="fill:white;"/>
</g>
<g transform="matrix(0.172742,0,0,0.172742,9.60932,8.17741)">
<path d="M422.005,133.354C413.089,125.771 399.714,126.851 392.131,135.767L273.699,275.021C270.643,278.614 268.994,282.932 268.698,287.302C268.532,288.371 268.446,289.466 268.446,290.581L268.446,468.603C268.446,480.308 277.934,489.796 289.639,489.796C301.344,489.796 310.832,480.308 310.832,468.603L310.832,296.784L424.419,163.228C432.002,154.312 430.921,140.937 422.005,133.354Z" style="fill:white;"/>
</g>
<g transform="matrix(0.13359,-0.109514,0.109514,0.13359,-0.630793,25.9151)">
<path d="M156.358,155.443C156.358,143.746 146.862,134.25 135.165,134.25C123.468,134.25 113.972,143.746 113.972,155.443L113.972,287.802C113.972,299.499 123.468,308.995 135.165,308.995C146.862,308.995 156.358,299.499 156.358,287.802L156.358,155.443Z" style="fill:white;"/>
</g>
<g transform="matrix(0.172742,0,0,0.172742,9.60932,8.17741)">
<circle cx="460.126" cy="279.278" r="25.903" style="fill:rgb(0,255,37);"/>
</g>
<g transform="matrix(1,0,0,1,-77.4848,13.0849)">
<text x="210.275px" y="74.595px" style="font-family:'AlibabaPuHuiTi_3_55_Regular', 'Alibaba PuHuiTi 3.0', serif;font-size:80px;">01<tspan x="294.355px " y="74.595px ">.</tspan>AI</text>
</g>
</svg>
<svg width="64" height="24" viewBox="0 0 64 24" fill="none" xmlns="http://www.w3.org/2000/svg">
<path fill-rule="evenodd" clip-rule="evenodd" d="M1.28808 1.39558C1.76461 1.00315 2.46905 1.07132 2.86149 1.54785L7.7517 7.48596C8.14414 7.96249 8.07597 8.66693 7.59944 9.05937C7.1229 9.45181 6.41847 9.38363 6.02603 8.9071L1.13582 2.96899C0.743382 2.49246 0.811553 1.78802 1.28808 1.39558Z" fill="#133426"/>
<path fill-rule="evenodd" clip-rule="evenodd" d="M10.1689 22.3553C9.55157 22.3553 9.05112 21.8549 9.05109 21.2375L9.05075 10.7193C9.05074 10.4478 9.14951 10.1856 9.32863 9.98168L16.1801 2.17956C16.5875 1.7157 17.2937 1.66989 17.7576 2.07723C18.2214 2.48457 18.2673 3.19081 17.8599 3.65467L11.2863 11.1403L11.2866 21.2375C11.2866 21.8548 10.7862 22.3552 10.1689 22.3553Z" fill="#133426"/>
<path fill-rule="evenodd" clip-rule="evenodd" d="M18.2138 13.7077C18.8311 13.7077 19.3315 14.2081 19.3315 14.8255V21.0896C19.3315 21.7069 18.8311 22.2073 18.2138 22.2073C17.5965 22.2073 17.096 21.7069 17.096 21.0896V14.8255C17.096 14.2081 17.5965 13.7077 18.2138 13.7077Z" fill="#133426"/>
<circle cx="19.7936" cy="10.3307" r="1.73695" fill="#00FF00"/>
<path d="M61.6555 10.3637V22H60.593V10.3637H61.6555Z" fill="black"/>
<path d="M50.1101 22H48.9964L53.2294 10.3637H54.3658L58.5987 22H57.4851L53.8374 11.7444H53.7578L50.1101 22ZM50.9112 17.5398H56.6839V18.4944H50.9112V17.5398Z" fill="black"/>
<path d="M46.3928 22.0853C46.1693 22.0853 45.9761 22.0057 45.8132 21.8466C45.6541 21.6838 45.5746 21.4906 45.5746 21.2671C45.5746 21.0398 45.6541 20.8466 45.8132 20.6875C45.9761 20.5285 46.1693 20.4489 46.3928 20.4489C46.62 20.4489 46.8132 20.5285 46.9723 20.6875C47.1314 20.8466 47.2109 21.0398 47.2109 21.2671C47.2109 21.4148 47.1731 21.5512 47.0973 21.6762C47.0253 21.8012 46.9268 21.9016 46.8018 21.9773C46.6806 22.0493 46.5443 22.0853 46.3928 22.0853Z" fill="black"/>
<path d="M42.6996 10.3637V22H41.6371V11.4773H41.5689L38.8416 13.2898V12.1875L41.5916 10.3637H42.6996Z" fill="black"/>
<path d="M32.9098 22.1591C32.0916 22.1591 31.3928 21.9243 30.8132 21.4546C30.2375 20.9811 29.7943 20.2974 29.4837 19.4035C29.1768 18.5095 29.0234 17.4357 29.0234 16.1819C29.0234 14.9319 29.1768 13.8618 29.4837 12.9716C29.7943 12.0777 30.2393 11.394 30.8189 10.9205C31.4022 10.4432 32.0992 10.2046 32.9098 10.2046C33.7204 10.2046 34.4155 10.4432 34.995 10.9205C35.5784 11.394 36.0234 12.0777 36.3303 12.9716C36.6409 13.8618 36.7962 14.9319 36.7962 16.1819C36.7962 17.4357 36.6409 18.5095 36.3303 19.4035C36.0234 20.2974 35.5803 20.9811 35.0007 21.4546C34.425 21.9243 33.728 22.1591 32.9098 22.1591ZM32.9098 21.2046C33.8075 21.2046 34.5083 20.7671 35.0121 19.8921C35.5159 19.0133 35.7678 17.7766 35.7678 16.1819C35.7678 15.1213 35.6522 14.216 35.4212 13.466C35.1939 12.7122 34.8662 12.1364 34.4382 11.7387C34.014 11.341 33.5045 11.1421 32.9098 11.1421C32.0196 11.1421 31.3208 11.5853 30.8132 12.4716C30.3056 13.3542 30.0518 14.591 30.0518 16.1819C30.0518 17.2425 30.1655 18.1478 30.3928 18.8978C30.6238 19.6478 30.9515 20.2197 31.3757 20.6137C31.8037 21.0076 32.3151 21.2046 32.9098 21.2046Z" fill="black"/>
</svg>

Before: 2.0 KiB | After: 3.1 KiB

View File

@ -1,20 +0,0 @@
<svg width="80" height="22" viewBox="0 0 450 120" version="1.1" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" xml:space="preserve" xmlns:serif="http://www.serif.com/" style="fill-rule:evenodd;clip-rule:evenodd;stroke-linejoin:round;stroke-miterlimit:2;">
<g transform="matrix(0.172742,0,0,0.172742,9.60932,8.17741)">
<circle cx="300" cy="300" r="300" style="fill:rgb(0,52,37);"/>
</g>
<g transform="matrix(0.172742,0,0,0.172742,9.60932,8.17741)">
<path d="M452.119,361.224C452.119,349.527 442.623,340.031 430.926,340.031C419.229,340.031 409.733,349.527 409.733,361.224L409.733,470.486C409.733,482.183 419.229,491.679 430.926,491.679C442.623,491.679 452.119,482.183 452.119,470.486L452.119,361.224Z" style="fill:white;"/>
</g>
<g transform="matrix(0.172742,0,0,0.172742,9.60932,8.17741)">
<path d="M422.005,133.354C413.089,125.771 399.714,126.851 392.131,135.767L273.699,275.021C270.643,278.614 268.994,282.932 268.698,287.302C268.532,288.371 268.446,289.466 268.446,290.581L268.446,468.603C268.446,480.308 277.934,489.796 289.639,489.796C301.344,489.796 310.832,480.308 310.832,468.603L310.832,296.784L424.419,163.228C432.002,154.312 430.921,140.937 422.005,133.354Z" style="fill:white;"/>
</g>
<g transform="matrix(0.13359,-0.109514,0.109514,0.13359,-0.630793,25.9151)">
<path d="M156.358,155.443C156.358,143.746 146.862,134.25 135.165,134.25C123.468,134.25 113.972,143.746 113.972,155.443L113.972,287.802C113.972,299.499 123.468,308.995 135.165,308.995C146.862,308.995 156.358,299.499 156.358,287.802L156.358,155.443Z" style="fill:white;"/>
</g>
<g transform="matrix(0.172742,0,0,0.172742,9.60932,8.17741)">
<circle cx="460.126" cy="279.278" r="25.903" style="fill:rgb(0,255,37);"/>
</g>
<g transform="matrix(1,0,0,1,-77.4848,13.0849)">
<text x="210.275px" y="74.595px" style="font-family:'AlibabaPuHuiTi_3_55_Regular', 'Alibaba PuHuiTi 3.0', serif;font-size:80px;">零一万物</text>
</g>
</svg>

Before: 2.0 KiB

View File

@ -1,7 +1,8 @@
<svg width="24" height="24" viewBox="0 0 600 600" fill="none" xmlns="http://www.w3.org/2000/svg">
<circle cx="300" cy="300" r="300" fill="#003425"/>
<rect x="409.733" y="340.031" width="42.3862" height="151.648" rx="21.1931" fill="white"/>
<path fill-rule="evenodd" clip-rule="evenodd" d="M422.005 133.354C413.089 125.771 399.714 126.851 392.131 135.767L273.699 275.021C270.643 278.614 268.994 282.932 268.698 287.302C268.532 288.371 268.446 289.466 268.446 290.581V468.603C268.446 480.308 277.934 489.796 289.639 489.796C301.344 489.796 310.832 480.308 310.832 468.603V296.784L424.419 163.228C432.002 154.312 430.921 140.937 422.005 133.354Z" fill="white"/>
<rect x="113.972" y="134.25" width="42.3862" height="174.745" rx="21.1931" transform="rotate(-39.3441 113.972 134.25)" fill="white"/>
<circle cx="460.126" cy="279.278" r="25.9027" fill="#00FF25"/>
</svg>
<svg width="24" height="24" viewBox="0 0 24 24" fill="none" xmlns="http://www.w3.org/2000/svg">
<rect x="1" y="1" width="22" height="22" rx="5" fill="#133426"/>
<path fill-rule="evenodd" clip-rule="evenodd" d="M4.52004 4.43887C4.87945 4.1429 5.41077 4.19431 5.70676 4.55371L9.39515 9.03221C9.69114 9.39161 9.63972 9.92289 9.2803 10.2189C8.92089 10.5148 8.38957 10.4634 8.09358 10.104L4.40519 5.62553C4.1092 5.26613 4.16062 4.73485 4.52004 4.43887Z" fill="white"/>
<path fill-rule="evenodd" clip-rule="evenodd" d="M11.2183 20.2466C10.7527 20.2466 10.3752 19.8692 10.3752 19.4036L10.3749 11.4708C10.3749 11.266 10.4494 11.0683 10.5845 10.9145L15.7522 5.03014C16.0594 4.6803 16.5921 4.64575 16.942 4.95297C17.2918 5.26018 17.3264 5.79283 17.0192 6.14266L12.0611 11.7883L12.0613 19.4035C12.0613 19.8691 11.6839 20.2466 11.2183 20.2466Z" fill="white"/>
<path fill-rule="evenodd" clip-rule="evenodd" d="M17.2861 13.7246C17.7517 13.7246 18.1291 14.102 18.1291 14.5676V19.292C18.1291 19.7576 17.7517 20.135 17.2861 20.135C16.8205 20.135 16.443 19.7576 16.443 19.292V14.5676C16.443 14.102 16.8205 13.7246 17.2861 13.7246Z" fill="white"/>
<ellipse cx="18.4761" cy="11.1782" rx="1.31008" ry="1.31" fill="#00FF00"/>
</svg>

Before: 882 B | After: 1.2 KiB

View File

@ -9,7 +9,7 @@ icon_small:
en_US: icon_s_en.svg
icon_large:
en_US: icon_l_en.svg
background: "#EFFDFD"
background: "#E9F1EC"
help:
title:
en_US: Get your API Key from 01.ai

View File

@ -9,7 +9,7 @@ import requests
import core.helper.ssrf_proxy as ssrf_proxy
from core.tools.entities.tool_bundle import ApiBasedToolBundle
from core.tools.entities.tool_entities import ToolInvokeMessage
from core.tools.errors import ToolProviderCredentialValidationError
from core.tools.errors import ToolInvokeError, ToolParameterValidationError, ToolProviderCredentialValidationError
from core.tools.tool.tool import Tool
API_TOOL_DEFAULT_TIMEOUT = (10, 60)
@ -81,7 +81,7 @@ class ApiTool(Tool):
needed_parameters = [parameter for parameter in self.api_bundle.parameters if parameter.required]
for parameter in needed_parameters:
if parameter.required and parameter.name not in parameters:
raise ToolProviderCredentialValidationError(f"Missing required parameter {parameter.name}")
raise ToolParameterValidationError(f"Missing required parameter {parameter.name}")
if parameter.default is not None and parameter.name not in parameters:
parameters[parameter.name] = parameter.default
@ -94,7 +94,7 @@ class ApiTool(Tool):
"""
if isinstance(response, httpx.Response):
if response.status_code >= 400:
raise ToolProviderCredentialValidationError(f"Request failed with status code {response.status_code}")
raise ToolInvokeError(f"Request failed with status code {response.status_code} and {response.text}")
if not response.content:
return 'Empty response from the tool, please check your parameters and try again.'
try:
@ -107,7 +107,7 @@ class ApiTool(Tool):
return response.text
elif isinstance(response, requests.Response):
if not response.ok:
raise ToolProviderCredentialValidationError(f"Request failed with status code {response.status_code}")
raise ToolInvokeError(f"Request failed with status code {response.status_code} and {response.text}")
if not response.content:
return 'Empty response from the tool, please check your parameters and try again.'
try:
@ -139,7 +139,7 @@ class ApiTool(Tool):
if parameter['name'] in parameters:
value = parameters[parameter['name']]
elif parameter['required']:
raise ToolProviderCredentialValidationError(f"Missing required parameter {parameter['name']}")
raise ToolParameterValidationError(f"Missing required parameter {parameter['name']}")
else:
value = (parameter.get('schema', {}) or {}).get('default', '')
path_params[parameter['name']] = value
@ -149,7 +149,7 @@ class ApiTool(Tool):
if parameter['name'] in parameters:
value = parameters[parameter['name']]
elif parameter['required']:
raise ToolProviderCredentialValidationError(f"Missing required parameter {parameter['name']}")
raise ToolParameterValidationError(f"Missing required parameter {parameter['name']}")
else:
value = (parameter.get('schema', {}) or {}).get('default', '')
params[parameter['name']] = value
@ -159,7 +159,7 @@ class ApiTool(Tool):
if parameter['name'] in parameters:
value = parameters[parameter['name']]
elif parameter['required']:
raise ToolProviderCredentialValidationError(f"Missing required parameter {parameter['name']}")
raise ToolParameterValidationError(f"Missing required parameter {parameter['name']}")
else:
value = (parameter.get('schema', {}) or {}).get('default', '')
cookies[parameter['name']] = value
@ -169,7 +169,7 @@ class ApiTool(Tool):
if parameter['name'] in parameters:
value = parameters[parameter['name']]
elif parameter['required']:
raise ToolProviderCredentialValidationError(f"Missing required parameter {parameter['name']}")
raise ToolParameterValidationError(f"Missing required parameter {parameter['name']}")
else:
value = (parameter.get('schema', {}) or {}).get('default', '')
headers[parameter['name']] = value
@ -188,7 +188,7 @@ class ApiTool(Tool):
# convert type
body[name] = self._convert_body_property_type(property, parameters[name])
elif name in required:
raise ToolProviderCredentialValidationError(
raise ToolParameterValidationError(
f"Missing required parameter {name} in operation {self.api_bundle.operation_id}"
)
elif 'default' in property:
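Net effect of this hunk: the catch-all ToolProviderCredentialValidationError is split three ways: missing or malformed tool arguments now raise ToolParameterValidationError, failed upstream calls raise ToolInvokeError, and credential problems keep the original class. A hedged sketch of how a caller might route them (run_api_tool and tool.invoke are illustrative, not this file's API; only the imported error classes come from the diff):

from core.tools.errors import (
    ToolInvokeError,
    ToolParameterValidationError,
    ToolProviderCredentialValidationError,
)

def run_api_tool(tool, parameters: dict) -> str:
    try:
        return tool.invoke(parameters)  # hypothetical entry point
    except ToolParameterValidationError as e:
        return f'parameter error: {e}'  # caller omitted or mistyped an argument
    except ToolInvokeError as e:
        return f'invoke error: {e}'  # upstream API answered with a >= 400 status
    except ToolProviderCredentialValidationError as e:
        return f'credential error: {e}'  # now reserved for genuine credential checks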

View File

@ -37,7 +37,7 @@ class HttpRequestNodeData(BaseNodeData):
data: Union[None, str]
variables: list[VariableSelector]
method: Literal['get', 'post', 'put', 'patch', 'delete']
method: Literal['get', 'post', 'put', 'patch', 'delete', 'head']
url: str
authorization: Authorization
headers: str
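A minimal sketch (field subset assumed) showing that the widened Literal now validates HEAD requests while still rejecting unsupported verbs:

from typing import Literal
from pydantic import BaseModel, ValidationError

class HttpRequestNodeDataSketch(BaseModel):  # trimmed to two fields for illustration
    method: Literal['get', 'post', 'put', 'patch', 'delete', 'head']
    url: str

HttpRequestNodeDataSketch(method='head', url='https://example.com')  # accepted after this change
try:
    HttpRequestNodeDataSketch(method='options', url='https://example.com')
except ValidationError:
    print('options is still rejected')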

View File

@ -1,3 +1,4 @@
import json
import re
from copy import deepcopy
from random import randint
@ -147,6 +148,19 @@ class HttpExecutor:
# init template
self._init_template(node_data, variables)
def _is_json_body(self, node_data: HttpRequestNodeData):
"""
Check whether the request body is valid JSON.
"""
if node_data.body and node_data.body.type == 'json':
try:
json.loads(node_data.body.data)
return True
except (json.JSONDecodeError, TypeError):
return False
return False
def _init_template(self, node_data: HttpRequestNodeData, variables: dict[str, Any]):
"""
init template
@ -187,7 +201,7 @@ class HttpExecutor:
else:
raise ValueError(f'Invalid params {kv}')
self.params[k] = v
self.params[k.strip()] = v
# extract all template in headers
header_template = re.findall(r'{{(.*?)}}', node_data.headers) or []
@ -213,18 +227,24 @@ class HttpExecutor:
else:
raise ValueError(f'Invalid headers {kv}')
self.headers[k] = v
self.headers[k.strip()] = v.strip()
# extract all template in body
if node_data.body:
# check if it's a valid JSON
is_valid_json = self._is_json_body(node_data)
body_template = re.findall(r'{{(.*?)}}', node_data.body.data or '') or []
body_template = list(set(body_template))
original_body = node_data.body.data or ''
for body in body_template:
if not body:
continue
original_body = original_body.replace(f'{{{{{body}}}}}', str(variables.get(body, '')))
body_value = str(variables.get(body, ''))
if is_valid_json:
body_value = body_value.replace('"', '\\"')
original_body = original_body.replace(f'{{{{{body}}}}}', body_value)
if node_data.body.type == 'json':
self.headers['Content-Type'] = 'application/json'
@ -239,9 +259,9 @@ class HttpExecutor:
continue
kv = kv.split(':')
if len(kv) == 2:
body[kv[0]] = kv[1]
body[kv[0].strip()] = kv[1]
elif len(kv) == 1:
body[kv[0]] = ''
body[kv[0].strip()] = ''
else:
raise ValueError(f'Invalid body {kv}')
@ -361,12 +381,12 @@ class HttpExecutor:
# if files, use multipart/form-data with boundary
if self.files:
boundary = self.boundary
raw_request += f'--{boundary}'
for k, v in self.files.items():
raw_request += f'Content-Disposition: form-data; name="{k}"; filename="{v[0]}"\n'
raw_request += f'Content-Type: {v[1]}\n\n'
raw_request += v[1] + '\n'
raw_request += f'{boundary}\n'
raw_request += '--\n'
raw_request += f'\nContent-Disposition: form-data; name="{k}"\n\n'
raw_request += f'{v[1]}\n'
raw_request += f'--{boundary}'
raw_request += '--'
else:
raw_request += self.body or ''
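The JSON-body change above can be read in isolation: when the body template itself parses as JSON, double quotes inside substituted variable values are escaped so the rendered body stays parseable. A standalone sketch of that rule (render_json_body is an illustrative name, not this class's API):

import json
import re

def render_json_body(template: str, variables: dict) -> str:
    # Escape quotes in variable values only when the template is valid JSON,
    # mirroring the _is_json_body check added above.
    try:
        json.loads(template)
        is_valid_json = True
    except (json.JSONDecodeError, TypeError):
        is_valid_json = False

    for key in set(re.findall(r'{{(.*?)}}', template)):
        value = str(variables.get(key, ''))
        if is_valid_json:
            value = value.replace('"', '\\"')
        template = template.replace(f'{{{{{key}}}}}', value)
    return template

body = render_json_body('{"q": "{{query}}"}', {'query': 'say "hi"'})
print(json.loads(body))  # {'q': 'say "hi"'}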

View File

@ -12,7 +12,7 @@ gunicorn~=21.2.0
gevent~=23.9.1
langchain==0.0.250
openai~=1.13.3
tiktoken~=0.5.2
tiktoken~=0.6.0
psycopg2-binary~=2.9.6
pycryptodome==3.19.1
python-dotenv==1.0.0
@ -36,7 +36,7 @@ python-docx~=1.1.0
pypdfium2==4.16.0
resend~=0.7.0
pyjwt~=2.8.0
anthropic~=0.17.0
anthropic~=0.20.0
newspaper3k==0.2.8
google-api-python-client==2.90.0
wikipedia==1.4.0