diff --git a/.github/workflows/web-tests.yml b/.github/workflows/web-tests.yml
new file mode 100644
index 0000000000..5aee64b8e6
--- /dev/null
+++ b/.github/workflows/web-tests.yml
@@ -0,0 +1,46 @@
+name: Web Tests
+
+on:
+ pull_request:
+ branches:
+ - main
+ paths:
+ - web/**
+
+concurrency:
+ group: web-tests-${{ github.head_ref || github.run_id }}
+ cancel-in-progress: true
+
+jobs:
+ test:
+ name: Web Tests
+ runs-on: ubuntu-latest
+ defaults:
+ run:
+ working-directory: ./web
+
+ steps:
+ - name: Checkout code
+ uses: actions/checkout@v4
+
+ - name: Check changed files
+ id: changed-files
+ uses: tj-actions/changed-files@v45
+ with:
+ files: web/**
+
+ - name: Setup Node.js
+ uses: actions/setup-node@v4
+ if: steps.changed-files.outputs.any_changed == 'true'
+ with:
+ node-version: 20
+ cache: yarn
+        cache-dependency-path: ./web/yarn.lock
+
+ - name: Install dependencies
+ if: steps.changed-files.outputs.any_changed == 'true'
+ run: yarn install --frozen-lockfile
+
+ - name: Run tests
+ if: steps.changed-files.outputs.any_changed == 'true'
+ run: yarn test
diff --git a/api/core/app/apps/base_app_runner.py b/api/core/app/apps/base_app_runner.py
index 1b412b8639..203aca3384 100644
--- a/api/core/app/apps/base_app_runner.py
+++ b/api/core/app/apps/base_app_runner.py
@@ -309,7 +309,7 @@ class AppRunner:
if not prompt_messages:
prompt_messages = result.prompt_messages
- if not usage and result.delta.usage:
+ if result.delta.usage:
usage = result.delta.usage
if not usage:
diff --git a/api/core/embedding/cached_embedding.py b/api/core/embedding/cached_embedding.py
index 8ce12fd59f..75219051cd 100644
--- a/api/core/embedding/cached_embedding.py
+++ b/api/core/embedding/cached_embedding.py
@@ -5,6 +5,7 @@ from typing import Optional, cast
import numpy as np
from sqlalchemy.exc import IntegrityError
+from core.embedding.embedding_constant import EmbeddingInputType
from core.model_manager import ModelInstance
from core.model_runtime.entities.model_entities import ModelPropertyKey
from core.model_runtime.model_providers.__base.text_embedding_model import TextEmbeddingModel
@@ -56,7 +57,9 @@ class CacheEmbedding(Embeddings):
for i in range(0, len(embedding_queue_texts), max_chunks):
batch_texts = embedding_queue_texts[i : i + max_chunks]
- embedding_result = self._model_instance.invoke_text_embedding(texts=batch_texts, user=self._user)
+ embedding_result = self._model_instance.invoke_text_embedding(
+ texts=batch_texts, user=self._user, input_type=EmbeddingInputType.DOCUMENT
+ )
for vector in embedding_result.embeddings:
try:
@@ -100,7 +103,9 @@ class CacheEmbedding(Embeddings):
redis_client.expire(embedding_cache_key, 600)
return list(np.frombuffer(base64.b64decode(embedding), dtype="float"))
try:
- embedding_result = self._model_instance.invoke_text_embedding(texts=[text], user=self._user)
+ embedding_result = self._model_instance.invoke_text_embedding(
+ texts=[text], user=self._user, input_type=EmbeddingInputType.QUERY
+ )
embedding_results = embedding_result.embeddings[0]
embedding_results = (embedding_results / np.linalg.norm(embedding_results)).tolist()
diff --git a/api/core/embedding/embedding_constant.py b/api/core/embedding/embedding_constant.py
new file mode 100644
index 0000000000..9b4934646b
--- /dev/null
+++ b/api/core/embedding/embedding_constant.py
@@ -0,0 +1,10 @@
+from enum import Enum
+
+
+class EmbeddingInputType(Enum):
+ """
+ Enum for embedding input type.
+ """
+
+ DOCUMENT = "document"
+ QUERY = "query"
diff --git a/api/core/model_manager.py b/api/core/model_manager.py
index 990efd36c6..74b4452362 100644
--- a/api/core/model_manager.py
+++ b/api/core/model_manager.py
@@ -3,6 +3,7 @@ import os
from collections.abc import Callable, Generator, Sequence
from typing import IO, Optional, Union, cast
+from core.embedding.embedding_constant import EmbeddingInputType
from core.entities.provider_configuration import ProviderConfiguration, ProviderModelBundle
from core.entities.provider_entities import ModelLoadBalancingConfiguration
from core.errors.error import ProviderTokenNotInitError
@@ -158,12 +159,15 @@ class ModelInstance:
tools=tools,
)
- def invoke_text_embedding(self, texts: list[str], user: Optional[str] = None) -> TextEmbeddingResult:
+ def invoke_text_embedding(
+ self, texts: list[str], user: Optional[str] = None, input_type: EmbeddingInputType = EmbeddingInputType.DOCUMENT
+ ) -> TextEmbeddingResult:
"""
Invoke large language model
:param texts: texts to embed
:param user: unique user id
+ :param input_type: input type
:return: embeddings result
"""
if not isinstance(self.model_type_instance, TextEmbeddingModel):
@@ -176,6 +180,7 @@ class ModelInstance:
credentials=self.credentials,
texts=texts,
user=user,
+ input_type=input_type,
)
def get_text_embedding_num_tokens(self, texts: list[str]) -> int:
diff --git a/api/core/model_runtime/model_providers/__base/text_embedding_model.py b/api/core/model_runtime/model_providers/__base/text_embedding_model.py
index 54a4486023..a948dca20d 100644
--- a/api/core/model_runtime/model_providers/__base/text_embedding_model.py
+++ b/api/core/model_runtime/model_providers/__base/text_embedding_model.py
@@ -4,6 +4,7 @@ from typing import Optional
from pydantic import ConfigDict
+from core.embedding.embedding_constant import EmbeddingInputType
from core.model_runtime.entities.model_entities import ModelPropertyKey, ModelType
from core.model_runtime.entities.text_embedding_entities import TextEmbeddingResult
from core.model_runtime.model_providers.__base.ai_model import AIModel
@@ -20,35 +21,47 @@ class TextEmbeddingModel(AIModel):
model_config = ConfigDict(protected_namespaces=())
def invoke(
- self, model: str, credentials: dict, texts: list[str], user: Optional[str] = None
+ self,
+ model: str,
+ credentials: dict,
+ texts: list[str],
+ user: Optional[str] = None,
+ input_type: EmbeddingInputType = EmbeddingInputType.DOCUMENT,
) -> TextEmbeddingResult:
"""
- Invoke large language model
+ Invoke text embedding model
:param model: model name
:param credentials: model credentials
:param texts: texts to embed
:param user: unique user id
+ :param input_type: input type
:return: embeddings result
"""
self.started_at = time.perf_counter()
try:
- return self._invoke(model, credentials, texts, user)
+ return self._invoke(model, credentials, texts, user, input_type)
except Exception as e:
raise self._transform_invoke_error(e)
@abstractmethod
def _invoke(
- self, model: str, credentials: dict, texts: list[str], user: Optional[str] = None
+ self,
+ model: str,
+ credentials: dict,
+ texts: list[str],
+ user: Optional[str] = None,
+ input_type: EmbeddingInputType = EmbeddingInputType.DOCUMENT,
) -> TextEmbeddingResult:
"""
- Invoke large language model
+ Invoke text embedding model
:param model: model name
:param credentials: model credentials
:param texts: texts to embed
:param user: unique user id
+ :param input_type: input type
:return: embeddings result
"""
raise NotImplementedError
diff --git a/api/core/model_runtime/model_providers/azure_openai/text_embedding/text_embedding.py b/api/core/model_runtime/model_providers/azure_openai/text_embedding/text_embedding.py
index d9cff8ecbb..8701a38050 100644
--- a/api/core/model_runtime/model_providers/azure_openai/text_embedding/text_embedding.py
+++ b/api/core/model_runtime/model_providers/azure_openai/text_embedding/text_embedding.py
@@ -7,6 +7,7 @@ import numpy as np
import tiktoken
from openai import AzureOpenAI
+from core.embedding.embedding_constant import EmbeddingInputType
from core.model_runtime.entities.model_entities import AIModelEntity, PriceType
from core.model_runtime.entities.text_embedding_entities import EmbeddingUsage, TextEmbeddingResult
from core.model_runtime.errors.validate import CredentialsValidateFailedError
@@ -17,8 +18,23 @@ from core.model_runtime.model_providers.azure_openai._constant import EMBEDDING_
class AzureOpenAITextEmbeddingModel(_CommonAzureOpenAI, TextEmbeddingModel):
def _invoke(
- self, model: str, credentials: dict, texts: list[str], user: Optional[str] = None
+ self,
+ model: str,
+ credentials: dict,
+ texts: list[str],
+ user: Optional[str] = None,
+ input_type: EmbeddingInputType = EmbeddingInputType.DOCUMENT,
) -> TextEmbeddingResult:
+ """
+ Invoke text embedding model
+
+ :param model: model name
+ :param credentials: model credentials
+ :param texts: texts to embed
+ :param user: unique user id
+ :param input_type: input type
+ :return: embeddings result
+ """
base_model_name = credentials["base_model_name"]
credentials_kwargs = self._to_credential_kwargs(credentials)
client = AzureOpenAI(**credentials_kwargs)
diff --git a/api/core/model_runtime/model_providers/baichuan/text_embedding/text_embedding.py b/api/core/model_runtime/model_providers/baichuan/text_embedding/text_embedding.py
index 779dfbb608..56b9be1c36 100644
--- a/api/core/model_runtime/model_providers/baichuan/text_embedding/text_embedding.py
+++ b/api/core/model_runtime/model_providers/baichuan/text_embedding/text_embedding.py
@@ -4,6 +4,7 @@ from typing import Optional
from requests import post
+from core.embedding.embedding_constant import EmbeddingInputType
from core.model_runtime.entities.model_entities import PriceType
from core.model_runtime.entities.text_embedding_entities import EmbeddingUsage, TextEmbeddingResult
from core.model_runtime.errors.invoke import (
@@ -35,7 +36,12 @@ class BaichuanTextEmbeddingModel(TextEmbeddingModel):
api_base: str = "http://api.baichuan-ai.com/v1/embeddings"
def _invoke(
- self, model: str, credentials: dict, texts: list[str], user: Optional[str] = None
+ self,
+ model: str,
+ credentials: dict,
+ texts: list[str],
+ user: Optional[str] = None,
+ input_type: EmbeddingInputType = EmbeddingInputType.DOCUMENT,
) -> TextEmbeddingResult:
"""
Invoke text embedding model
@@ -44,6 +50,7 @@ class BaichuanTextEmbeddingModel(TextEmbeddingModel):
:param credentials: model credentials
:param texts: texts to embed
:param user: unique user id
+ :param input_type: input type
:return: embeddings result
"""
api_key = credentials["api_key"]
diff --git a/api/core/model_runtime/model_providers/bedrock/text_embedding/text_embedding.py b/api/core/model_runtime/model_providers/bedrock/text_embedding/text_embedding.py
index 251170d1ae..d9c5726592 100644
--- a/api/core/model_runtime/model_providers/bedrock/text_embedding/text_embedding.py
+++ b/api/core/model_runtime/model_providers/bedrock/text_embedding/text_embedding.py
@@ -13,6 +13,7 @@ from botocore.exceptions import (
UnknownServiceError,
)
+from core.embedding.embedding_constant import EmbeddingInputType
from core.model_runtime.entities.model_entities import PriceType
from core.model_runtime.entities.text_embedding_entities import EmbeddingUsage, TextEmbeddingResult
from core.model_runtime.errors.invoke import (
@@ -30,7 +31,12 @@ logger = logging.getLogger(__name__)
class BedrockTextEmbeddingModel(TextEmbeddingModel):
def _invoke(
- self, model: str, credentials: dict, texts: list[str], user: Optional[str] = None
+ self,
+ model: str,
+ credentials: dict,
+ texts: list[str],
+ user: Optional[str] = None,
+ input_type: EmbeddingInputType = EmbeddingInputType.DOCUMENT,
) -> TextEmbeddingResult:
"""
Invoke text embedding model
@@ -39,6 +45,7 @@ class BedrockTextEmbeddingModel(TextEmbeddingModel):
:param credentials: model credentials
:param texts: texts to embed
:param user: unique user id
+ :param input_type: input type
:return: embeddings result
"""
client_config = Config(region_name=credentials["aws_region"])
diff --git a/api/core/model_runtime/model_providers/cohere/text_embedding/text_embedding.py b/api/core/model_runtime/model_providers/cohere/text_embedding/text_embedding.py
index a1c5e98118..4da2080690 100644
--- a/api/core/model_runtime/model_providers/cohere/text_embedding/text_embedding.py
+++ b/api/core/model_runtime/model_providers/cohere/text_embedding/text_embedding.py
@@ -5,6 +5,7 @@ import cohere
import numpy as np
from cohere.core import RequestOptions
+from core.embedding.embedding_constant import EmbeddingInputType
from core.model_runtime.entities.model_entities import PriceType
from core.model_runtime.entities.text_embedding_entities import EmbeddingUsage, TextEmbeddingResult
from core.model_runtime.errors.invoke import (
@@ -25,7 +26,12 @@ class CohereTextEmbeddingModel(TextEmbeddingModel):
"""
def _invoke(
- self, model: str, credentials: dict, texts: list[str], user: Optional[str] = None
+ self,
+ model: str,
+ credentials: dict,
+ texts: list[str],
+ user: Optional[str] = None,
+ input_type: EmbeddingInputType = EmbeddingInputType.DOCUMENT,
) -> TextEmbeddingResult:
"""
Invoke text embedding model
@@ -34,6 +40,7 @@ class CohereTextEmbeddingModel(TextEmbeddingModel):
:param credentials: model credentials
:param texts: texts to embed
:param user: unique user id
+ :param input_type: input type
:return: embeddings result
"""
# get model properties
diff --git a/api/core/model_runtime/model_providers/fireworks/fireworks.yaml b/api/core/model_runtime/model_providers/fireworks/fireworks.yaml
index f886fa23b5..cdb87a55e9 100644
--- a/api/core/model_runtime/model_providers/fireworks/fireworks.yaml
+++ b/api/core/model_runtime/model_providers/fireworks/fireworks.yaml
@@ -15,6 +15,7 @@ help:
en_US: https://fireworks.ai/account/api-keys
supported_model_types:
- llm
+ - text-embedding
configurate_methods:
- predefined-model
provider_credential_schema:
diff --git a/api/core/model_runtime/model_providers/fireworks/llm/llama-v3p2-11b-vision-instruct.yaml b/api/core/model_runtime/model_providers/fireworks/llm/llama-v3p2-11b-vision-instruct.yaml
new file mode 100644
index 0000000000..31415a24fa
--- /dev/null
+++ b/api/core/model_runtime/model_providers/fireworks/llm/llama-v3p2-11b-vision-instruct.yaml
@@ -0,0 +1,46 @@
+model: accounts/fireworks/models/llama-v3p2-11b-vision-instruct
+label:
+ zh_Hans: Llama 3.2 11B Vision Instruct
+ en_US: Llama 3.2 11B Vision Instruct
+model_type: llm
+features:
+ - agent-thought
+ - tool-call
+model_properties:
+ mode: chat
+ context_size: 131072
+parameter_rules:
+ - name: temperature
+ use_template: temperature
+ - name: top_p
+ use_template: top_p
+ - name: top_k
+ label:
+ zh_Hans: 取样数量
+ en_US: Top k
+ type: int
+ help:
+ zh_Hans: 仅从每个后续标记的前 K 个选项中采样。
+ en_US: Only sample from the top K options for each subsequent token.
+ - name: max_tokens
+ use_template: max_tokens
+ - name: context_length_exceeded_behavior
+ default: None
+ label:
+ zh_Hans: 上下文长度超出行为
+ en_US: Context Length Exceeded Behavior
+ help:
+ zh_Hans: 上下文长度超出行为
+ en_US: Context Length Exceeded Behavior
+ type: string
+ options:
+ - None
+ - truncate
+ - error
+ - name: response_format
+ use_template: response_format
+pricing:
+ input: '0.2'
+ output: '0.2'
+ unit: '0.000001'
+ currency: USD
diff --git a/api/core/model_runtime/model_providers/fireworks/llm/llama-v3p2-1b-instruct.yaml b/api/core/model_runtime/model_providers/fireworks/llm/llama-v3p2-1b-instruct.yaml
new file mode 100644
index 0000000000..c2fd77d256
--- /dev/null
+++ b/api/core/model_runtime/model_providers/fireworks/llm/llama-v3p2-1b-instruct.yaml
@@ -0,0 +1,46 @@
+model: accounts/fireworks/models/llama-v3p2-1b-instruct
+label:
+ zh_Hans: Llama 3.2 1B Instruct
+ en_US: Llama 3.2 1B Instruct
+model_type: llm
+features:
+ - agent-thought
+ - tool-call
+model_properties:
+ mode: chat
+ context_size: 131072
+parameter_rules:
+ - name: temperature
+ use_template: temperature
+ - name: top_p
+ use_template: top_p
+ - name: top_k
+ label:
+ zh_Hans: 取样数量
+ en_US: Top k
+ type: int
+ help:
+ zh_Hans: 仅从每个后续标记的前 K 个选项中采样。
+ en_US: Only sample from the top K options for each subsequent token.
+ - name: max_tokens
+ use_template: max_tokens
+ - name: context_length_exceeded_behavior
+ default: None
+ label:
+ zh_Hans: 上下文长度超出行为
+ en_US: Context Length Exceeded Behavior
+ help:
+ zh_Hans: 上下文长度超出行为
+ en_US: Context Length Exceeded Behavior
+ type: string
+ options:
+ - None
+ - truncate
+ - error
+ - name: response_format
+ use_template: response_format
+pricing:
+ input: '0.1'
+ output: '0.1'
+ unit: '0.000001'
+ currency: USD
diff --git a/api/core/model_runtime/model_providers/fireworks/llm/llama-v3p2-3b-instruct.yaml b/api/core/model_runtime/model_providers/fireworks/llm/llama-v3p2-3b-instruct.yaml
new file mode 100644
index 0000000000..4b3c459c7b
--- /dev/null
+++ b/api/core/model_runtime/model_providers/fireworks/llm/llama-v3p2-3b-instruct.yaml
@@ -0,0 +1,46 @@
+model: accounts/fireworks/models/llama-v3p2-3b-instruct
+label:
+ zh_Hans: Llama 3.2 3B Instruct
+ en_US: Llama 3.2 3B Instruct
+model_type: llm
+features:
+ - agent-thought
+ - tool-call
+model_properties:
+ mode: chat
+ context_size: 131072
+parameter_rules:
+ - name: temperature
+ use_template: temperature
+ - name: top_p
+ use_template: top_p
+ - name: top_k
+ label:
+ zh_Hans: 取样数量
+ en_US: Top k
+ type: int
+ help:
+ zh_Hans: 仅从每个后续标记的前 K 个选项中采样。
+ en_US: Only sample from the top K options for each subsequent token.
+ - name: max_tokens
+ use_template: max_tokens
+ - name: context_length_exceeded_behavior
+ default: None
+ label:
+ zh_Hans: 上下文长度超出行为
+ en_US: Context Length Exceeded Behavior
+ help:
+ zh_Hans: 上下文长度超出行为
+ en_US: Context Length Exceeded Behavior
+ type: string
+ options:
+ - None
+ - truncate
+ - error
+ - name: response_format
+ use_template: response_format
+pricing:
+ input: '0.1'
+ output: '0.1'
+ unit: '0.000001'
+ currency: USD
diff --git a/api/core/model_runtime/model_providers/fireworks/llm/llama-v3p2-90b-vision-instruct.yaml b/api/core/model_runtime/model_providers/fireworks/llm/llama-v3p2-90b-vision-instruct.yaml
new file mode 100644
index 0000000000..0aece7455d
--- /dev/null
+++ b/api/core/model_runtime/model_providers/fireworks/llm/llama-v3p2-90b-vision-instruct.yaml
@@ -0,0 +1,46 @@
+model: accounts/fireworks/models/llama-v3p2-90b-vision-instruct
+label:
+ zh_Hans: Llama 3.2 90B Vision Instruct
+ en_US: Llama 3.2 90B Vision Instruct
+model_type: llm
+features:
+ - agent-thought
+ - tool-call
+model_properties:
+ mode: chat
+ context_size: 131072
+parameter_rules:
+ - name: temperature
+ use_template: temperature
+ - name: top_p
+ use_template: top_p
+ - name: top_k
+ label:
+ zh_Hans: 取样数量
+ en_US: Top k
+ type: int
+ help:
+ zh_Hans: 仅从每个后续标记的前 K 个选项中采样。
+ en_US: Only sample from the top K options for each subsequent token.
+ - name: max_tokens
+ use_template: max_tokens
+ - name: context_length_exceeded_behavior
+ default: None
+ label:
+ zh_Hans: 上下文长度超出行为
+ en_US: Context Length Exceeded Behavior
+ help:
+ zh_Hans: 上下文长度超出行为
+ en_US: Context Length Exceeded Behavior
+ type: string
+ options:
+ - None
+ - truncate
+ - error
+ - name: response_format
+ use_template: response_format
+pricing:
+ input: '0.9'
+ output: '0.9'
+ unit: '0.000001'
+ currency: USD
diff --git a/api/core/model_runtime/model_providers/fireworks/text_embedding/UAE-Large-V1.yaml b/api/core/model_runtime/model_providers/fireworks/text_embedding/UAE-Large-V1.yaml
new file mode 100644
index 0000000000..d7c11691cf
--- /dev/null
+++ b/api/core/model_runtime/model_providers/fireworks/text_embedding/UAE-Large-V1.yaml
@@ -0,0 +1,12 @@
+model: WhereIsAI/UAE-Large-V1
+label:
+ zh_Hans: UAE-Large-V1
+ en_US: UAE-Large-V1
+model_type: text-embedding
+model_properties:
+ context_size: 512
+ max_chunks: 1
+pricing:
+ input: '0.008'
+ unit: '0.000001'
+ currency: 'USD'
diff --git a/api/core/model_runtime/model_providers/fireworks/text_embedding/__init__.py b/api/core/model_runtime/model_providers/fireworks/text_embedding/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
diff --git a/api/core/model_runtime/model_providers/fireworks/text_embedding/gte-base.yaml b/api/core/model_runtime/model_providers/fireworks/text_embedding/gte-base.yaml
new file mode 100644
index 0000000000..d09bafb4d3
--- /dev/null
+++ b/api/core/model_runtime/model_providers/fireworks/text_embedding/gte-base.yaml
@@ -0,0 +1,12 @@
+model: thenlper/gte-base
+label:
+ zh_Hans: GTE-base
+ en_US: GTE-base
+model_type: text-embedding
+model_properties:
+ context_size: 512
+ max_chunks: 1
+pricing:
+ input: '0.008'
+ unit: '0.000001'
+ currency: 'USD'
diff --git a/api/core/model_runtime/model_providers/fireworks/text_embedding/gte-large.yaml b/api/core/model_runtime/model_providers/fireworks/text_embedding/gte-large.yaml
new file mode 100644
index 0000000000..c41fa2f9d3
--- /dev/null
+++ b/api/core/model_runtime/model_providers/fireworks/text_embedding/gte-large.yaml
@@ -0,0 +1,12 @@
+model: thenlper/gte-large
+label:
+ zh_Hans: GTE-large
+ en_US: GTE-large
+model_type: text-embedding
+model_properties:
+ context_size: 512
+ max_chunks: 1
+pricing:
+ input: '0.008'
+ unit: '0.000001'
+ currency: 'USD'
diff --git a/api/core/model_runtime/model_providers/fireworks/text_embedding/nomic-embed-text-v1.5.yaml b/api/core/model_runtime/model_providers/fireworks/text_embedding/nomic-embed-text-v1.5.yaml
new file mode 100644
index 0000000000..c9098503d9
--- /dev/null
+++ b/api/core/model_runtime/model_providers/fireworks/text_embedding/nomic-embed-text-v1.5.yaml
@@ -0,0 +1,12 @@
+model: nomic-ai/nomic-embed-text-v1.5
+label:
+ zh_Hans: nomic-embed-text-v1.5
+ en_US: nomic-embed-text-v1.5
+model_type: text-embedding
+model_properties:
+ context_size: 8192
+ max_chunks: 16
+pricing:
+ input: '0.008'
+ unit: '0.000001'
+ currency: 'USD'
diff --git a/api/core/model_runtime/model_providers/fireworks/text_embedding/nomic-embed-text-v1.yaml b/api/core/model_runtime/model_providers/fireworks/text_embedding/nomic-embed-text-v1.yaml
new file mode 100644
index 0000000000..89078d3ff6
--- /dev/null
+++ b/api/core/model_runtime/model_providers/fireworks/text_embedding/nomic-embed-text-v1.yaml
@@ -0,0 +1,12 @@
+model: nomic-ai/nomic-embed-text-v1
+label:
+ zh_Hans: nomic-embed-text-v1
+ en_US: nomic-embed-text-v1
+model_type: text-embedding
+model_properties:
+ context_size: 8192
+ max_chunks: 16
+pricing:
+ input: '0.008'
+ unit: '0.000001'
+ currency: 'USD'
diff --git a/api/core/model_runtime/model_providers/fireworks/text_embedding/text_embedding.py b/api/core/model_runtime/model_providers/fireworks/text_embedding/text_embedding.py
new file mode 100644
index 0000000000..cdce69ff38
--- /dev/null
+++ b/api/core/model_runtime/model_providers/fireworks/text_embedding/text_embedding.py
@@ -0,0 +1,151 @@
+import time
+from collections.abc import Mapping
+from typing import Optional, Union
+
+import numpy as np
+from openai import OpenAI
+
+from core.embedding.embedding_constant import EmbeddingInputType
+from core.model_runtime.entities.model_entities import PriceType
+from core.model_runtime.entities.text_embedding_entities import EmbeddingUsage, TextEmbeddingResult
+from core.model_runtime.errors.validate import CredentialsValidateFailedError
+from core.model_runtime.model_providers.__base.text_embedding_model import TextEmbeddingModel
+from core.model_runtime.model_providers.fireworks._common import _CommonFireworks
+
+
+class FireworksTextEmbeddingModel(_CommonFireworks, TextEmbeddingModel):
+ """
+ Model class for Fireworks text embedding model.
+ """
+
+ def _invoke(
+ self,
+ model: str,
+ credentials: dict,
+ texts: list[str],
+ user: Optional[str] = None,
+ input_type: EmbeddingInputType = EmbeddingInputType.DOCUMENT,
+ ) -> TextEmbeddingResult:
+ """
+ Invoke text embedding model
+
+ :param model: model name
+ :param credentials: model credentials
+ :param texts: texts to embed
+ :param user: unique user id
+ :param input_type: input type
+ :return: embeddings result
+ """
+
+ credentials_kwargs = self._to_credential_kwargs(credentials)
+ client = OpenAI(**credentials_kwargs)
+
+ extra_model_kwargs = {}
+ if user:
+ extra_model_kwargs["user"] = user
+
+ extra_model_kwargs["encoding_format"] = "float"
+
+ context_size = self._get_context_size(model, credentials)
+ max_chunks = self._get_max_chunks(model, credentials)
+
+ inputs = []
+ indices = []
+ used_tokens = 0
+
+ for i, text in enumerate(texts):
+ # Here token count is only an approximation based on the GPT2 tokenizer
+ # TODO: Optimize for better token estimation and chunking
+ num_tokens = self._get_num_tokens_by_gpt2(text)
+
+ if num_tokens >= context_size:
+ cutoff = int(np.floor(len(text) * (context_size / num_tokens)))
+ # if num tokens is larger than context length, only use the start
+ inputs.append(text[0:cutoff])
+ else:
+ inputs.append(text)
+ indices += [i]
+
+ batched_embeddings = []
+ _iter = range(0, len(inputs), max_chunks)
+
+ for i in _iter:
+ embeddings_batch, embedding_used_tokens = self._embedding_invoke(
+ model=model,
+ client=client,
+ texts=inputs[i : i + max_chunks],
+ extra_model_kwargs=extra_model_kwargs,
+ )
+ used_tokens += embedding_used_tokens
+ batched_embeddings += embeddings_batch
+
+ usage = self._calc_response_usage(model=model, credentials=credentials, tokens=used_tokens)
+ return TextEmbeddingResult(embeddings=batched_embeddings, usage=usage, model=model)
+
+ def get_num_tokens(self, model: str, credentials: dict, texts: list[str]) -> int:
+ """
+ Get number of tokens for given prompt messages
+
+ :param model: model name
+ :param credentials: model credentials
+ :param texts: texts to embed
+ :return:
+ """
+ return sum(self._get_num_tokens_by_gpt2(text) for text in texts)
+
+ def validate_credentials(self, model: str, credentials: Mapping) -> None:
+ """
+ Validate model credentials
+
+ :param model: model name
+ :param credentials: model credentials
+ :return:
+ """
+ try:
+ # transform credentials to kwargs for model instance
+ credentials_kwargs = self._to_credential_kwargs(credentials)
+ client = OpenAI(**credentials_kwargs)
+
+ # call embedding model
+ self._embedding_invoke(model=model, client=client, texts=["ping"], extra_model_kwargs={})
+ except Exception as ex:
+ raise CredentialsValidateFailedError(str(ex))
+
+ def _embedding_invoke(
+ self, model: str, client: OpenAI, texts: Union[list[str], str], extra_model_kwargs: dict
+ ) -> tuple[list[list[float]], int]:
+ """
+ Invoke embedding model
+ :param model: model name
+ :param client: model client
+ :param texts: texts to embed
+ :param extra_model_kwargs: extra model kwargs
+ :return: embeddings and used tokens
+ """
+ response = client.embeddings.create(model=model, input=texts, **extra_model_kwargs)
+ return [data.embedding for data in response.data], response.usage.total_tokens
+
+ def _calc_response_usage(self, model: str, credentials: dict, tokens: int) -> EmbeddingUsage:
+ """
+ Calculate response usage
+
+ :param model: model name
+ :param credentials: model credentials
+ :param tokens: input tokens
+ :return: usage
+ """
+ input_price_info = self.get_price(
+ model=model, credentials=credentials, tokens=tokens, price_type=PriceType.INPUT
+ )
+
+ usage = EmbeddingUsage(
+ tokens=tokens,
+ total_tokens=tokens,
+ unit_price=input_price_info.unit_price,
+ price_unit=input_price_info.unit,
+ total_price=input_price_info.total_amount,
+ currency=input_price_info.currency,
+ latency=time.perf_counter() - self.started_at,
+ )
+
+ return usage
diff --git a/api/core/model_runtime/model_providers/google/llm/gemini-1.5-flash-001.yaml b/api/core/model_runtime/model_providers/google/llm/gemini-1.5-flash-001.yaml
new file mode 100644
index 0000000000..d84e9937e0
--- /dev/null
+++ b/api/core/model_runtime/model_providers/google/llm/gemini-1.5-flash-001.yaml
@@ -0,0 +1,48 @@
+model: gemini-1.5-flash-001
+label:
+ en_US: Gemini 1.5 Flash 001
+model_type: llm
+features:
+ - agent-thought
+ - vision
+ - tool-call
+ - stream-tool-call
+model_properties:
+ mode: chat
+ context_size: 1048576
+parameter_rules:
+ - name: temperature
+ use_template: temperature
+ - name: top_p
+ use_template: top_p
+ - name: top_k
+ label:
+ zh_Hans: 取样数量
+ en_US: Top k
+ type: int
+ help:
+ zh_Hans: 仅从每个后续标记的前 K 个选项中采样。
+ en_US: Only sample from the top K options for each subsequent token.
+ required: false
+ - name: max_tokens_to_sample
+ use_template: max_tokens
+ required: true
+ default: 8192
+ min: 1
+ max: 8192
+ - name: response_format
+ use_template: response_format
+ - name: stream
+ label:
+ zh_Hans: 流式输出
+ en_US: Stream
+ type: boolean
+ help:
+ zh_Hans: 流式输出允许模型在生成文本的过程中逐步返回结果,而不是一次性生成全部结果后再返回。
+ en_US: Streaming output allows the model to return results incrementally as it generates text, rather than generating all the results at once.
+ default: false
+pricing:
+ input: '0.00'
+ output: '0.00'
+ unit: '0.000001'
+ currency: USD
diff --git a/api/core/model_runtime/model_providers/google/llm/gemini-1.5-flash-002.yaml b/api/core/model_runtime/model_providers/google/llm/gemini-1.5-flash-002.yaml
new file mode 100644
index 0000000000..2ff70564b2
--- /dev/null
+++ b/api/core/model_runtime/model_providers/google/llm/gemini-1.5-flash-002.yaml
@@ -0,0 +1,48 @@
+model: gemini-1.5-flash-002
+label:
+ en_US: Gemini 1.5 Flash 002
+model_type: llm
+features:
+ - agent-thought
+ - vision
+ - tool-call
+ - stream-tool-call
+model_properties:
+ mode: chat
+ context_size: 1048576
+parameter_rules:
+ - name: temperature
+ use_template: temperature
+ - name: top_p
+ use_template: top_p
+ - name: top_k
+ label:
+ zh_Hans: 取样数量
+ en_US: Top k
+ type: int
+ help:
+ zh_Hans: 仅从每个后续标记的前 K 个选项中采样。
+ en_US: Only sample from the top K options for each subsequent token.
+ required: false
+ - name: max_tokens_to_sample
+ use_template: max_tokens
+ required: true
+ default: 8192
+ min: 1
+ max: 8192
+ - name: response_format
+ use_template: response_format
+ - name: stream
+ label:
+ zh_Hans: 流式输出
+ en_US: Stream
+ type: boolean
+ help:
+ zh_Hans: 流式输出允许模型在生成文本的过程中逐步返回结果,而不是一次性生成全部结果后再返回。
+ en_US: Streaming output allows the model to return results incrementally as it generates text, rather than generating all the results at once.
+ default: false
+pricing:
+ input: '0.00'
+ output: '0.00'
+ unit: '0.000001'
+ currency: USD
diff --git a/api/core/model_runtime/model_providers/google/llm/gemini-1.5-flash-8b-exp-0827.yaml b/api/core/model_runtime/model_providers/google/llm/gemini-1.5-flash-8b-exp-0827.yaml
index bbc697e934..4e0209890a 100644
--- a/api/core/model_runtime/model_providers/google/llm/gemini-1.5-flash-8b-exp-0827.yaml
+++ b/api/core/model_runtime/model_providers/google/llm/gemini-1.5-flash-8b-exp-0827.yaml
@@ -32,6 +32,15 @@ parameter_rules:
max: 8192
- name: response_format
use_template: response_format
+ - name: stream
+ label:
+ zh_Hans: 流式输出
+ en_US: Stream
+ type: boolean
+ help:
+ zh_Hans: 流式输出允许模型在生成文本的过程中逐步返回结果,而不是一次性生成全部结果后再返回。
+ en_US: Streaming output allows the model to return results incrementally as it generates text, rather than generating all the results at once.
+ default: false
pricing:
input: '0.00'
output: '0.00'
diff --git a/api/core/model_runtime/model_providers/google/llm/gemini-1.5-flash-8b-exp-0924.yaml b/api/core/model_runtime/model_providers/google/llm/gemini-1.5-flash-8b-exp-0924.yaml
new file mode 100644
index 0000000000..2aea8149f4
--- /dev/null
+++ b/api/core/model_runtime/model_providers/google/llm/gemini-1.5-flash-8b-exp-0924.yaml
@@ -0,0 +1,48 @@
+model: gemini-1.5-flash-8b-exp-0924
+label:
+ en_US: Gemini 1.5 Flash 8B 0924
+model_type: llm
+features:
+ - agent-thought
+ - vision
+ - tool-call
+ - stream-tool-call
+model_properties:
+ mode: chat
+ context_size: 1048576
+parameter_rules:
+ - name: temperature
+ use_template: temperature
+ - name: top_p
+ use_template: top_p
+ - name: top_k
+ label:
+ zh_Hans: 取样数量
+ en_US: Top k
+ type: int
+ help:
+ zh_Hans: 仅从每个后续标记的前 K 个选项中采样。
+ en_US: Only sample from the top K options for each subsequent token.
+ required: false
+ - name: max_tokens_to_sample
+ use_template: max_tokens
+ required: true
+ default: 8192
+ min: 1
+ max: 8192
+ - name: response_format
+ use_template: response_format
+ - name: stream
+ label:
+ zh_Hans: 流式输出
+ en_US: Stream
+ type: boolean
+ help:
+ zh_Hans: 流式输出允许模型在生成文本的过程中逐步返回结果,而不是一次性生成全部结果后再返回。
+ en_US: Streaming output allows the model to return results incrementally as it generates text, rather than generating all the results at once.
+ default: false
+pricing:
+ input: '0.00'
+ output: '0.00'
+ unit: '0.000001'
+ currency: USD
diff --git a/api/core/model_runtime/model_providers/google/llm/gemini-1.5-flash-exp-0827.yaml b/api/core/model_runtime/model_providers/google/llm/gemini-1.5-flash-exp-0827.yaml
index c5695e5dda..faabc5e4d1 100644
--- a/api/core/model_runtime/model_providers/google/llm/gemini-1.5-flash-exp-0827.yaml
+++ b/api/core/model_runtime/model_providers/google/llm/gemini-1.5-flash-exp-0827.yaml
@@ -32,6 +32,15 @@ parameter_rules:
max: 8192
- name: response_format
use_template: response_format
+ - name: stream
+ label:
+ zh_Hans: 流式输出
+ en_US: Stream
+ type: boolean
+ help:
+ zh_Hans: 流式输出允许模型在生成文本的过程中逐步返回结果,而不是一次性生成全部结果后再返回。
+ en_US: Streaming output allows the model to return results incrementally as it generates text, rather than generating all the results at once.
+ default: false
pricing:
input: '0.00'
output: '0.00'
diff --git a/api/core/model_runtime/model_providers/google/llm/gemini-1.5-flash-latest.yaml b/api/core/model_runtime/model_providers/google/llm/gemini-1.5-flash-latest.yaml
index 24b1c5af8a..a22fcca941 100644
--- a/api/core/model_runtime/model_providers/google/llm/gemini-1.5-flash-latest.yaml
+++ b/api/core/model_runtime/model_providers/google/llm/gemini-1.5-flash-latest.yaml
@@ -1,6 +1,6 @@
model: gemini-1.5-flash-latest
label:
- en_US: Gemini 1.5 Flash
+ en_US: Gemini 1.5 Flash Latest
model_type: llm
features:
- agent-thought
@@ -32,6 +32,15 @@ parameter_rules:
max: 8192
- name: response_format
use_template: response_format
+ - name: stream
+ label:
+ zh_Hans: 流式输出
+ en_US: Stream
+ type: boolean
+ help:
+ zh_Hans: 流式输出允许模型在生成文本的过程中逐步返回结果,而不是一次性生成全部结果后再返回。
+ en_US: Streaming output allows the model to return results incrementally as it generates text, rather than generating all the results at once.
+ default: false
pricing:
input: '0.00'
output: '0.00'
diff --git a/api/core/model_runtime/model_providers/google/llm/gemini-1.5-flash.yaml b/api/core/model_runtime/model_providers/google/llm/gemini-1.5-flash.yaml
new file mode 100644
index 0000000000..dfd55c3a94
--- /dev/null
+++ b/api/core/model_runtime/model_providers/google/llm/gemini-1.5-flash.yaml
@@ -0,0 +1,48 @@
+model: gemini-1.5-flash
+label:
+ en_US: Gemini 1.5 Flash
+model_type: llm
+features:
+ - agent-thought
+ - vision
+ - tool-call
+ - stream-tool-call
+model_properties:
+ mode: chat
+ context_size: 1048576
+parameter_rules:
+ - name: temperature
+ use_template: temperature
+ - name: top_p
+ use_template: top_p
+ - name: top_k
+ label:
+ zh_Hans: 取样数量
+ en_US: Top k
+ type: int
+ help:
+ zh_Hans: 仅从每个后续标记的前 K 个选项中采样。
+ en_US: Only sample from the top K options for each subsequent token.
+ required: false
+ - name: max_tokens_to_sample
+ use_template: max_tokens
+ required: true
+ default: 8192
+ min: 1
+ max: 8192
+ - name: response_format
+ use_template: response_format
+ - name: stream
+ label:
+ zh_Hans: 流式输出
+ en_US: Stream
+ type: boolean
+ help:
+ zh_Hans: 流式输出允许模型在生成文本的过程中逐步返回结果,而不是一次性生成全部结果后再返回。
+ en_US: Streaming output allows the model to return results incrementally as it generates text, rather than generating all the results at once.
+ default: false
+pricing:
+ input: '0.00'
+ output: '0.00'
+ unit: '0.000001'
+ currency: USD
diff --git a/api/core/model_runtime/model_providers/google/llm/gemini-1.5-pro-001.yaml b/api/core/model_runtime/model_providers/google/llm/gemini-1.5-pro-001.yaml
new file mode 100644
index 0000000000..a1feff171d
--- /dev/null
+++ b/api/core/model_runtime/model_providers/google/llm/gemini-1.5-pro-001.yaml
@@ -0,0 +1,48 @@
+model: gemini-1.5-pro-001
+label:
+ en_US: Gemini 1.5 Pro 001
+model_type: llm
+features:
+ - agent-thought
+ - vision
+ - tool-call
+ - stream-tool-call
+model_properties:
+ mode: chat
+ context_size: 2097152
+parameter_rules:
+ - name: temperature
+ use_template: temperature
+ - name: top_p
+ use_template: top_p
+ - name: top_k
+ label:
+ zh_Hans: 取样数量
+ en_US: Top k
+ type: int
+ help:
+ zh_Hans: 仅从每个后续标记的前 K 个选项中采样。
+ en_US: Only sample from the top K options for each subsequent token.
+ required: false
+ - name: max_tokens_to_sample
+ use_template: max_tokens
+ required: true
+ default: 8192
+ min: 1
+ max: 8192
+ - name: response_format
+ use_template: response_format
+ - name: stream
+ label:
+ zh_Hans: 流式输出
+ en_US: Stream
+ type: boolean
+ help:
+ zh_Hans: 流式输出允许模型在生成文本的过程中逐步返回结果,而不是一次性生成全部结果后再返回。
+ en_US: Streaming output allows the model to return results incrementally as it generates text, rather than generating all the results at once.
+ default: false
+pricing:
+ input: '0.00'
+ output: '0.00'
+ unit: '0.000001'
+ currency: USD
diff --git a/api/core/model_runtime/model_providers/google/llm/gemini-1.5-pro-002.yaml b/api/core/model_runtime/model_providers/google/llm/gemini-1.5-pro-002.yaml
new file mode 100644
index 0000000000..9ae07a06c5
--- /dev/null
+++ b/api/core/model_runtime/model_providers/google/llm/gemini-1.5-pro-002.yaml
@@ -0,0 +1,48 @@
+model: gemini-1.5-pro-002
+label:
+ en_US: Gemini 1.5 Pro 002
+model_type: llm
+features:
+ - agent-thought
+ - vision
+ - tool-call
+ - stream-tool-call
+model_properties:
+ mode: chat
+ context_size: 2097152
+parameter_rules:
+ - name: temperature
+ use_template: temperature
+ - name: top_p
+ use_template: top_p
+ - name: top_k
+ label:
+ zh_Hans: 取样数量
+ en_US: Top k
+ type: int
+ help:
+ zh_Hans: 仅从每个后续标记的前 K 个选项中采样。
+ en_US: Only sample from the top K options for each subsequent token.
+ required: false
+ - name: max_tokens_to_sample
+ use_template: max_tokens
+ required: true
+ default: 8192
+ min: 1
+ max: 8192
+ - name: response_format
+ use_template: response_format
+ - name: stream
+ label:
+ zh_Hans: 流式输出
+ en_US: Stream
+ type: boolean
+ help:
+ zh_Hans: 流式输出允许模型在生成文本的过程中逐步返回结果,而不是一次性生成全部结果后再返回。
+ en_US: Streaming output allows the model to return results incrementally as it generates text, rather than generating all the results at once.
+ default: false
+pricing:
+ input: '0.00'
+ output: '0.00'
+ unit: '0.000001'
+ currency: USD
diff --git a/api/core/model_runtime/model_providers/google/llm/gemini-1.5-pro-exp-0801.yaml b/api/core/model_runtime/model_providers/google/llm/gemini-1.5-pro-exp-0801.yaml
index 0a918e0d7b..97c68f7a18 100644
--- a/api/core/model_runtime/model_providers/google/llm/gemini-1.5-pro-exp-0801.yaml
+++ b/api/core/model_runtime/model_providers/google/llm/gemini-1.5-pro-exp-0801.yaml
@@ -32,6 +32,15 @@ parameter_rules:
max: 8192
- name: response_format
use_template: response_format
+ - name: stream
+ label:
+ zh_Hans: 流式输出
+ en_US: Stream
+ type: boolean
+ help:
+ zh_Hans: 流式输出允许模型在生成文本的过程中逐步返回结果,而不是一次性生成全部结果后再返回。
+ en_US: Streaming output allows the model to return results incrementally as it generates text, rather than generating all the results at once.
+ default: false
pricing:
input: '0.00'
output: '0.00'
diff --git a/api/core/model_runtime/model_providers/google/llm/gemini-1.5-pro-exp-0827.yaml b/api/core/model_runtime/model_providers/google/llm/gemini-1.5-pro-exp-0827.yaml
index 7452ce46e7..860e4816a1 100644
--- a/api/core/model_runtime/model_providers/google/llm/gemini-1.5-pro-exp-0827.yaml
+++ b/api/core/model_runtime/model_providers/google/llm/gemini-1.5-pro-exp-0827.yaml
@@ -32,6 +32,15 @@ parameter_rules:
max: 8192
- name: response_format
use_template: response_format
+ - name: stream
+ label:
+ zh_Hans: 流式输出
+ en_US: Stream
+ type: boolean
+ help:
+ zh_Hans: 流式输出允许模型在生成文本的过程中逐步返回结果,而不是一次性生成全部结果后再返回。
+ en_US: Streaming output allows the model to return results incrementally as it generates text, rather than generating all the results at once.
+ default: false
pricing:
input: '0.00'
output: '0.00'
diff --git a/api/core/model_runtime/model_providers/google/llm/gemini-1.5-pro-latest.yaml b/api/core/model_runtime/model_providers/google/llm/gemini-1.5-pro-latest.yaml
index b3e1ecf3af..d1bf7d269d 100644
--- a/api/core/model_runtime/model_providers/google/llm/gemini-1.5-pro-latest.yaml
+++ b/api/core/model_runtime/model_providers/google/llm/gemini-1.5-pro-latest.yaml
@@ -1,6 +1,6 @@
model: gemini-1.5-pro-latest
label:
- en_US: Gemini 1.5 Pro
+ en_US: Gemini 1.5 Pro Latest
model_type: llm
features:
- agent-thought
@@ -32,6 +32,15 @@ parameter_rules:
max: 8192
- name: response_format
use_template: response_format
+ - name: stream
+ label:
+ zh_Hans: 流式输出
+ en_US: Stream
+ type: boolean
+ help:
+ zh_Hans: 流式输出允许模型在生成文本的过程中逐步返回结果,而不是一次性生成全部结果后再返回。
+ en_US: Streaming output allows the model to return results incrementally as it generates text, rather than generating all the results at once.
+ default: false
pricing:
input: '0.00'
output: '0.00'
diff --git a/api/core/model_runtime/model_providers/google/llm/gemini-1.5-pro.yaml b/api/core/model_runtime/model_providers/google/llm/gemini-1.5-pro.yaml
new file mode 100644
index 0000000000..bdd70b34a2
--- /dev/null
+++ b/api/core/model_runtime/model_providers/google/llm/gemini-1.5-pro.yaml
@@ -0,0 +1,48 @@
+model: gemini-1.5-pro
+label:
+ en_US: Gemini 1.5 Pro
+model_type: llm
+features:
+ - agent-thought
+ - vision
+ - tool-call
+ - stream-tool-call
+model_properties:
+ mode: chat
+ context_size: 2097152
+parameter_rules:
+ - name: temperature
+ use_template: temperature
+ - name: top_p
+ use_template: top_p
+ - name: top_k
+ label:
+ zh_Hans: 取样数量
+ en_US: Top k
+ type: int
+ help:
+ zh_Hans: 仅从每个后续标记的前 K 个选项中采样。
+ en_US: Only sample from the top K options for each subsequent token.
+ required: false
+ - name: max_tokens_to_sample
+ use_template: max_tokens
+ required: true
+ default: 8192
+ min: 1
+ max: 8192
+ - name: response_format
+ use_template: response_format
+ - name: stream
+ label:
+ zh_Hans: 流式输出
+ en_US: Stream
+ type: boolean
+ help:
+ zh_Hans: 流式输出允许模型在生成文本的过程中逐步返回结果,而不是一次性生成全部结果后再返回。
+ en_US: Streaming output allows the model to return results incrementally as it generates text, rather than generating all the results at once.
+ default: false
+pricing:
+ input: '0.00'
+ output: '0.00'
+ unit: '0.000001'
+ currency: USD
diff --git a/api/core/model_runtime/model_providers/google/llm/gemini-pro-vision.yaml b/api/core/model_runtime/model_providers/google/llm/gemini-pro-vision.yaml
index 075e484e46..2d213d56ad 100644
--- a/api/core/model_runtime/model_providers/google/llm/gemini-pro-vision.yaml
+++ b/api/core/model_runtime/model_providers/google/llm/gemini-pro-vision.yaml
@@ -27,6 +27,15 @@ parameter_rules:
default: 4096
min: 1
max: 4096
+ - name: stream
+ label:
+ zh_Hans: 流式输出
+ en_US: Stream
+ type: boolean
+ help:
+ zh_Hans: 流式输出允许模型在生成文本的过程中逐步返回结果,而不是一次性生成全部结果后再返回。
+ en_US: Streaming output allows the model to return results incrementally as it generates text, rather than generating all the results at once.
+ default: false
pricing:
input: '0.00'
output: '0.00'
diff --git a/api/core/model_runtime/model_providers/google/llm/gemini-pro.yaml b/api/core/model_runtime/model_providers/google/llm/gemini-pro.yaml
index 4e9f59e7da..e2f487c1ee 100644
--- a/api/core/model_runtime/model_providers/google/llm/gemini-pro.yaml
+++ b/api/core/model_runtime/model_providers/google/llm/gemini-pro.yaml
@@ -31,6 +31,15 @@ parameter_rules:
max: 2048
- name: response_format
use_template: response_format
+ - name: stream
+ label:
+ zh_Hans: 流式输出
+ en_US: Stream
+ type: boolean
+ help:
+ zh_Hans: 流式输出允许模型在生成文本的过程中逐步返回结果,而不是一次性生成全部结果后再返回。
+ en_US: Streaming output allows the model to return results incrementally as it generates text, rather than generating all the results at once.
+ default: false
pricing:
input: '0.00'
output: '0.00'
diff --git a/api/core/model_runtime/model_providers/google/llm/llm.py b/api/core/model_runtime/model_providers/google/llm/llm.py
index 3fc6787a44..e686ad08d9 100644
--- a/api/core/model_runtime/model_providers/google/llm/llm.py
+++ b/api/core/model_runtime/model_providers/google/llm/llm.py
@@ -9,8 +9,8 @@ import google.ai.generativelanguage as glm
import google.generativeai as genai
import requests
from google.api_core import exceptions
-from google.generativeai import client
-from google.generativeai.types import ContentType, GenerateContentResponse, HarmBlockThreshold, HarmCategory
+from google.generativeai.client import _ClientManager
+from google.generativeai.types import ContentType, GenerateContentResponse
from google.generativeai.types.content_types import to_part
from PIL import Image
@@ -200,24 +200,16 @@ class GoogleLargeLanguageModel(LargeLanguageModel):
history.append(content)
# Create a new ClientManager with tenant's API key
- new_client_manager = client._ClientManager()
+ new_client_manager = _ClientManager()
new_client_manager.configure(api_key=credentials["google_api_key"])
new_custom_client = new_client_manager.make_client("generative")
google_model._client = new_custom_client
- safety_settings = {
- HarmCategory.HARM_CATEGORY_HARASSMENT: HarmBlockThreshold.BLOCK_NONE,
- HarmCategory.HARM_CATEGORY_HATE_SPEECH: HarmBlockThreshold.BLOCK_NONE,
- HarmCategory.HARM_CATEGORY_SEXUALLY_EXPLICIT: HarmBlockThreshold.BLOCK_NONE,
- HarmCategory.HARM_CATEGORY_DANGEROUS_CONTENT: HarmBlockThreshold.BLOCK_NONE,
- }
-
response = google_model.generate_content(
contents=history,
generation_config=genai.types.GenerationConfig(**config_kwargs),
stream=stream,
- safety_settings=safety_settings,
tools=self._convert_tools_to_glm_tool(tools) if tools else None,
request_options={"timeout": 600},
)
diff --git a/api/core/model_runtime/model_providers/groq/llm/llama-3.2-11b-text-preview.yaml b/api/core/model_runtime/model_providers/groq/llm/llama-3.2-11b-text-preview.yaml
new file mode 100644
index 0000000000..019d453723
--- /dev/null
+++ b/api/core/model_runtime/model_providers/groq/llm/llama-3.2-11b-text-preview.yaml
@@ -0,0 +1,25 @@
+model: llama-3.2-11b-text-preview
+label:
+ zh_Hans: Llama 3.2 11B Text (Preview)
+ en_US: Llama 3.2 11B Text (Preview)
+model_type: llm
+features:
+ - agent-thought
+model_properties:
+ mode: chat
+ context_size: 131072
+parameter_rules:
+ - name: temperature
+ use_template: temperature
+ - name: top_p
+ use_template: top_p
+ - name: max_tokens
+ use_template: max_tokens
+ default: 512
+ min: 1
+ max: 8192
+pricing:
+ input: '0.05'
+ output: '0.1'
+ unit: '0.000001'
+ currency: USD
diff --git a/api/core/model_runtime/model_providers/groq/llm/llama-3.2-1b-preview.yaml b/api/core/model_runtime/model_providers/groq/llm/llama-3.2-1b-preview.yaml
new file mode 100644
index 0000000000..a44e4ff508
--- /dev/null
+++ b/api/core/model_runtime/model_providers/groq/llm/llama-3.2-1b-preview.yaml
@@ -0,0 +1,25 @@
+model: llama-3.2-1b-preview
+label:
+ zh_Hans: Llama 3.2 1B Text (Preview)
+ en_US: Llama 3.2 1B Text (Preview)
+model_type: llm
+features:
+ - agent-thought
+model_properties:
+ mode: chat
+ context_size: 131072
+parameter_rules:
+ - name: temperature
+ use_template: temperature
+ - name: top_p
+ use_template: top_p
+ - name: max_tokens
+ use_template: max_tokens
+ default: 512
+ min: 1
+ max: 8192
+pricing:
+ input: '0.05'
+ output: '0.1'
+ unit: '0.000001'
+ currency: USD
diff --git a/api/core/model_runtime/model_providers/groq/llm/llama-3.2-3b-preview.yaml b/api/core/model_runtime/model_providers/groq/llm/llama-3.2-3b-preview.yaml
new file mode 100644
index 0000000000..f2fdd0a05e
--- /dev/null
+++ b/api/core/model_runtime/model_providers/groq/llm/llama-3.2-3b-preview.yaml
@@ -0,0 +1,25 @@
+model: llama-3.2-3b-preview
+label:
+ zh_Hans: Llama 3.2 3B Text (Preview)
+ en_US: Llama 3.2 3B Text (Preview)
+model_type: llm
+features:
+ - agent-thought
+model_properties:
+ mode: chat
+ context_size: 131072
+parameter_rules:
+ - name: temperature
+ use_template: temperature
+ - name: top_p
+ use_template: top_p
+ - name: max_tokens
+ use_template: max_tokens
+ default: 512
+ min: 1
+ max: 8192
+pricing:
+ input: '0.05'
+ output: '0.1'
+ unit: '0.000001'
+ currency: USD
diff --git a/api/core/model_runtime/model_providers/groq/llm/llama-3.2-90b-text-preview.yaml b/api/core/model_runtime/model_providers/groq/llm/llama-3.2-90b-text-preview.yaml
new file mode 100644
index 0000000000..3b34e7c079
--- /dev/null
+++ b/api/core/model_runtime/model_providers/groq/llm/llama-3.2-90b-text-preview.yaml
@@ -0,0 +1,25 @@
+model: llama-3.2-90b-text-preview
+label:
+ zh_Hans: Llama 3.2 90B Text (Preview)
+ en_US: Llama 3.2 90B Text (Preview)
+model_type: llm
+features:
+ - agent-thought
+model_properties:
+ mode: chat
+ context_size: 131072
+parameter_rules:
+ - name: temperature
+ use_template: temperature
+ - name: top_p
+ use_template: top_p
+ - name: max_tokens
+ use_template: max_tokens
+ default: 512
+ min: 1
+ max: 8192
+pricing:
+ input: '0.05'
+ output: '0.1'
+ unit: '0.000001'
+ currency: USD
diff --git a/api/core/model_runtime/model_providers/huggingface_hub/text_embedding/text_embedding.py b/api/core/model_runtime/model_providers/huggingface_hub/text_embedding/text_embedding.py
index 4ad96c4233..b2e6d1b652 100644
--- a/api/core/model_runtime/model_providers/huggingface_hub/text_embedding/text_embedding.py
+++ b/api/core/model_runtime/model_providers/huggingface_hub/text_embedding/text_embedding.py
@@ -6,6 +6,7 @@ import numpy as np
import requests
from huggingface_hub import HfApi, InferenceClient
+from core.embedding.embedding_constant import EmbeddingInputType
from core.model_runtime.entities.common_entities import I18nObject
from core.model_runtime.entities.model_entities import AIModelEntity, FetchFrom, ModelType, PriceType
from core.model_runtime.entities.text_embedding_entities import EmbeddingUsage, TextEmbeddingResult
@@ -18,8 +19,23 @@ HUGGINGFACE_ENDPOINT_API = "https://api.endpoints.huggingface.cloud/v2/endpoint/
class HuggingfaceHubTextEmbeddingModel(_CommonHuggingfaceHub, TextEmbeddingModel):
def _invoke(
- self, model: str, credentials: dict, texts: list[str], user: Optional[str] = None
+ self,
+ model: str,
+ credentials: dict,
+ texts: list[str],
+ user: Optional[str] = None,
+ input_type: EmbeddingInputType = EmbeddingInputType.DOCUMENT,
) -> TextEmbeddingResult:
+ """
+ Invoke text embedding model
+
+ :param model: model name
+ :param credentials: model credentials
+ :param texts: texts to embed
+ :param user: unique user id
+ :param input_type: input type
+ :return: embeddings result
+ """
client = InferenceClient(token=credentials["huggingfacehub_api_token"])
execute_model = model
diff --git a/api/core/model_runtime/model_providers/huggingface_tei/text_embedding/text_embedding.py b/api/core/model_runtime/model_providers/huggingface_tei/text_embedding/text_embedding.py
index 55f3c25804..b8ff3ca549 100644
--- a/api/core/model_runtime/model_providers/huggingface_tei/text_embedding/text_embedding.py
+++ b/api/core/model_runtime/model_providers/huggingface_tei/text_embedding/text_embedding.py
@@ -1,6 +1,7 @@
import time
from typing import Optional
+from core.embedding.embedding_constant import EmbeddingInputType
from core.model_runtime.entities.common_entities import I18nObject
from core.model_runtime.entities.model_entities import AIModelEntity, FetchFrom, ModelPropertyKey, ModelType, PriceType
from core.model_runtime.entities.text_embedding_entities import EmbeddingUsage, TextEmbeddingResult
@@ -23,7 +24,12 @@ class HuggingfaceTeiTextEmbeddingModel(TextEmbeddingModel):
"""
def _invoke(
- self, model: str, credentials: dict, texts: list[str], user: Optional[str] = None
+ self,
+ model: str,
+ credentials: dict,
+ texts: list[str],
+ user: Optional[str] = None,
+ input_type: EmbeddingInputType = EmbeddingInputType.DOCUMENT,
) -> TextEmbeddingResult:
"""
Invoke text embedding model
@@ -38,6 +44,7 @@ class HuggingfaceTeiTextEmbeddingModel(TextEmbeddingModel):
:param credentials: model credentials
:param texts: texts to embed
:param user: unique user id
+ :param input_type: input type
:return: embeddings result
"""
server_url = credentials["server_url"]
diff --git a/api/core/model_runtime/model_providers/hunyuan/text_embedding/text_embedding.py b/api/core/model_runtime/model_providers/hunyuan/text_embedding/text_embedding.py
index 1396e59e18..75701ebc54 100644
--- a/api/core/model_runtime/model_providers/hunyuan/text_embedding/text_embedding.py
+++ b/api/core/model_runtime/model_providers/hunyuan/text_embedding/text_embedding.py
@@ -9,6 +9,7 @@ from tencentcloud.common.profile.client_profile import ClientProfile
from tencentcloud.common.profile.http_profile import HttpProfile
from tencentcloud.hunyuan.v20230901 import hunyuan_client, models
+from core.embedding.embedding_constant import EmbeddingInputType
from core.model_runtime.entities.model_entities import PriceType
from core.model_runtime.entities.text_embedding_entities import EmbeddingUsage, TextEmbeddingResult
from core.model_runtime.errors.invoke import (
@@ -26,7 +27,12 @@ class HunyuanTextEmbeddingModel(TextEmbeddingModel):
"""
def _invoke(
- self, model: str, credentials: dict, texts: list[str], user: Optional[str] = None
+ self,
+ model: str,
+ credentials: dict,
+ texts: list[str],
+ user: Optional[str] = None,
+ input_type: EmbeddingInputType = EmbeddingInputType.DOCUMENT,
) -> TextEmbeddingResult:
"""
Invoke text embedding model
@@ -35,6 +41,7 @@ class HunyuanTextEmbeddingModel(TextEmbeddingModel):
:param credentials: model credentials
:param texts: texts to embed
:param user: unique user id
+ :param input_type: input type
:return: embeddings result
"""
diff --git a/api/core/model_runtime/model_providers/jina/jina.yaml b/api/core/model_runtime/model_providers/jina/jina.yaml
index 4ff6ba0f22..970b22965b 100644
--- a/api/core/model_runtime/model_providers/jina/jina.yaml
+++ b/api/core/model_runtime/model_providers/jina/jina.yaml
@@ -67,46 +67,3 @@ model_credential_schema:
required: false
type: text-input
default: '8192'
- - variable: task
- label:
- zh_Hans: 下游任务
- en_US: Downstream task
- placeholder:
- zh_Hans: 选择将使用向量模型的下游任务。模型将返回针对该任务优化的向量。
- en_US: Select the downstream task for which the embeddings will be used. The model will return the optimized embeddings for that task.
- required: false
- type: select
- options:
- - value: retrieval.query
- label:
- en_US: retrieval.query
- - value: retrieval.passage
- label:
- en_US: retrieval.passage
- - value: separation
- label:
- en_US: separation
- - value: classification
- label:
- en_US: classification
- - value: text-matching
- label:
- en_US: text-matching
- - variable: dimensions
- label:
- zh_Hans: 输出维度
- en_US: Output dimensions
- placeholder:
- zh_Hans: 输入您的输出维度
- en_US: Enter output dimensions
- required: false
- type: text-input
- - variable: late_chunking
- label:
- zh_Hans: 后期分块
- en_US: Late chunking
- placeholder:
- zh_Hans: 应用后期分块技术来利用模型的长上下文功能来生成上下文块向量化。
- en_US: Apply the late chunking technique to leverage the model's long-context capabilities for generating contextual chunk embeddings.
- required: false
- type: switch
diff --git a/api/core/model_runtime/model_providers/jina/text_embedding/text_embedding.py b/api/core/model_runtime/model_providers/jina/text_embedding/text_embedding.py
index 6c96699ea2..b397129512 100644
--- a/api/core/model_runtime/model_providers/jina/text_embedding/text_embedding.py
+++ b/api/core/model_runtime/model_providers/jina/text_embedding/text_embedding.py
@@ -4,6 +4,7 @@ from typing import Optional
from requests import post
+from core.embedding.embedding_constant import EmbeddingInputType
from core.model_runtime.entities.common_entities import I18nObject
from core.model_runtime.entities.model_entities import AIModelEntity, FetchFrom, ModelPropertyKey, ModelType, PriceType
from core.model_runtime.entities.text_embedding_entities import EmbeddingUsage, TextEmbeddingResult
@@ -27,7 +28,7 @@ class JinaTextEmbeddingModel(TextEmbeddingModel):
api_base: str = "https://api.jina.ai/v1"
- def _to_payload(self, model: str, texts: list[str], credentials: dict) -> dict:
+ def _to_payload(self, model: str, texts: list[str], credentials: dict, input_type: EmbeddingInputType) -> dict:
"""
Parse model credentials
@@ -44,23 +45,20 @@ class JinaTextEmbeddingModel(TextEmbeddingModel):
data = {"model": model, "input": [transform_jina_input_text(model, text) for text in texts]}
- task = credentials.get("task")
- dimensions = credentials.get("dimensions")
- late_chunking = credentials.get("late_chunking")
-
- if task is not None:
- data["task"] = task
-
- if dimensions is not None:
- data["dimensions"] = int(dimensions)
-
- if late_chunking is not None:
- data["late_chunking"] = late_chunking
+ # model specific parameters
+ if model == "jina-embeddings-v3":
+ # set `task` type according to input type for the best performance
+ data["task"] = "retrieval.query" if input_type == EmbeddingInputType.QUERY else "retrieval.passage"
return data
def _invoke(
- self, model: str, credentials: dict, texts: list[str], user: Optional[str] = None
+ self,
+ model: str,
+ credentials: dict,
+ texts: list[str],
+ user: Optional[str] = None,
+ input_type: EmbeddingInputType = EmbeddingInputType.DOCUMENT,
) -> TextEmbeddingResult:
"""
Invoke text embedding model
@@ -69,6 +67,7 @@ class JinaTextEmbeddingModel(TextEmbeddingModel):
:param credentials: model credentials
:param texts: texts to embed
:param user: unique user id
+ :param input_type: input type
:return: embeddings result
"""
api_key = credentials["api_key"]
@@ -81,7 +80,7 @@ class JinaTextEmbeddingModel(TextEmbeddingModel):
url = base_url + "/embeddings"
headers = {"Authorization": "Bearer " + api_key, "Content-Type": "application/json"}
- data = self._to_payload(model=model, texts=texts, credentials=credentials)
+ data = self._to_payload(model=model, texts=texts, credentials=credentials, input_type=input_type)
try:
response = post(url, headers=headers, data=dumps(data))
diff --git a/api/core/model_runtime/model_providers/localai/text_embedding/text_embedding.py b/api/core/model_runtime/model_providers/localai/text_embedding/text_embedding.py
index 7d258be81e..ab8ca76c2f 100644
--- a/api/core/model_runtime/model_providers/localai/text_embedding/text_embedding.py
+++ b/api/core/model_runtime/model_providers/localai/text_embedding/text_embedding.py
@@ -5,6 +5,7 @@ from typing import Optional
from requests import post
from yarl import URL
+from core.embedding.embedding_constant import EmbeddingInputType
from core.model_runtime.entities.common_entities import I18nObject
from core.model_runtime.entities.model_entities import AIModelEntity, FetchFrom, ModelPropertyKey, ModelType, PriceType
from core.model_runtime.entities.text_embedding_entities import EmbeddingUsage, TextEmbeddingResult
@@ -22,11 +23,16 @@ from core.model_runtime.model_providers.__base.text_embedding_model import TextE
class LocalAITextEmbeddingModel(TextEmbeddingModel):
"""
- Model class for Jina text embedding model.
+ Model class for LocalAI text embedding model.
"""
def _invoke(
- self, model: str, credentials: dict, texts: list[str], user: Optional[str] = None
+ self,
+ model: str,
+ credentials: dict,
+ texts: list[str],
+ user: Optional[str] = None,
+ input_type: EmbeddingInputType = EmbeddingInputType.DOCUMENT,
) -> TextEmbeddingResult:
"""
Invoke text embedding model
@@ -35,6 +41,7 @@ class LocalAITextEmbeddingModel(TextEmbeddingModel):
:param credentials: model credentials
:param texts: texts to embed
:param user: unique user id
+ :param input_type: input type
:return: embeddings result
"""
if len(texts) != 1:
diff --git a/api/core/model_runtime/model_providers/minimax/text_embedding/text_embedding.py b/api/core/model_runtime/model_providers/minimax/text_embedding/text_embedding.py
index 76fd1342bd..74d2a221d1 100644
--- a/api/core/model_runtime/model_providers/minimax/text_embedding/text_embedding.py
+++ b/api/core/model_runtime/model_providers/minimax/text_embedding/text_embedding.py
@@ -4,6 +4,7 @@ from typing import Optional
from requests import post
+from core.embedding.embedding_constant import EmbeddingInputType
from core.model_runtime.entities.model_entities import PriceType
from core.model_runtime.entities.text_embedding_entities import EmbeddingUsage, TextEmbeddingResult
from core.model_runtime.errors.invoke import (
@@ -34,7 +35,12 @@ class MinimaxTextEmbeddingModel(TextEmbeddingModel):
api_base: str = "https://api.minimax.chat/v1/embeddings"
def _invoke(
- self, model: str, credentials: dict, texts: list[str], user: Optional[str] = None
+ self,
+ model: str,
+ credentials: dict,
+ texts: list[str],
+ user: Optional[str] = None,
+ input_type: EmbeddingInputType = EmbeddingInputType.DOCUMENT,
) -> TextEmbeddingResult:
"""
Invoke text embedding model
@@ -43,6 +49,7 @@ class MinimaxTextEmbeddingModel(TextEmbeddingModel):
:param credentials: model credentials
:param texts: texts to embed
:param user: unique user id
+ :param input_type: input type
:return: embeddings result
"""
api_key = credentials["minimax_api_key"]
diff --git a/api/core/model_runtime/model_providers/mixedbread/text_embedding/text_embedding.py b/api/core/model_runtime/model_providers/mixedbread/text_embedding/text_embedding.py
index 05d9a9a0c6..68b7b448bf 100644
--- a/api/core/model_runtime/model_providers/mixedbread/text_embedding/text_embedding.py
+++ b/api/core/model_runtime/model_providers/mixedbread/text_embedding/text_embedding.py
@@ -4,6 +4,7 @@ from typing import Optional
import requests
+from core.embedding.embedding_constant import EmbeddingInputType
from core.model_runtime.entities.common_entities import I18nObject
from core.model_runtime.entities.model_entities import AIModelEntity, FetchFrom, ModelPropertyKey, ModelType, PriceType
from core.model_runtime.entities.text_embedding_entities import EmbeddingUsage, TextEmbeddingResult
@@ -27,7 +28,12 @@ class MixedBreadTextEmbeddingModel(TextEmbeddingModel):
api_base: str = "https://api.mixedbread.ai/v1"
def _invoke(
- self, model: str, credentials: dict, texts: list[str], user: Optional[str] = None
+ self,
+ model: str,
+ credentials: dict,
+ texts: list[str],
+ user: Optional[str] = None,
+ input_type: EmbeddingInputType = EmbeddingInputType.DOCUMENT,
) -> TextEmbeddingResult:
"""
Invoke text embedding model
@@ -36,6 +42,7 @@ class MixedBreadTextEmbeddingModel(TextEmbeddingModel):
:param credentials: model credentials
:param texts: texts to embed
:param user: unique user id
+ :param input_type: input type
:return: embeddings result
"""
api_key = credentials["api_key"]
diff --git a/api/core/model_runtime/model_providers/nomic/text_embedding/text_embedding.py b/api/core/model_runtime/model_providers/nomic/text_embedding/text_embedding.py
index ccbfd196a9..857dfb5f41 100644
--- a/api/core/model_runtime/model_providers/nomic/text_embedding/text_embedding.py
+++ b/api/core/model_runtime/model_providers/nomic/text_embedding/text_embedding.py
@@ -5,6 +5,7 @@ from typing import Optional
from nomic import embed
from nomic import login as nomic_login
+from core.embedding.embedding_constant import EmbeddingInputType
from core.model_runtime.entities.model_entities import PriceType
from core.model_runtime.entities.text_embedding_entities import (
EmbeddingUsage,
@@ -46,6 +47,7 @@ class NomicTextEmbeddingModel(_CommonNomic, TextEmbeddingModel):
credentials: dict,
texts: list[str],
user: Optional[str] = None,
+ input_type: EmbeddingInputType = EmbeddingInputType.DOCUMENT,
) -> TextEmbeddingResult:
"""
Invoke text embedding model
@@ -54,6 +56,7 @@ class NomicTextEmbeddingModel(_CommonNomic, TextEmbeddingModel):
:param credentials: model credentials
:param texts: texts to embed
:param user: unique user id
+ :param input_type: input type
:return: embeddings result
"""
embeddings, prompt_tokens, total_tokens = self.embed_text(
diff --git a/api/core/model_runtime/model_providers/nvidia/text_embedding/text_embedding.py b/api/core/model_runtime/model_providers/nvidia/text_embedding/text_embedding.py
index 00cec265d5..936ceb8dd2 100644
--- a/api/core/model_runtime/model_providers/nvidia/text_embedding/text_embedding.py
+++ b/api/core/model_runtime/model_providers/nvidia/text_embedding/text_embedding.py
@@ -4,6 +4,7 @@ from typing import Optional
from requests import post
+from core.embedding.embedding_constant import EmbeddingInputType
from core.model_runtime.entities.model_entities import PriceType
from core.model_runtime.entities.text_embedding_entities import EmbeddingUsage, TextEmbeddingResult
from core.model_runtime.errors.invoke import (
@@ -27,7 +28,12 @@ class NvidiaTextEmbeddingModel(TextEmbeddingModel):
models: list[str] = ["NV-Embed-QA"]
def _invoke(
- self, model: str, credentials: dict, texts: list[str], user: Optional[str] = None
+ self,
+ model: str,
+ credentials: dict,
+ texts: list[str],
+ user: Optional[str] = None,
+ input_type: EmbeddingInputType = EmbeddingInputType.DOCUMENT,
) -> TextEmbeddingResult:
"""
Invoke text embedding model
@@ -36,6 +42,7 @@ class NvidiaTextEmbeddingModel(TextEmbeddingModel):
:param credentials: model credentials
:param texts: texts to embed
:param user: unique user id
+ :param input_type: input type
:return: embeddings result
"""
api_key = credentials["api_key"]
diff --git a/api/core/model_runtime/model_providers/oci/text_embedding/text_embedding.py b/api/core/model_runtime/model_providers/oci/text_embedding/text_embedding.py
index 80ad2be9f5..4de9296cca 100644
--- a/api/core/model_runtime/model_providers/oci/text_embedding/text_embedding.py
+++ b/api/core/model_runtime/model_providers/oci/text_embedding/text_embedding.py
@@ -6,6 +6,7 @@ from typing import Optional
import numpy as np
import oci
+from core.embedding.embedding_constant import EmbeddingInputType
from core.model_runtime.entities.model_entities import PriceType
from core.model_runtime.entities.text_embedding_entities import EmbeddingUsage, TextEmbeddingResult
from core.model_runtime.errors.invoke import (
@@ -41,7 +42,12 @@ class OCITextEmbeddingModel(TextEmbeddingModel):
"""
def _invoke(
- self, model: str, credentials: dict, texts: list[str], user: Optional[str] = None
+ self,
+ model: str,
+ credentials: dict,
+ texts: list[str],
+ user: Optional[str] = None,
+ input_type: EmbeddingInputType = EmbeddingInputType.DOCUMENT,
) -> TextEmbeddingResult:
"""
Invoke text embedding model
@@ -50,6 +56,7 @@ class OCITextEmbeddingModel(TextEmbeddingModel):
:param credentials: model credentials
:param texts: texts to embed
:param user: unique user id
+ :param input_type: input type
:return: embeddings result
"""
# get model properties
diff --git a/api/core/model_runtime/model_providers/ollama/llm/llm.py b/api/core/model_runtime/model_providers/ollama/llm/llm.py
index ff732e6925..a7ea53e0e9 100644
--- a/api/core/model_runtime/model_providers/ollama/llm/llm.py
+++ b/api/core/model_runtime/model_providers/ollama/llm/llm.py
@@ -364,14 +364,21 @@ class OllamaLargeLanguageModel(LargeLanguageModel):
if chunk_json["done"]:
# calculate num tokens
- if "prompt_eval_count" in chunk_json and "eval_count" in chunk_json:
- # transform usage
+ if "prompt_eval_count" in chunk_json:
prompt_tokens = chunk_json["prompt_eval_count"]
- completion_tokens = chunk_json["eval_count"]
else:
- # calculate num tokens
- prompt_tokens = self._get_num_tokens_by_gpt2(prompt_messages[0].content)
- completion_tokens = self._get_num_tokens_by_gpt2(full_text)
+ prompt_message_content = prompt_messages[0].content
+ if isinstance(prompt_message_content, str):
+ prompt_tokens = self._get_num_tokens_by_gpt2(prompt_message_content)
+ else:
+ content_text = ""
+ for message_content in prompt_message_content:
+ if message_content.type == PromptMessageContentType.TEXT:
+ message_content = cast(TextPromptMessageContent, message_content)
+ content_text += message_content.data
+ prompt_tokens = self._get_num_tokens_by_gpt2(content_text)
+
+ completion_tokens = chunk_json.get("eval_count", self._get_num_tokens_by_gpt2(full_text))
# transform usage
usage = self._calc_response_usage(model, credentials, prompt_tokens, completion_tokens)
diff --git a/api/core/model_runtime/model_providers/ollama/text_embedding/text_embedding.py b/api/core/model_runtime/model_providers/ollama/text_embedding/text_embedding.py
index b4c61d8a6d..5cf3f1c6fa 100644
--- a/api/core/model_runtime/model_providers/ollama/text_embedding/text_embedding.py
+++ b/api/core/model_runtime/model_providers/ollama/text_embedding/text_embedding.py
@@ -8,6 +8,7 @@ from urllib.parse import urljoin
import numpy as np
import requests
+from core.embedding.embedding_constant import EmbeddingInputType
from core.model_runtime.entities.common_entities import I18nObject
from core.model_runtime.entities.model_entities import (
AIModelEntity,
@@ -38,7 +39,12 @@ class OllamaEmbeddingModel(TextEmbeddingModel):
"""
def _invoke(
- self, model: str, credentials: dict, texts: list[str], user: Optional[str] = None
+ self,
+ model: str,
+ credentials: dict,
+ texts: list[str],
+ user: Optional[str] = None,
+ input_type: EmbeddingInputType = EmbeddingInputType.DOCUMENT,
) -> TextEmbeddingResult:
"""
Invoke text embedding model
@@ -47,6 +53,7 @@ class OllamaEmbeddingModel(TextEmbeddingModel):
:param credentials: model credentials
:param texts: texts to embed
:param user: unique user id
+ :param input_type: input type
:return: embeddings result
"""
diff --git a/api/core/model_runtime/model_providers/openai/text_embedding/text_embedding.py b/api/core/model_runtime/model_providers/openai/text_embedding/text_embedding.py
index 535d8388bc..16f1a0cfa1 100644
--- a/api/core/model_runtime/model_providers/openai/text_embedding/text_embedding.py
+++ b/api/core/model_runtime/model_providers/openai/text_embedding/text_embedding.py
@@ -6,6 +6,7 @@ import numpy as np
import tiktoken
from openai import OpenAI
+from core.embedding.embedding_constant import EmbeddingInputType
from core.model_runtime.entities.model_entities import PriceType
from core.model_runtime.entities.text_embedding_entities import EmbeddingUsage, TextEmbeddingResult
from core.model_runtime.errors.validate import CredentialsValidateFailedError
@@ -19,7 +20,12 @@ class OpenAITextEmbeddingModel(_CommonOpenAI, TextEmbeddingModel):
"""
def _invoke(
- self, model: str, credentials: dict, texts: list[str], user: Optional[str] = None
+ self,
+ model: str,
+ credentials: dict,
+ texts: list[str],
+ user: Optional[str] = None,
+ input_type: EmbeddingInputType = EmbeddingInputType.DOCUMENT,
) -> TextEmbeddingResult:
"""
Invoke text embedding model
@@ -28,6 +34,7 @@ class OpenAITextEmbeddingModel(_CommonOpenAI, TextEmbeddingModel):
:param credentials: model credentials
:param texts: texts to embed
:param user: unique user id
+ :param input_type: input type
:return: embeddings result
"""
# transform credentials to kwargs for model instance
diff --git a/api/core/model_runtime/model_providers/openai_api_compatible/text_embedding/text_embedding.py b/api/core/model_runtime/model_providers/openai_api_compatible/text_embedding/text_embedding.py
index e83cfdf873..64fa6aaa3c 100644
--- a/api/core/model_runtime/model_providers/openai_api_compatible/text_embedding/text_embedding.py
+++ b/api/core/model_runtime/model_providers/openai_api_compatible/text_embedding/text_embedding.py
@@ -7,6 +7,7 @@ from urllib.parse import urljoin
import numpy as np
import requests
+from core.embedding.embedding_constant import EmbeddingInputType
from core.model_runtime.entities.common_entities import I18nObject
from core.model_runtime.entities.model_entities import (
AIModelEntity,
@@ -28,7 +29,12 @@ class OAICompatEmbeddingModel(_CommonOaiApiCompat, TextEmbeddingModel):
"""
def _invoke(
- self, model: str, credentials: dict, texts: list[str], user: Optional[str] = None
+ self,
+ model: str,
+ credentials: dict,
+ texts: list[str],
+ user: Optional[str] = None,
+ input_type: EmbeddingInputType = EmbeddingInputType.DOCUMENT,
) -> TextEmbeddingResult:
"""
Invoke text embedding model
@@ -37,6 +43,7 @@ class OAICompatEmbeddingModel(_CommonOaiApiCompat, TextEmbeddingModel):
:param credentials: model credentials
:param texts: texts to embed
:param user: unique user id
+ :param input_type: input type
:return: embeddings result
"""
diff --git a/api/core/model_runtime/model_providers/openllm/text_embedding/text_embedding.py b/api/core/model_runtime/model_providers/openllm/text_embedding/text_embedding.py
index 00e583cc79..c5d4330912 100644
--- a/api/core/model_runtime/model_providers/openllm/text_embedding/text_embedding.py
+++ b/api/core/model_runtime/model_providers/openllm/text_embedding/text_embedding.py
@@ -5,6 +5,7 @@ from typing import Optional
from requests import post
from requests.exceptions import ConnectionError, InvalidSchema, MissingSchema
+from core.embedding.embedding_constant import EmbeddingInputType
from core.model_runtime.entities.model_entities import PriceType
from core.model_runtime.entities.text_embedding_entities import EmbeddingUsage, TextEmbeddingResult
from core.model_runtime.errors.invoke import (
@@ -25,7 +26,12 @@ class OpenLLMTextEmbeddingModel(TextEmbeddingModel):
"""
def _invoke(
- self, model: str, credentials: dict, texts: list[str], user: Optional[str] = None
+ self,
+ model: str,
+ credentials: dict,
+ texts: list[str],
+ user: Optional[str] = None,
+ input_type: EmbeddingInputType = EmbeddingInputType.DOCUMENT,
) -> TextEmbeddingResult:
"""
Invoke text embedding model
@@ -34,6 +40,7 @@ class OpenLLMTextEmbeddingModel(TextEmbeddingModel):
:param credentials: model credentials
:param texts: texts to embed
:param user: unique user id
+ :param input_type: input type
:return: embeddings result
"""
server_url = credentials["server_url"]
diff --git a/api/core/model_runtime/model_providers/perfxcloud/text_embedding/text_embedding.py b/api/core/model_runtime/model_providers/perfxcloud/text_embedding/text_embedding.py
index b62a2d2aaf..1e86f351c8 100644
--- a/api/core/model_runtime/model_providers/perfxcloud/text_embedding/text_embedding.py
+++ b/api/core/model_runtime/model_providers/perfxcloud/text_embedding/text_embedding.py
@@ -7,6 +7,7 @@ from urllib.parse import urljoin
import numpy as np
import requests
+from core.embedding.embedding_constant import EmbeddingInputType
from core.model_runtime.entities.common_entities import I18nObject
from core.model_runtime.entities.model_entities import (
AIModelEntity,
@@ -28,7 +29,12 @@ class OAICompatEmbeddingModel(_CommonOaiApiCompat, TextEmbeddingModel):
"""
def _invoke(
- self, model: str, credentials: dict, texts: list[str], user: Optional[str] = None
+ self,
+ model: str,
+ credentials: dict,
+ texts: list[str],
+ user: Optional[str] = None,
+ input_type: EmbeddingInputType = EmbeddingInputType.DOCUMENT,
) -> TextEmbeddingResult:
"""
Invoke text embedding model
@@ -37,6 +43,7 @@ class OAICompatEmbeddingModel(_CommonOaiApiCompat, TextEmbeddingModel):
:param credentials: model credentials
:param texts: texts to embed
:param user: unique user id
+ :param input_type: input type
:return: embeddings result
"""
diff --git a/api/core/model_runtime/model_providers/replicate/text_embedding/text_embedding.py b/api/core/model_runtime/model_providers/replicate/text_embedding/text_embedding.py
index 71b6fb99c4..9f724a77ac 100644
--- a/api/core/model_runtime/model_providers/replicate/text_embedding/text_embedding.py
+++ b/api/core/model_runtime/model_providers/replicate/text_embedding/text_embedding.py
@@ -4,6 +4,7 @@ from typing import Optional
from replicate import Client as ReplicateClient
+from core.embedding.embedding_constant import EmbeddingInputType
from core.model_runtime.entities.common_entities import I18nObject
from core.model_runtime.entities.model_entities import AIModelEntity, FetchFrom, ModelType, PriceType
from core.model_runtime.entities.text_embedding_entities import EmbeddingUsage, TextEmbeddingResult
@@ -14,8 +15,23 @@ from core.model_runtime.model_providers.replicate._common import _CommonReplicat
class ReplicateEmbeddingModel(_CommonReplicate, TextEmbeddingModel):
def _invoke(
- self, model: str, credentials: dict, texts: list[str], user: Optional[str] = None
+ self,
+ model: str,
+ credentials: dict,
+ texts: list[str],
+ user: Optional[str] = None,
+ input_type: EmbeddingInputType = EmbeddingInputType.DOCUMENT,
) -> TextEmbeddingResult:
+ """
+ Invoke text embedding model
+
+ :param model: model name
+ :param credentials: model credentials
+ :param texts: texts to embed
+ :param user: unique user id
+ :param input_type: input type
+ :return: embeddings result
+ """
client = ReplicateClient(api_token=credentials["replicate_api_token"], timeout=30)
if "model_version" in credentials:
diff --git a/api/core/model_runtime/model_providers/sagemaker/llm/llm.py b/api/core/model_runtime/model_providers/sagemaker/llm/llm.py
index 04789197ee..97b7692044 100644
--- a/api/core/model_runtime/model_providers/sagemaker/llm/llm.py
+++ b/api/core/model_runtime/model_providers/sagemaker/llm/llm.py
@@ -84,8 +84,9 @@ class SageMakerLargeLanguageModel(LargeLanguageModel):
Model class for Cohere large language model.
"""
- sagemaker_client: Any = None
+ sagemaker_session: Any = None
predictor: Any = None
+ sagemaker_endpoint: Optional[str] = None
def _handle_chat_generate_response(
self,
@@ -211,7 +212,7 @@ class SageMakerLargeLanguageModel(LargeLanguageModel):
:param user: unique user id
:return: full response or stream response chunk generator result
"""
- if not self.sagemaker_client:
+ if not self.sagemaker_session:
access_key = credentials.get("aws_access_key_id")
secret_key = credentials.get("aws_secret_access_key")
aws_region = credentials.get("aws_region")
@@ -226,11 +227,14 @@ class SageMakerLargeLanguageModel(LargeLanguageModel):
else:
boto_session = boto3.Session()
- self.sagemaker_client = boto_session.client("sagemaker")
- sagemaker_session = Session(boto_session=boto_session, sagemaker_client=self.sagemaker_client)
+ sagemaker_client = boto_session.client("sagemaker")
+ self.sagemaker_session = Session(boto_session=boto_session, sagemaker_client=sagemaker_client)
+
+ if self.sagemaker_endpoint != credentials.get("sagemaker_endpoint"):
+ self.sagemaker_endpoint = credentials.get("sagemaker_endpoint")
self.predictor = Predictor(
- endpoint_name=credentials.get("sagemaker_endpoint"),
- sagemaker_session=sagemaker_session,
+ endpoint_name=self.sagemaker_endpoint,
+ sagemaker_session=self.sagemaker_session,
serializer=serializers.JSONSerializer(),
)
diff --git a/api/core/model_runtime/model_providers/sagemaker/text_embedding/text_embedding.py b/api/core/model_runtime/model_providers/sagemaker/text_embedding/text_embedding.py
index d55144f8a7..8f993ce672 100644
--- a/api/core/model_runtime/model_providers/sagemaker/text_embedding/text_embedding.py
+++ b/api/core/model_runtime/model_providers/sagemaker/text_embedding/text_embedding.py
@@ -6,6 +6,7 @@ from typing import Any, Optional
import boto3
+from core.embedding.embedding_constant import EmbeddingInputType
from core.model_runtime.entities.common_entities import I18nObject
from core.model_runtime.entities.model_entities import AIModelEntity, FetchFrom, ModelPropertyKey, ModelType, PriceType
from core.model_runtime.entities.text_embedding_entities import EmbeddingUsage, TextEmbeddingResult
@@ -53,7 +54,12 @@ class SageMakerEmbeddingModel(TextEmbeddingModel):
return embeddings
def _invoke(
- self, model: str, credentials: dict, texts: list[str], user: Optional[str] = None
+ self,
+ model: str,
+ credentials: dict,
+ texts: list[str],
+ user: Optional[str] = None,
+ input_type: EmbeddingInputType = EmbeddingInputType.DOCUMENT,
) -> TextEmbeddingResult:
"""
Invoke text embedding model
@@ -62,6 +68,7 @@ class SageMakerEmbeddingModel(TextEmbeddingModel):
:param credentials: model credentials
:param texts: texts to embed
:param user: unique user id
+ :param input_type: input type
:return: embeddings result
"""
# get model properties
diff --git a/api/core/model_runtime/model_providers/siliconflow/llm/_position.yaml b/api/core/model_runtime/model_providers/siliconflow/llm/_position.yaml
index 43db4aed11..a3e5d0981f 100644
--- a/api/core/model_runtime/model_providers/siliconflow/llm/_position.yaml
+++ b/api/core/model_runtime/model_providers/siliconflow/llm/_position.yaml
@@ -1,25 +1,38 @@
-- Qwen/Qwen2.5-7B-Instruct
-- Qwen/Qwen2.5-14B-Instruct
-- Qwen/Qwen2.5-32B-Instruct
- Qwen/Qwen2.5-72B-Instruct
+- Qwen/Qwen2.5-Math-72B-Instruct
+- Qwen/Qwen2.5-32B-Instruct
+- Qwen/Qwen2.5-14B-Instruct
+- Qwen/Qwen2.5-7B-Instruct
+- Qwen/Qwen2.5-Coder-7B-Instruct
+- deepseek-ai/DeepSeek-V2.5
- Qwen/Qwen2-72B-Instruct
- Qwen/Qwen2-57B-A14B-Instruct
- Qwen/Qwen2-7B-Instruct
- Qwen/Qwen2-1.5B-Instruct
-- 01-ai/Yi-1.5-34B-Chat
-- 01-ai/Yi-1.5-9B-Chat-16K
-- 01-ai/Yi-1.5-6B-Chat
-- THUDM/glm-4-9b-chat
-- deepseek-ai/DeepSeek-V2.5
- deepseek-ai/DeepSeek-V2-Chat
- deepseek-ai/DeepSeek-Coder-V2-Instruct
+- THUDM/glm-4-9b-chat
+- THUDM/chatglm3-6b
+- 01-ai/Yi-1.5-34B-Chat-16K
+- 01-ai/Yi-1.5-9B-Chat-16K
+- 01-ai/Yi-1.5-6B-Chat
+- internlm/internlm2_5-20b-chat
- internlm/internlm2_5-7b-chat
-- google/gemma-2-27b-it
-- google/gemma-2-9b-it
-- meta-llama/Meta-Llama-3-70B-Instruct
-- meta-llama/Meta-Llama-3-8B-Instruct
- meta-llama/Meta-Llama-3.1-405B-Instruct
- meta-llama/Meta-Llama-3.1-70B-Instruct
- meta-llama/Meta-Llama-3.1-8B-Instruct
-- mistralai/Mixtral-8x7B-Instruct-v0.1
+- meta-llama/Meta-Llama-3-70B-Instruct
+- meta-llama/Meta-Llama-3-8B-Instruct
+- google/gemma-2-27b-it
+- google/gemma-2-9b-it
- mistralai/Mistral-7B-Instruct-v0.2
+- Pro/Qwen/Qwen2-7B-Instruct
+- Pro/Qwen/Qwen2-1.5B-Instruct
+- Pro/THUDM/glm-4-9b-chat
+- Pro/THUDM/chatglm3-6b
+- Pro/01-ai/Yi-1.5-9B-Chat-16K
+- Pro/01-ai/Yi-1.5-6B-Chat
+- Pro/internlm/internlm2_5-7b-chat
+- Pro/meta-llama/Meta-Llama-3.1-8B-Instruct
+- Pro/meta-llama/Meta-Llama-3-8B-Instruct
+- Pro/google/gemma-2-9b-it
diff --git a/api/core/model_runtime/model_providers/siliconflow/llm/mistral-7b-instruct-v0.2.yaml b/api/core/model_runtime/model_providers/siliconflow/llm/mistral-7b-instruct-v0.2.yaml
index 27664eab6c..89fb153ba0 100644
--- a/api/core/model_runtime/model_providers/siliconflow/llm/mistral-7b-instruct-v0.2.yaml
+++ b/api/core/model_runtime/model_providers/siliconflow/llm/mistral-7b-instruct-v0.2.yaml
@@ -28,3 +28,4 @@ pricing:
output: '0'
unit: '0.000001'
currency: RMB
+deprecated: true
diff --git a/api/core/model_runtime/model_providers/siliconflow/llm/mistral-8x7b-instruct-v0.1.yaml b/api/core/model_runtime/model_providers/siliconflow/llm/mistral-8x7b-instruct-v0.1.yaml
index fd7aada428..2785e7496f 100644
--- a/api/core/model_runtime/model_providers/siliconflow/llm/mistral-8x7b-instruct-v0.1.yaml
+++ b/api/core/model_runtime/model_providers/siliconflow/llm/mistral-8x7b-instruct-v0.1.yaml
@@ -28,3 +28,4 @@ pricing:
output: '1.26'
unit: '0.000001'
currency: RMB
+deprecated: true
diff --git a/api/core/model_runtime/model_providers/siliconflow/text_embedding/text_embedding.py b/api/core/model_runtime/model_providers/siliconflow/text_embedding/text_embedding.py
index 6cdf4933b4..c5dcc12610 100644
--- a/api/core/model_runtime/model_providers/siliconflow/text_embedding/text_embedding.py
+++ b/api/core/model_runtime/model_providers/siliconflow/text_embedding/text_embedding.py
@@ -1,5 +1,6 @@
from typing import Optional
+from core.embedding.embedding_constant import EmbeddingInputType
from core.model_runtime.entities.text_embedding_entities import TextEmbeddingResult
from core.model_runtime.model_providers.openai_api_compatible.text_embedding.text_embedding import (
OAICompatEmbeddingModel,
@@ -16,8 +17,23 @@ class SiliconflowTextEmbeddingModel(OAICompatEmbeddingModel):
super().validate_credentials(model, credentials)
def _invoke(
- self, model: str, credentials: dict, texts: list[str], user: Optional[str] = None
+ self,
+ model: str,
+ credentials: dict,
+ texts: list[str],
+ user: Optional[str] = None,
+ input_type: EmbeddingInputType = EmbeddingInputType.DOCUMENT,
) -> TextEmbeddingResult:
+ """
+ Invoke text embedding model
+
+ :param model: model name
+ :param credentials: model credentials
+ :param texts: texts to embed
+ :param user: unique user id
+ :param input_type: input type
+ :return: embeddings result
+ """
self._add_custom_parameters(credentials)
return super()._invoke(model, credentials, texts, user)
diff --git a/api/core/model_runtime/model_providers/spark/llm/llm.py b/api/core/model_runtime/model_providers/spark/llm/llm.py
index 57193dc031..1181ba699a 100644
--- a/api/core/model_runtime/model_providers/spark/llm/llm.py
+++ b/api/core/model_runtime/model_providers/spark/llm/llm.py
@@ -213,18 +213,21 @@ class SparkLargeLanguageModel(LargeLanguageModel):
:param prompt_messages: prompt messages
:return: llm response chunk generator result
"""
+ completion = ""
for index, content in enumerate(client.subscribe()):
if isinstance(content, dict):
delta = content["data"]
else:
delta = content
-
+ completion += delta
assistant_prompt_message = AssistantPromptMessage(
content=delta or "",
)
-
+ temp_assistant_prompt_message = AssistantPromptMessage(
+ content=completion,
+ )
prompt_tokens = self.get_num_tokens(model, credentials, prompt_messages)
- completion_tokens = self.get_num_tokens(model, credentials, [assistant_prompt_message])
+ completion_tokens = self.get_num_tokens(model, credentials, [temp_assistant_prompt_message])
# transform usage
usage = self._calc_response_usage(model, credentials, prompt_tokens, completion_tokens)
diff --git a/api/core/model_runtime/model_providers/tongyi/llm/farui-plus.yaml b/api/core/model_runtime/model_providers/tongyi/llm/farui-plus.yaml
index d0ff443827..34a57d1fc0 100644
--- a/api/core/model_runtime/model_providers/tongyi/llm/farui-plus.yaml
+++ b/api/core/model_runtime/model_providers/tongyi/llm/farui-plus.yaml
@@ -1,3 +1,4 @@
+# for more details, please refer to https://help.aliyun.com/zh/model-studio/getting-started/models
model: farui-plus
label:
en_US: farui-plus
diff --git a/api/core/model_runtime/model_providers/tongyi/llm/llm.py b/api/core/model_runtime/model_providers/tongyi/llm/llm.py
index f90c7f075f..3e3585b30a 100644
--- a/api/core/model_runtime/model_providers/tongyi/llm/llm.py
+++ b/api/core/model_runtime/model_providers/tongyi/llm/llm.py
@@ -18,7 +18,7 @@ from dashscope.common.error import (
UnsupportedModel,
)
-from core.model_runtime.entities.llm_entities import LLMResult, LLMResultChunk, LLMResultChunkDelta
+from core.model_runtime.entities.llm_entities import LLMMode, LLMResult, LLMResultChunk, LLMResultChunkDelta
from core.model_runtime.entities.message_entities import (
AssistantPromptMessage,
ImagePromptMessageContent,
@@ -35,6 +35,7 @@ from core.model_runtime.entities.model_entities import (
FetchFrom,
I18nObject,
ModelFeature,
+ ModelPropertyKey,
ModelType,
ParameterRule,
ParameterType,
@@ -97,6 +98,11 @@ class TongyiLargeLanguageModel(LargeLanguageModel):
:param tools: tools for tool calling
:return:
"""
+ # Check if the model was added via get_customizable_model_schema
+ if self.get_customizable_model_schema(model, credentials) is not None:
+ # For custom models, tokens are not calculated.
+ return 0
+
if model in {"qwen-turbo-chat", "qwen-plus-chat"}:
model = model.replace("-chat", "")
if model == "farui-plus":
@@ -537,55 +543,51 @@ class TongyiLargeLanguageModel(LargeLanguageModel):
:param credentials: model credentials
:return: AIModelEntity or None
"""
- rules = [
- ParameterRule(
- name="temperature",
- type=ParameterType.FLOAT,
- use_template="temperature",
- label=I18nObject(zh_Hans="温度", en_US="Temperature"),
- ),
- ParameterRule(
- name="top_p",
- type=ParameterType.FLOAT,
- use_template="top_p",
- label=I18nObject(zh_Hans="Top P", en_US="Top P"),
- ),
- ParameterRule(
- name="top_k",
- type=ParameterType.INT,
- min=0,
- max=99,
- label=I18nObject(zh_Hans="top_k", en_US="top_k"),
- ),
- ParameterRule(
- name="max_tokens",
- type=ParameterType.INT,
- min=1,
- max=128000,
- default=1024,
- label=I18nObject(zh_Hans="最大生成长度", en_US="Max Tokens"),
- ),
- ParameterRule(
- name="seed",
- type=ParameterType.INT,
- default=1234,
- label=I18nObject(zh_Hans="随机种子", en_US="Random Seed"),
- ),
- ParameterRule(
- name="repetition_penalty",
- type=ParameterType.FLOAT,
- default=1.1,
- label=I18nObject(zh_Hans="重复惩罚", en_US="Repetition Penalty"),
- ),
- ]
-
- entity = AIModelEntity(
+ return AIModelEntity(
model=model,
- label=I18nObject(en_US=model),
- fetch_from=FetchFrom.CUSTOMIZABLE_MODEL,
+ label=I18nObject(en_US=model, zh_Hans=model),
model_type=ModelType.LLM,
- model_properties={},
- parameter_rules=rules,
+ features=[ModelFeature.TOOL_CALL, ModelFeature.MULTI_TOOL_CALL, ModelFeature.STREAM_TOOL_CALL]
+ if credentials.get("function_calling_type") == "tool_call"
+ else [],
+ fetch_from=FetchFrom.CUSTOMIZABLE_MODEL,
+ model_properties={
+ ModelPropertyKey.CONTEXT_SIZE: int(credentials.get("context_size", 8000)),
+ ModelPropertyKey.MODE: LLMMode.CHAT.value,
+ },
+ parameter_rules=[
+ ParameterRule(
+ name="temperature",
+ use_template="temperature",
+ label=I18nObject(en_US="Temperature", zh_Hans="温度"),
+ type=ParameterType.FLOAT,
+ ),
+ ParameterRule(
+ name="max_tokens",
+ use_template="max_tokens",
+ default=512,
+ min=1,
+ max=int(credentials.get("max_tokens", 1024)),
+ label=I18nObject(en_US="Max Tokens", zh_Hans="最大标记"),
+ type=ParameterType.INT,
+ ),
+ ParameterRule(
+ name="top_p",
+ use_template="top_p",
+ label=I18nObject(en_US="Top P", zh_Hans="Top P"),
+ type=ParameterType.FLOAT,
+ ),
+ ParameterRule(
+ name="top_k",
+ use_template="top_k",
+ label=I18nObject(en_US="Top K", zh_Hans="Top K"),
+ type=ParameterType.INT,
+ ),
+ ParameterRule(
+ name="frequency_penalty",
+ use_template="frequency_penalty",
+ label=I18nObject(en_US="Frequency Penalty", zh_Hans="频率惩罚"),
+ type=ParameterType.FLOAT,
+ ),
+ ],
)
-
- return entity
diff --git a/api/core/model_runtime/model_providers/tongyi/llm/qwen-coder-turbo-0919.yaml b/api/core/model_runtime/model_providers/tongyi/llm/qwen-coder-turbo-0919.yaml
index d9792e71ee..64a3f33133 100644
--- a/api/core/model_runtime/model_providers/tongyi/llm/qwen-coder-turbo-0919.yaml
+++ b/api/core/model_runtime/model_providers/tongyi/llm/qwen-coder-turbo-0919.yaml
@@ -1,3 +1,4 @@
+# for more details, please refer to https://help.aliyun.com/zh/model-studio/getting-started/models
model: qwen-coder-turbo-0919
label:
en_US: qwen-coder-turbo-0919
diff --git a/api/core/model_runtime/model_providers/tongyi/llm/qwen-coder-turbo-latest.yaml b/api/core/model_runtime/model_providers/tongyi/llm/qwen-coder-turbo-latest.yaml
index 0b03505c45..a4c93f7047 100644
--- a/api/core/model_runtime/model_providers/tongyi/llm/qwen-coder-turbo-latest.yaml
+++ b/api/core/model_runtime/model_providers/tongyi/llm/qwen-coder-turbo-latest.yaml
@@ -1,3 +1,4 @@
+# for more details, please refer to https://help.aliyun.com/zh/model-studio/getting-started/models
model: qwen-coder-turbo-latest
label:
en_US: qwen-coder-turbo-latest
diff --git a/api/core/model_runtime/model_providers/tongyi/llm/qwen-coder-turbo.yaml b/api/core/model_runtime/model_providers/tongyi/llm/qwen-coder-turbo.yaml
index 2a6c040853..ff68faed80 100644
--- a/api/core/model_runtime/model_providers/tongyi/llm/qwen-coder-turbo.yaml
+++ b/api/core/model_runtime/model_providers/tongyi/llm/qwen-coder-turbo.yaml
@@ -1,3 +1,4 @@
+# for more details, please refer to https://help.aliyun.com/zh/model-studio/getting-started/models
model: qwen-coder-turbo
label:
en_US: qwen-coder-turbo
diff --git a/api/core/model_runtime/model_providers/tongyi/llm/qwen-long.yaml b/api/core/model_runtime/model_providers/tongyi/llm/qwen-long.yaml
index bad7f4f472..c3dbb3616f 100644
--- a/api/core/model_runtime/model_providers/tongyi/llm/qwen-long.yaml
+++ b/api/core/model_runtime/model_providers/tongyi/llm/qwen-long.yaml
@@ -1,4 +1,4 @@
-# model docs: https://help.aliyun.com/zh/model-studio/getting-started/models#27b2b3a15d5c6
+# for more details, please refer to https://help.aliyun.com/zh/model-studio/getting-started/models
model: qwen-long
label:
en_US: qwen-long
diff --git a/api/core/model_runtime/model_providers/tongyi/llm/qwen-math-plus-0816.yaml b/api/core/model_runtime/model_providers/tongyi/llm/qwen-math-plus-0816.yaml
index c14aee1e1e..42fe1f6862 100644
--- a/api/core/model_runtime/model_providers/tongyi/llm/qwen-math-plus-0816.yaml
+++ b/api/core/model_runtime/model_providers/tongyi/llm/qwen-math-plus-0816.yaml
@@ -1,3 +1,4 @@
+# for more details, please refer to https://help.aliyun.com/zh/model-studio/getting-started/models
model: qwen-math-plus-0816
label:
en_US: qwen-math-plus-0816
diff --git a/api/core/model_runtime/model_providers/tongyi/llm/qwen-math-plus-0919.yaml b/api/core/model_runtime/model_providers/tongyi/llm/qwen-math-plus-0919.yaml
index 9d74eeca3e..9b6567b8cd 100644
--- a/api/core/model_runtime/model_providers/tongyi/llm/qwen-math-plus-0919.yaml
+++ b/api/core/model_runtime/model_providers/tongyi/llm/qwen-math-plus-0919.yaml
@@ -1,3 +1,4 @@
+# for more details, please refer to https://help.aliyun.com/zh/model-studio/getting-started/models
model: qwen-math-plus-0919
label:
en_US: qwen-math-plus-0919
diff --git a/api/core/model_runtime/model_providers/tongyi/llm/qwen-math-plus-latest.yaml b/api/core/model_runtime/model_providers/tongyi/llm/qwen-math-plus-latest.yaml
index b8601a969a..b2a2393b36 100644
--- a/api/core/model_runtime/model_providers/tongyi/llm/qwen-math-plus-latest.yaml
+++ b/api/core/model_runtime/model_providers/tongyi/llm/qwen-math-plus-latest.yaml
@@ -1,3 +1,4 @@
+# for more details, please refer to https://help.aliyun.com/zh/model-studio/getting-started/models
model: qwen-math-plus-latest
label:
en_US: qwen-math-plus-latest
diff --git a/api/core/model_runtime/model_providers/tongyi/llm/qwen-math-plus.yaml b/api/core/model_runtime/model_providers/tongyi/llm/qwen-math-plus.yaml
index 4a948be597..63f4b7ff0a 100644
--- a/api/core/model_runtime/model_providers/tongyi/llm/qwen-math-plus.yaml
+++ b/api/core/model_runtime/model_providers/tongyi/llm/qwen-math-plus.yaml
@@ -1,3 +1,4 @@
+# for more details, please refer to https://help.aliyun.com/zh/model-studio/getting-started/models
model: qwen-math-plus
label:
en_US: qwen-math-plus
diff --git a/api/core/model_runtime/model_providers/tongyi/llm/qwen-math-turbo-0919.yaml b/api/core/model_runtime/model_providers/tongyi/llm/qwen-math-turbo-0919.yaml
index bffe324a96..4da90eec3e 100644
--- a/api/core/model_runtime/model_providers/tongyi/llm/qwen-math-turbo-0919.yaml
+++ b/api/core/model_runtime/model_providers/tongyi/llm/qwen-math-turbo-0919.yaml
@@ -1,3 +1,4 @@
+# for more details, please refer to https://help.aliyun.com/zh/model-studio/getting-started/models
model: qwen-math-turbo-0919
label:
en_US: qwen-math-turbo-0919
diff --git a/api/core/model_runtime/model_providers/tongyi/llm/qwen-math-turbo-latest.yaml b/api/core/model_runtime/model_providers/tongyi/llm/qwen-math-turbo-latest.yaml
index 0747e96614..d29f8851dd 100644
--- a/api/core/model_runtime/model_providers/tongyi/llm/qwen-math-turbo-latest.yaml
+++ b/api/core/model_runtime/model_providers/tongyi/llm/qwen-math-turbo-latest.yaml
@@ -1,3 +1,4 @@
+# for more details, please refer to https://help.aliyun.com/zh/model-studio/getting-started/models
model: qwen-math-turbo-latest
label:
en_US: qwen-math-turbo-latest
diff --git a/api/core/model_runtime/model_providers/tongyi/llm/qwen-math-turbo.yaml b/api/core/model_runtime/model_providers/tongyi/llm/qwen-math-turbo.yaml
index dffb5557ff..2a8f7f725e 100644
--- a/api/core/model_runtime/model_providers/tongyi/llm/qwen-math-turbo.yaml
+++ b/api/core/model_runtime/model_providers/tongyi/llm/qwen-math-turbo.yaml
@@ -1,3 +1,4 @@
+# for more details, please refer to https://help.aliyun.com/zh/model-studio/getting-started/models
model: qwen-math-turbo
label:
en_US: qwen-math-turbo
diff --git a/api/core/model_runtime/model_providers/tongyi/llm/qwen-max-0107.yaml b/api/core/model_runtime/model_providers/tongyi/llm/qwen-max-0107.yaml
index 8ae159f1bf..ef1841b517 100644
--- a/api/core/model_runtime/model_providers/tongyi/llm/qwen-max-0107.yaml
+++ b/api/core/model_runtime/model_providers/tongyi/llm/qwen-max-0107.yaml
@@ -1,3 +1,5 @@
+# this model corresponds to qwen-max, for more details
+# please refer to (https://help.aliyun.com/zh/model-studio/getting-started/models#cf6cc4aa2aokf)
model: qwen-max-0107
label:
en_US: qwen-max-0107
diff --git a/api/core/model_runtime/model_providers/tongyi/llm/qwen-max-0403.yaml b/api/core/model_runtime/model_providers/tongyi/llm/qwen-max-0403.yaml
index 93fb37254e..a2ea5df130 100644
--- a/api/core/model_runtime/model_providers/tongyi/llm/qwen-max-0403.yaml
+++ b/api/core/model_runtime/model_providers/tongyi/llm/qwen-max-0403.yaml
@@ -1,3 +1,5 @@
+# this model corresponds to qwen-max-0403, for more details
+# please refer to (https://help.aliyun.com/zh/model-studio/getting-started/models#cf6cc4aa2aokf)
model: qwen-max-0403
label:
en_US: qwen-max-0403
diff --git a/api/core/model_runtime/model_providers/tongyi/llm/qwen-max-0428.yaml b/api/core/model_runtime/model_providers/tongyi/llm/qwen-max-0428.yaml
index a5c9d49609..a467665f11 100644
--- a/api/core/model_runtime/model_providers/tongyi/llm/qwen-max-0428.yaml
+++ b/api/core/model_runtime/model_providers/tongyi/llm/qwen-max-0428.yaml
@@ -1,3 +1,5 @@
+# this model corresponds to qwen-max-0428, for more details
+# please refer to (https://help.aliyun.com/zh/model-studio/getting-started/models#cf6cc4aa2aokf)
model: qwen-max-0428
label:
en_US: qwen-max-0428
diff --git a/api/core/model_runtime/model_providers/tongyi/llm/qwen-max-0919.yaml b/api/core/model_runtime/model_providers/tongyi/llm/qwen-max-0919.yaml
index e4a6dae637..78661eaea0 100644
--- a/api/core/model_runtime/model_providers/tongyi/llm/qwen-max-0919.yaml
+++ b/api/core/model_runtime/model_providers/tongyi/llm/qwen-max-0919.yaml
@@ -1,3 +1,5 @@
+# this model corresponds to qwen-max-0919, for more details
+# please refer to (https://help.aliyun.com/zh/model-studio/getting-started/models#cf6cc4aa2aokf)
model: qwen-max-0919
label:
en_US: qwen-max-0919
diff --git a/api/core/model_runtime/model_providers/tongyi/llm/qwen-max-1201.yaml b/api/core/model_runtime/model_providers/tongyi/llm/qwen-max-1201.yaml
index 6fae8a7d38..6f4674576b 100644
--- a/api/core/model_runtime/model_providers/tongyi/llm/qwen-max-1201.yaml
+++ b/api/core/model_runtime/model_providers/tongyi/llm/qwen-max-1201.yaml
@@ -1,3 +1,5 @@
+# this model corresponds to qwen-max, for more details
+# please refer to (https://help.aliyun.com/zh/model-studio/getting-started/models#cf6cc4aa2aokf)
model: qwen-max-1201
label:
en_US: qwen-max-1201
diff --git a/api/core/model_runtime/model_providers/tongyi/llm/qwen-max-latest.yaml b/api/core/model_runtime/model_providers/tongyi/llm/qwen-max-latest.yaml
index 8e20968859..8b5f005473 100644
--- a/api/core/model_runtime/model_providers/tongyi/llm/qwen-max-latest.yaml
+++ b/api/core/model_runtime/model_providers/tongyi/llm/qwen-max-latest.yaml
@@ -1,3 +1,5 @@
+# this model corresponds to qwen-max, for more details
+# please refer to (https://help.aliyun.com/zh/model-studio/getting-started/models#cf6cc4aa2aokf)
model: qwen-max-latest
label:
en_US: qwen-max-latest
diff --git a/api/core/model_runtime/model_providers/tongyi/llm/qwen-max-longcontext.yaml b/api/core/model_runtime/model_providers/tongyi/llm/qwen-max-longcontext.yaml
index 9bc50c73fc..098494ff95 100644
--- a/api/core/model_runtime/model_providers/tongyi/llm/qwen-max-longcontext.yaml
+++ b/api/core/model_runtime/model_providers/tongyi/llm/qwen-max-longcontext.yaml
@@ -1,3 +1,5 @@
+# this model corresponds to qwen-max, for more details
+# please refer to (https://help.aliyun.com/zh/model-studio/getting-started/models#cf6cc4aa2aokf)
model: qwen-max-longcontext
label:
en_US: qwen-max-longcontext
diff --git a/api/core/model_runtime/model_providers/tongyi/llm/qwen-max.yaml b/api/core/model_runtime/model_providers/tongyi/llm/qwen-max.yaml
index c6a64dc507..9d0d3f8db3 100644
--- a/api/core/model_runtime/model_providers/tongyi/llm/qwen-max.yaml
+++ b/api/core/model_runtime/model_providers/tongyi/llm/qwen-max.yaml
@@ -1,3 +1,5 @@
+# this model corresponds to qwen-max, for more details
+# please refer to (https://help.aliyun.com/zh/model-studio/getting-started/models#cf6cc4aa2aokf)
model: qwen-max
label:
en_US: qwen-max
diff --git a/api/core/model_runtime/model_providers/tongyi/llm/qwen-plus-0206.yaml b/api/core/model_runtime/model_providers/tongyi/llm/qwen-plus-0206.yaml
index 430599300b..0b1a6f81df 100644
--- a/api/core/model_runtime/model_providers/tongyi/llm/qwen-plus-0206.yaml
+++ b/api/core/model_runtime/model_providers/tongyi/llm/qwen-plus-0206.yaml
@@ -1,3 +1,5 @@
+# this model corresponds to qwen-plus-0206, for more details
+# please refer to (https://help.aliyun.com/zh/model-studio/getting-started/models#bb0ffee88bwnk)
model: qwen-plus-0206
label:
en_US: qwen-plus-0206
diff --git a/api/core/model_runtime/model_providers/tongyi/llm/qwen-plus-0624.yaml b/api/core/model_runtime/model_providers/tongyi/llm/qwen-plus-0624.yaml
index 906995d2b9..7706005bb5 100644
--- a/api/core/model_runtime/model_providers/tongyi/llm/qwen-plus-0624.yaml
+++ b/api/core/model_runtime/model_providers/tongyi/llm/qwen-plus-0624.yaml
@@ -1,3 +1,5 @@
+# this model corresponds to qwen-plus-0624, for more details
+# please refer to (https://help.aliyun.com/zh/model-studio/getting-started/models#bb0ffee88bwnk)
model: qwen-plus-0624
label:
en_US: qwen-plus-0624
diff --git a/api/core/model_runtime/model_providers/tongyi/llm/qwen-plus-0723.yaml b/api/core/model_runtime/model_providers/tongyi/llm/qwen-plus-0723.yaml
index b33e725dd0..348276fc08 100644
--- a/api/core/model_runtime/model_providers/tongyi/llm/qwen-plus-0723.yaml
+++ b/api/core/model_runtime/model_providers/tongyi/llm/qwen-plus-0723.yaml
@@ -1,3 +1,5 @@
+# this model corresponds to qwen-plus-0723, for more details
+# please refer to (https://help.aliyun.com/zh/model-studio/getting-started/models#bb0ffee88bwnk)
model: qwen-plus-0723
label:
en_US: qwen-plus-0723
diff --git a/api/core/model_runtime/model_providers/tongyi/llm/qwen-plus-0806.yaml b/api/core/model_runtime/model_providers/tongyi/llm/qwen-plus-0806.yaml
index bb394fad81..29f125135e 100644
--- a/api/core/model_runtime/model_providers/tongyi/llm/qwen-plus-0806.yaml
+++ b/api/core/model_runtime/model_providers/tongyi/llm/qwen-plus-0806.yaml
@@ -1,3 +1,5 @@
+# this model corresponds to qwen-plus-0806, for more details
+# please refer to (https://help.aliyun.com/zh/model-studio/getting-started/models#bb0ffee88bwnk)
model: qwen-plus-0806
label:
en_US: qwen-plus-0806
diff --git a/api/core/model_runtime/model_providers/tongyi/llm/qwen-plus-0919.yaml b/api/core/model_runtime/model_providers/tongyi/llm/qwen-plus-0919.yaml
index 118e304a97..905fa1e102 100644
--- a/api/core/model_runtime/model_providers/tongyi/llm/qwen-plus-0919.yaml
+++ b/api/core/model_runtime/model_providers/tongyi/llm/qwen-plus-0919.yaml
@@ -1,3 +1,5 @@
+# this model corresponds to qwen-plus-0919, for more details
+# please refer to (https://help.aliyun.com/zh/model-studio/getting-started/models#bb0ffee88bwnk)
model: qwen-plus-0919
label:
en_US: qwen-plus-0919
diff --git a/api/core/model_runtime/model_providers/tongyi/llm/qwen-plus-chat.yaml b/api/core/model_runtime/model_providers/tongyi/llm/qwen-plus-chat.yaml
index 761312bc38..c7a3549727 100644
--- a/api/core/model_runtime/model_providers/tongyi/llm/qwen-plus-chat.yaml
+++ b/api/core/model_runtime/model_providers/tongyi/llm/qwen-plus-chat.yaml
@@ -1,3 +1,5 @@
+# this model corresponds to qwen-plus, for more details
+# please refer to (https://help.aliyun.com/zh/model-studio/getting-started/models#bb0ffee88bwnk)
model: qwen-plus-chat
label:
en_US: qwen-plus-chat
diff --git a/api/core/model_runtime/model_providers/tongyi/llm/qwen-plus-latest.yaml b/api/core/model_runtime/model_providers/tongyi/llm/qwen-plus-latest.yaml
index 430872fb31..608f52c296 100644
--- a/api/core/model_runtime/model_providers/tongyi/llm/qwen-plus-latest.yaml
+++ b/api/core/model_runtime/model_providers/tongyi/llm/qwen-plus-latest.yaml
@@ -1,3 +1,5 @@
+# this model corresponds to qwen-plus-latest, for more details
+# please refer to (https://help.aliyun.com/zh/model-studio/getting-started/models#bb0ffee88bwnk)
model: qwen-plus-latest
label:
en_US: qwen-plus-latest
diff --git a/api/core/model_runtime/model_providers/tongyi/llm/qwen-plus.yaml b/api/core/model_runtime/model_providers/tongyi/llm/qwen-plus.yaml
index f3fce30209..9089e57255 100644
--- a/api/core/model_runtime/model_providers/tongyi/llm/qwen-plus.yaml
+++ b/api/core/model_runtime/model_providers/tongyi/llm/qwen-plus.yaml
@@ -1,3 +1,5 @@
+# this model corresponds to qwen-plus, for more details
+# please refer to (https://help.aliyun.com/zh/model-studio/getting-started/models#bb0ffee88bwnk)
model: qwen-plus
label:
en_US: qwen-plus
diff --git a/api/core/model_runtime/model_providers/tongyi/llm/qwen-turbo-0206.yaml b/api/core/model_runtime/model_providers/tongyi/llm/qwen-turbo-0206.yaml
index 2628d824fe..7ee0d44f2f 100644
--- a/api/core/model_runtime/model_providers/tongyi/llm/qwen-turbo-0206.yaml
+++ b/api/core/model_runtime/model_providers/tongyi/llm/qwen-turbo-0206.yaml
@@ -1,3 +1,5 @@
+# this model corresponds to qwen-turbo-0206, for more details
+# please refer to (https://help.aliyun.com/zh/model-studio/getting-started/models#ff492e2c10lub)
model: qwen-turbo-0206
label:
en_US: qwen-turbo-0206
diff --git a/api/core/model_runtime/model_providers/tongyi/llm/qwen-turbo-0624.yaml b/api/core/model_runtime/model_providers/tongyi/llm/qwen-turbo-0624.yaml
index 8097459bf0..20a3f7eb64 100644
--- a/api/core/model_runtime/model_providers/tongyi/llm/qwen-turbo-0624.yaml
+++ b/api/core/model_runtime/model_providers/tongyi/llm/qwen-turbo-0624.yaml
@@ -1,3 +1,5 @@
+# this model corresponds to qwen-turbo-0624, for more details
+# please refer to (https://help.aliyun.com/zh/model-studio/getting-started/models#ff492e2c10lub)
model: qwen-turbo-0624
label:
en_US: qwen-turbo-0624
diff --git a/api/core/model_runtime/model_providers/tongyi/llm/qwen-turbo-0919.yaml b/api/core/model_runtime/model_providers/tongyi/llm/qwen-turbo-0919.yaml
index e43beeb195..ba73dec363 100644
--- a/api/core/model_runtime/model_providers/tongyi/llm/qwen-turbo-0919.yaml
+++ b/api/core/model_runtime/model_providers/tongyi/llm/qwen-turbo-0919.yaml
@@ -1,3 +1,5 @@
+# this model corresponds to qwen-turbo-0919, for more details
+# please refer to (https://help.aliyun.com/zh/model-studio/getting-started/models#ff492e2c10lub)
model: qwen-turbo-0919
label:
en_US: qwen-turbo-0919
diff --git a/api/core/model_runtime/model_providers/tongyi/llm/qwen-turbo-chat.yaml b/api/core/model_runtime/model_providers/tongyi/llm/qwen-turbo-chat.yaml
index c30cb7ca10..d785b7fe85 100644
--- a/api/core/model_runtime/model_providers/tongyi/llm/qwen-turbo-chat.yaml
+++ b/api/core/model_runtime/model_providers/tongyi/llm/qwen-turbo-chat.yaml
@@ -1,3 +1,5 @@
+# this model corresponds to qwen-turbo, for more details
+# please refer to (https://help.aliyun.com/zh/model-studio/getting-started/models#ff492e2c10lub)
model: qwen-turbo-chat
label:
en_US: qwen-turbo-chat
diff --git a/api/core/model_runtime/model_providers/tongyi/llm/qwen-turbo-latest.yaml b/api/core/model_runtime/model_providers/tongyi/llm/qwen-turbo-latest.yaml
index e443d6888b..fe38a4283c 100644
--- a/api/core/model_runtime/model_providers/tongyi/llm/qwen-turbo-latest.yaml
+++ b/api/core/model_runtime/model_providers/tongyi/llm/qwen-turbo-latest.yaml
@@ -1,3 +1,5 @@
+# this model corresponds to qwen-turbo-latest, for more details
+# please refer to (https://help.aliyun.com/zh/model-studio/getting-started/models#ff492e2c10lub)
model: qwen-turbo-latest
label:
en_US: qwen-turbo-latest
diff --git a/api/core/model_runtime/model_providers/tongyi/llm/qwen-turbo.yaml b/api/core/model_runtime/model_providers/tongyi/llm/qwen-turbo.yaml
index 33f05967c2..215c9ec5fc 100644
--- a/api/core/model_runtime/model_providers/tongyi/llm/qwen-turbo.yaml
+++ b/api/core/model_runtime/model_providers/tongyi/llm/qwen-turbo.yaml
@@ -1,3 +1,5 @@
+# this model corresponds to qwen-turbo, for more details
+# please refer to (https://help.aliyun.com/zh/model-studio/getting-started/models#ff492e2c10lub)
model: qwen-turbo
label:
en_US: qwen-turbo
diff --git a/api/core/model_runtime/model_providers/tongyi/llm/qwen-vl-max-0201.yaml b/api/core/model_runtime/model_providers/tongyi/llm/qwen-vl-max-0201.yaml
index 63b6074d0d..d80168ffc3 100644
--- a/api/core/model_runtime/model_providers/tongyi/llm/qwen-vl-max-0201.yaml
+++ b/api/core/model_runtime/model_providers/tongyi/llm/qwen-vl-max-0201.yaml
@@ -1,3 +1,4 @@
+# for more details, please refer to https://help.aliyun.com/zh/model-studio/getting-started/models
model: qwen-vl-max-0201
label:
en_US: qwen-vl-max-0201
diff --git a/api/core/model_runtime/model_providers/tongyi/llm/qwen-vl-max-0809.yaml b/api/core/model_runtime/model_providers/tongyi/llm/qwen-vl-max-0809.yaml
index fd20377002..50e10226a5 100644
--- a/api/core/model_runtime/model_providers/tongyi/llm/qwen-vl-max-0809.yaml
+++ b/api/core/model_runtime/model_providers/tongyi/llm/qwen-vl-max-0809.yaml
@@ -1,3 +1,4 @@
+# for more details, please refer to https://help.aliyun.com/zh/model-studio/getting-started/models
model: qwen-vl-max-0809
label:
en_US: qwen-vl-max-0809
diff --git a/api/core/model_runtime/model_providers/tongyi/llm/qwen-vl-max.yaml b/api/core/model_runtime/model_providers/tongyi/llm/qwen-vl-max.yaml
index 31a9fb51bb..21b127f56c 100644
--- a/api/core/model_runtime/model_providers/tongyi/llm/qwen-vl-max.yaml
+++ b/api/core/model_runtime/model_providers/tongyi/llm/qwen-vl-max.yaml
@@ -1,3 +1,4 @@
+# for more details, please refer to https://help.aliyun.com/zh/model-studio/getting-started/models
model: qwen-vl-max
label:
en_US: qwen-vl-max
diff --git a/api/core/model_runtime/model_providers/tongyi/llm/qwen-vl-plus-0201.yaml b/api/core/model_runtime/model_providers/tongyi/llm/qwen-vl-plus-0201.yaml
index 5f90cf48bc..03cb039d15 100644
--- a/api/core/model_runtime/model_providers/tongyi/llm/qwen-vl-plus-0201.yaml
+++ b/api/core/model_runtime/model_providers/tongyi/llm/qwen-vl-plus-0201.yaml
@@ -1,3 +1,4 @@
+# for more details, please refer to https://help.aliyun.com/zh/model-studio/getting-started/models
model: qwen-vl-plus-0201
label:
en_US: qwen-vl-plus-0201
diff --git a/api/core/model_runtime/model_providers/tongyi/llm/qwen-vl-plus-0809.yaml b/api/core/model_runtime/model_providers/tongyi/llm/qwen-vl-plus-0809.yaml
index 97820c0f3a..67b2b2ebdd 100644
--- a/api/core/model_runtime/model_providers/tongyi/llm/qwen-vl-plus-0809.yaml
+++ b/api/core/model_runtime/model_providers/tongyi/llm/qwen-vl-plus-0809.yaml
@@ -1,3 +1,4 @@
+# for more details, please refer to https://help.aliyun.com/zh/model-studio/getting-started/models
model: qwen-vl-plus-0809
label:
en_US: qwen-vl-plus-0809
diff --git a/api/core/model_runtime/model_providers/tongyi/llm/qwen-vl-plus.yaml b/api/core/model_runtime/model_providers/tongyi/llm/qwen-vl-plus.yaml
index 6af36cd6f3..f55764c6c0 100644
--- a/api/core/model_runtime/model_providers/tongyi/llm/qwen-vl-plus.yaml
+++ b/api/core/model_runtime/model_providers/tongyi/llm/qwen-vl-plus.yaml
@@ -1,3 +1,4 @@
+# for more details, please refer to https://help.aliyun.com/zh/model-studio/getting-started/models
model: qwen-vl-plus
label:
en_US: qwen-vl-plus
diff --git a/api/core/model_runtime/model_providers/tongyi/llm/qwen2-math-1.5b-instruct.yaml b/api/core/model_runtime/model_providers/tongyi/llm/qwen2-math-1.5b-instruct.yaml
index 158e2c7ee1..ea157f42de 100644
--- a/api/core/model_runtime/model_providers/tongyi/llm/qwen2-math-1.5b-instruct.yaml
+++ b/api/core/model_runtime/model_providers/tongyi/llm/qwen2-math-1.5b-instruct.yaml
@@ -1,3 +1,4 @@
+# for more details, please refer to https://help.aliyun.com/zh/model-studio/getting-started/models
model: qwen2-math-1.5b-instruct
label:
en_US: qwen2-math-1.5b-instruct
diff --git a/api/core/model_runtime/model_providers/tongyi/llm/qwen2-math-72b-instruct.yaml b/api/core/model_runtime/model_providers/tongyi/llm/qwen2-math-72b-instruct.yaml
index e26a6923d1..37052a9233 100644
--- a/api/core/model_runtime/model_providers/tongyi/llm/qwen2-math-72b-instruct.yaml
+++ b/api/core/model_runtime/model_providers/tongyi/llm/qwen2-math-72b-instruct.yaml
@@ -1,3 +1,4 @@
+# for more details, please refer to https://help.aliyun.com/zh/model-studio/getting-started/models
model: qwen2-math-72b-instruct
label:
en_US: qwen2-math-72b-instruct
diff --git a/api/core/model_runtime/model_providers/tongyi/llm/qwen2-math-7b-instruct.yaml b/api/core/model_runtime/model_providers/tongyi/llm/qwen2-math-7b-instruct.yaml
index 589119b26e..e182f1c27f 100644
--- a/api/core/model_runtime/model_providers/tongyi/llm/qwen2-math-7b-instruct.yaml
+++ b/api/core/model_runtime/model_providers/tongyi/llm/qwen2-math-7b-instruct.yaml
@@ -1,3 +1,4 @@
+# for more details, please refer to https://help.aliyun.com/zh/model-studio/getting-started/models
model: qwen2-math-7b-instruct
label:
en_US: qwen2-math-7b-instruct
diff --git a/api/core/model_runtime/model_providers/tongyi/llm/qwen2.5-0.5b-instruct.yaml b/api/core/model_runtime/model_providers/tongyi/llm/qwen2.5-0.5b-instruct.yaml
index dd608fbf76..9e75ccc1f2 100644
--- a/api/core/model_runtime/model_providers/tongyi/llm/qwen2.5-0.5b-instruct.yaml
+++ b/api/core/model_runtime/model_providers/tongyi/llm/qwen2.5-0.5b-instruct.yaml
@@ -1,3 +1,4 @@
+# for more details, please refer to https://help.aliyun.com/zh/model-studio/getting-started/models
model: qwen2.5-0.5b-instruct
label:
en_US: qwen2.5-0.5b-instruct
diff --git a/api/core/model_runtime/model_providers/tongyi/llm/qwen2.5-1.5b-instruct.yaml b/api/core/model_runtime/model_providers/tongyi/llm/qwen2.5-1.5b-instruct.yaml
index 08237b3958..67c9d31243 100644
--- a/api/core/model_runtime/model_providers/tongyi/llm/qwen2.5-1.5b-instruct.yaml
+++ b/api/core/model_runtime/model_providers/tongyi/llm/qwen2.5-1.5b-instruct.yaml
@@ -1,3 +1,4 @@
+# for more details, please refer to https://help.aliyun.com/zh/model-studio/getting-started/models
model: qwen2.5-1.5b-instruct
label:
en_US: qwen2.5-1.5b-instruct
diff --git a/api/core/model_runtime/model_providers/tongyi/llm/qwen2.5-14b-instruct.yaml b/api/core/model_runtime/model_providers/tongyi/llm/qwen2.5-14b-instruct.yaml
index 640b019703..2a38be921c 100644
--- a/api/core/model_runtime/model_providers/tongyi/llm/qwen2.5-14b-instruct.yaml
+++ b/api/core/model_runtime/model_providers/tongyi/llm/qwen2.5-14b-instruct.yaml
@@ -1,3 +1,4 @@
+# for more details, please refer to https://help.aliyun.com/zh/model-studio/getting-started/models
model: qwen2.5-14b-instruct
label:
en_US: qwen2.5-14b-instruct
diff --git a/api/core/model_runtime/model_providers/tongyi/llm/qwen2.5-32b-instruct.yaml b/api/core/model_runtime/model_providers/tongyi/llm/qwen2.5-32b-instruct.yaml
index 3a90ca7532..e6e4fbf978 100644
--- a/api/core/model_runtime/model_providers/tongyi/llm/qwen2.5-32b-instruct.yaml
+++ b/api/core/model_runtime/model_providers/tongyi/llm/qwen2.5-32b-instruct.yaml
@@ -1,3 +1,4 @@
+# for more details, please refer to https://help.aliyun.com/zh/model-studio/getting-started/models
model: qwen2.5-32b-instruct
label:
en_US: qwen2.5-32b-instruct
diff --git a/api/core/model_runtime/model_providers/tongyi/llm/qwen2.5-3b-instruct.yaml b/api/core/model_runtime/model_providers/tongyi/llm/qwen2.5-3b-instruct.yaml
index b79755eb9b..8f250379a7 100644
--- a/api/core/model_runtime/model_providers/tongyi/llm/qwen2.5-3b-instruct.yaml
+++ b/api/core/model_runtime/model_providers/tongyi/llm/qwen2.5-3b-instruct.yaml
@@ -1,3 +1,4 @@
+# for more details, please refer to https://help.aliyun.com/zh/model-studio/getting-started/models
model: qwen2.5-3b-instruct
label:
en_US: qwen2.5-3b-instruct
diff --git a/api/core/model_runtime/model_providers/tongyi/llm/qwen2.5-72b-instruct.yaml b/api/core/model_runtime/model_providers/tongyi/llm/qwen2.5-72b-instruct.yaml
index e9dd51a341..bb3cdd6141 100644
--- a/api/core/model_runtime/model_providers/tongyi/llm/qwen2.5-72b-instruct.yaml
+++ b/api/core/model_runtime/model_providers/tongyi/llm/qwen2.5-72b-instruct.yaml
@@ -1,3 +1,4 @@
+# for more details, please refer to https://help.aliyun.com/zh/model-studio/getting-started/models
model: qwen2.5-72b-instruct
label:
en_US: qwen2.5-72b-instruct
diff --git a/api/core/model_runtime/model_providers/tongyi/llm/qwen2.5-7b-instruct.yaml b/api/core/model_runtime/model_providers/tongyi/llm/qwen2.5-7b-instruct.yaml
index 04f26cf5fe..fdcd3d4275 100644
--- a/api/core/model_runtime/model_providers/tongyi/llm/qwen2.5-7b-instruct.yaml
+++ b/api/core/model_runtime/model_providers/tongyi/llm/qwen2.5-7b-instruct.yaml
@@ -1,3 +1,4 @@
+# for more details, please refer to https://help.aliyun.com/zh/model-studio/getting-started/models
model: qwen2.5-7b-instruct
label:
en_US: qwen2.5-7b-instruct
diff --git a/api/core/model_runtime/model_providers/tongyi/llm/qwen2.5-coder-7b-instruct.yaml b/api/core/model_runtime/model_providers/tongyi/llm/qwen2.5-coder-7b-instruct.yaml
index 04f26cf5fe..fdcd3d4275 100644
--- a/api/core/model_runtime/model_providers/tongyi/llm/qwen2.5-coder-7b-instruct.yaml
+++ b/api/core/model_runtime/model_providers/tongyi/llm/qwen2.5-coder-7b-instruct.yaml
@@ -1,3 +1,4 @@
+# for more details, please refer to https://help.aliyun.com/zh/model-studio/getting-started/models
-model: qwen2.5-7b-instruct
+model: qwen2.5-coder-7b-instruct
 label:
-  en_US: qwen2.5-7b-instruct
+  en_US: qwen2.5-coder-7b-instruct
diff --git a/api/core/model_runtime/model_providers/tongyi/text_embedding/text-embedding-v1.yaml b/api/core/model_runtime/model_providers/tongyi/text_embedding/text-embedding-v1.yaml
index f4303c53d3..52e35d8b50 100644
--- a/api/core/model_runtime/model_providers/tongyi/text_embedding/text-embedding-v1.yaml
+++ b/api/core/model_runtime/model_providers/tongyi/text_embedding/text-embedding-v1.yaml
@@ -1,3 +1,4 @@
+# for more details, please refer to https://help.aliyun.com/zh/model-studio/getting-started/models#3383780daf8hw
model: text-embedding-v1
model_type: text-embedding
model_properties:
diff --git a/api/core/model_runtime/model_providers/tongyi/text_embedding/text-embedding-v2.yaml b/api/core/model_runtime/model_providers/tongyi/text_embedding/text-embedding-v2.yaml
index f6be3544ed..5bb6a8f424 100644
--- a/api/core/model_runtime/model_providers/tongyi/text_embedding/text-embedding-v2.yaml
+++ b/api/core/model_runtime/model_providers/tongyi/text_embedding/text-embedding-v2.yaml
@@ -1,3 +1,4 @@
+# for more details, please refer to https://help.aliyun.com/zh/model-studio/getting-started/models#3383780daf8hw
model: text-embedding-v2
model_type: text-embedding
model_properties:
diff --git a/api/core/model_runtime/model_providers/tongyi/text_embedding/text-embedding-v3.yaml b/api/core/model_runtime/model_providers/tongyi/text_embedding/text-embedding-v3.yaml
index 171a379ee2..d8af0e2b63 100644
--- a/api/core/model_runtime/model_providers/tongyi/text_embedding/text-embedding-v3.yaml
+++ b/api/core/model_runtime/model_providers/tongyi/text_embedding/text-embedding-v3.yaml
@@ -1,3 +1,4 @@
+# for more details, please refer to https://help.aliyun.com/zh/model-studio/getting-started/models#3383780daf8hw
model: text-embedding-v3
model_type: text-embedding
model_properties:
diff --git a/api/core/model_runtime/model_providers/tongyi/text_embedding/text_embedding.py b/api/core/model_runtime/model_providers/tongyi/text_embedding/text_embedding.py
index 5783d2e383..736cd44df8 100644
--- a/api/core/model_runtime/model_providers/tongyi/text_embedding/text_embedding.py
+++ b/api/core/model_runtime/model_providers/tongyi/text_embedding/text_embedding.py
@@ -4,6 +4,7 @@ from typing import Optional
import dashscope
import numpy as np
+from core.embedding.embedding_constant import EmbeddingInputType
from core.model_runtime.entities.model_entities import PriceType
from core.model_runtime.entities.text_embedding_entities import (
EmbeddingUsage,
@@ -27,6 +28,7 @@ class TongyiTextEmbeddingModel(_CommonTongyi, TextEmbeddingModel):
credentials: dict,
texts: list[str],
user: Optional[str] = None,
+ input_type: EmbeddingInputType = EmbeddingInputType.DOCUMENT,
) -> TextEmbeddingResult:
"""
Invoke text embedding model
@@ -35,6 +37,7 @@ class TongyiTextEmbeddingModel(_CommonTongyi, TextEmbeddingModel):
:param credentials: model credentials
:param texts: texts to embed
:param user: unique user id
+ :param input_type: input type
:return: embeddings result
"""
credentials_kwargs = self._to_credential_kwargs(credentials)
diff --git a/api/core/model_runtime/model_providers/tongyi/tongyi.yaml b/api/core/model_runtime/model_providers/tongyi/tongyi.yaml
index fabe6d90e6..1a09c20fd9 100644
--- a/api/core/model_runtime/model_providers/tongyi/tongyi.yaml
+++ b/api/core/model_runtime/model_providers/tongyi/tongyi.yaml
@@ -37,14 +37,51 @@ model_credential_schema:
en_US: Model Name
zh_Hans: 模型名称
placeholder:
- en_US: Enter full model name
- zh_Hans: 输入模型全称
+ en_US: Enter your model name
+ zh_Hans: 输入模型名称
credential_form_schemas:
- variable: dashscope_api_key
- required: true
label:
en_US: API Key
type: secret-input
+ required: true
placeholder:
zh_Hans: 在此输入您的 API Key
en_US: Enter your API Key
+ - variable: context_size
+ label:
+ zh_Hans: 模型上下文长度
+ en_US: Model context size
+ required: true
+ type: text-input
+ default: '4096'
+ placeholder:
+ zh_Hans: 在此输入您的模型上下文长度
+ en_US: Enter your Model context size
+ - variable: max_tokens
+ label:
+ zh_Hans: 最大 token 上限
+ en_US: Upper bound for max tokens
+ default: '4096'
+ type: text-input
+ show_on:
+ - variable: __model_type
+ value: llm
+ - variable: function_calling_type
+ label:
+ en_US: Function calling
+ type: select
+ required: false
+ default: no_call
+ options:
+ - value: no_call
+ label:
+ en_US: Not Support
+ zh_Hans: 不支持
+ - value: function_call
+ label:
+ en_US: Support
+ zh_Hans: 支持
+ show_on:
+ - variable: __model_type
+ value: llm
diff --git a/api/core/model_runtime/model_providers/upstage/text_embedding/text_embedding.py b/api/core/model_runtime/model_providers/upstage/text_embedding/text_embedding.py
index edd4a36d98..b6509cd26c 100644
--- a/api/core/model_runtime/model_providers/upstage/text_embedding/text_embedding.py
+++ b/api/core/model_runtime/model_providers/upstage/text_embedding/text_embedding.py
@@ -7,6 +7,7 @@ import numpy as np
from openai import OpenAI
from tokenizers import Tokenizer
+from core.embedding.embedding_constant import EmbeddingInputType
from core.model_runtime.entities.model_entities import PriceType
from core.model_runtime.entities.text_embedding_entities import EmbeddingUsage, TextEmbeddingResult
from core.model_runtime.errors.validate import CredentialsValidateFailedError
@@ -22,7 +23,14 @@ class UpstageTextEmbeddingModel(_CommonUpstage, TextEmbeddingModel):
def _get_tokenizer(self) -> Tokenizer:
return Tokenizer.from_pretrained("upstage/solar-1-mini-tokenizer")
- def _invoke(self, model: str, credentials: dict, texts: list[str], user: str | None = None) -> TextEmbeddingResult:
+ def _invoke(
+ self,
+ model: str,
+ credentials: dict,
+ texts: list[str],
+ user: str | None = None,
+ input_type: EmbeddingInputType = EmbeddingInputType.DOCUMENT,
+ ) -> TextEmbeddingResult:
"""
Invoke text embedding model
@@ -30,6 +38,7 @@ class UpstageTextEmbeddingModel(_CommonUpstage, TextEmbeddingModel):
:param credentials: model credentials
:param texts: texts to embed
:param user: unique user id
+ :param input_type: input type
:return: embeddings result
"""
diff --git a/api/core/model_runtime/model_providers/vertex_ai/llm/gemini-1.5-flash.yaml b/api/core/model_runtime/model_providers/vertex_ai/llm/gemini-1.5-flash-001.yaml
similarity index 96%
rename from api/core/model_runtime/model_providers/vertex_ai/llm/gemini-1.5-flash.yaml
rename to api/core/model_runtime/model_providers/vertex_ai/llm/gemini-1.5-flash-001.yaml
index c308f0a322..f5386be06d 100644
--- a/api/core/model_runtime/model_providers/vertex_ai/llm/gemini-1.5-flash.yaml
+++ b/api/core/model_runtime/model_providers/vertex_ai/llm/gemini-1.5-flash-001.yaml
@@ -1,6 +1,6 @@
model: gemini-1.5-flash-001
label:
- en_US: Gemini 1.5 Flash
+ en_US: Gemini 1.5 Flash 001
model_type: llm
features:
- agent-thought
diff --git a/api/core/model_runtime/model_providers/vertex_ai/llm/gemini-1.5-flash-002.yaml b/api/core/model_runtime/model_providers/vertex_ai/llm/gemini-1.5-flash-002.yaml
new file mode 100644
index 0000000000..97bd44f06b
--- /dev/null
+++ b/api/core/model_runtime/model_providers/vertex_ai/llm/gemini-1.5-flash-002.yaml
@@ -0,0 +1,37 @@
+model: gemini-1.5-flash-002
+label:
+ en_US: Gemini 1.5 Flash 002
+model_type: llm
+features:
+ - agent-thought
+ - vision
+model_properties:
+ mode: chat
+ context_size: 1048576
+parameter_rules:
+ - name: temperature
+ use_template: temperature
+ - name: top_p
+ use_template: top_p
+ - name: top_k
+ label:
+ en_US: Top k
+ type: int
+ help:
+ en_US: Only sample from the top K options for each subsequent token.
+ required: false
+ - name: presence_penalty
+ use_template: presence_penalty
+ - name: frequency_penalty
+ use_template: frequency_penalty
+ - name: max_output_tokens
+ use_template: max_tokens
+ required: true
+ default: 8192
+ min: 1
+ max: 8192
+pricing:
+ input: '0.00'
+ output: '0.00'
+ unit: '0.000001'
+ currency: USD
diff --git a/api/core/model_runtime/model_providers/vertex_ai/llm/gemini-1.5-pro.yaml b/api/core/model_runtime/model_providers/vertex_ai/llm/gemini-1.5-pro-001.yaml
similarity index 96%
rename from api/core/model_runtime/model_providers/vertex_ai/llm/gemini-1.5-pro.yaml
rename to api/core/model_runtime/model_providers/vertex_ai/llm/gemini-1.5-pro-001.yaml
index 744863e773..5e08f2294e 100644
--- a/api/core/model_runtime/model_providers/vertex_ai/llm/gemini-1.5-pro.yaml
+++ b/api/core/model_runtime/model_providers/vertex_ai/llm/gemini-1.5-pro-001.yaml
@@ -1,6 +1,6 @@
model: gemini-1.5-pro-001
label:
- en_US: Gemini 1.5 Pro
+ en_US: Gemini 1.5 Pro 001
model_type: llm
features:
- agent-thought
diff --git a/api/core/model_runtime/model_providers/vertex_ai/llm/gemini-1.5-pro-002.yaml b/api/core/model_runtime/model_providers/vertex_ai/llm/gemini-1.5-pro-002.yaml
new file mode 100644
index 0000000000..8f327ea2f3
--- /dev/null
+++ b/api/core/model_runtime/model_providers/vertex_ai/llm/gemini-1.5-pro-002.yaml
@@ -0,0 +1,37 @@
+model: gemini-1.5-pro-002
+label:
+ en_US: Gemini 1.5 Pro 002
+model_type: llm
+features:
+ - agent-thought
+ - vision
+model_properties:
+ mode: chat
+ context_size: 1048576
+parameter_rules:
+ - name: temperature
+ use_template: temperature
+ - name: top_p
+ use_template: top_p
+ - name: top_k
+ label:
+ en_US: Top k
+ type: int
+ help:
+ en_US: Only sample from the top K options for each subsequent token.
+ required: false
+ - name: presence_penalty
+ use_template: presence_penalty
+ - name: frequency_penalty
+ use_template: frequency_penalty
+ - name: max_output_tokens
+ use_template: max_tokens
+ required: true
+ default: 8192
+ min: 1
+ max: 8192
+pricing:
+ input: '0.00'
+ output: '0.00'
+ unit: '0.000001'
+ currency: USD
diff --git a/api/core/model_runtime/model_providers/vertex_ai/llm/gemini-flash-experimental.yaml b/api/core/model_runtime/model_providers/vertex_ai/llm/gemini-flash-experimental.yaml
new file mode 100644
index 0000000000..0f5eb34c0c
--- /dev/null
+++ b/api/core/model_runtime/model_providers/vertex_ai/llm/gemini-flash-experimental.yaml
@@ -0,0 +1,37 @@
+model: gemini-flash-experimental
+label:
+ en_US: Gemini Flash Experimental
+model_type: llm
+features:
+ - agent-thought
+ - vision
+model_properties:
+ mode: chat
+ context_size: 1048576
+parameter_rules:
+ - name: temperature
+ use_template: temperature
+ - name: top_p
+ use_template: top_p
+ - name: top_k
+ label:
+ en_US: Top k
+ type: int
+ help:
+ en_US: Only sample from the top K options for each subsequent token.
+ required: false
+ - name: presence_penalty
+ use_template: presence_penalty
+ - name: frequency_penalty
+ use_template: frequency_penalty
+ - name: max_output_tokens
+ use_template: max_tokens
+ required: true
+ default: 8192
+ min: 1
+ max: 8192
+pricing:
+ input: '0.00'
+ output: '0.00'
+ unit: '0.000001'
+ currency: USD
diff --git a/api/core/model_runtime/model_providers/vertex_ai/llm/gemini-pro-experimental.yaml b/api/core/model_runtime/model_providers/vertex_ai/llm/gemini-pro-experimental.yaml
new file mode 100644
index 0000000000..fa31cabb85
--- /dev/null
+++ b/api/core/model_runtime/model_providers/vertex_ai/llm/gemini-pro-experimental.yaml
@@ -0,0 +1,37 @@
+model: gemini-pro-experimental
+label:
+ en_US: Gemini Pro Experimental
+model_type: llm
+features:
+ - agent-thought
+ - vision
+model_properties:
+ mode: chat
+ context_size: 1048576
+parameter_rules:
+ - name: temperature
+ use_template: temperature
+ - name: top_p
+ use_template: top_p
+ - name: top_k
+ label:
+ en_US: Top k
+ type: int
+ help:
+ en_US: Only sample from the top K options for each subsequent token.
+ required: false
+ - name: presence_penalty
+ use_template: presence_penalty
+ - name: frequency_penalty
+ use_template: frequency_penalty
+ - name: max_output_tokens
+ use_template: max_tokens
+ required: true
+ default: 8192
+ min: 1
+ max: 8192
+pricing:
+ input: '0.00'
+ output: '0.00'
+ unit: '0.000001'
+ currency: USD
diff --git a/api/core/model_runtime/model_providers/vertex_ai/llm/llm.py b/api/core/model_runtime/model_providers/vertex_ai/llm/llm.py
index da69b7cdf3..1dd785d545 100644
--- a/api/core/model_runtime/model_providers/vertex_ai/llm/llm.py
+++ b/api/core/model_runtime/model_providers/vertex_ai/llm/llm.py
@@ -2,6 +2,7 @@ import base64
import io
import json
import logging
+import time
from collections.abc import Generator
from typing import Optional, Union, cast
@@ -20,7 +21,6 @@ from google.api_core import exceptions
from google.cloud import aiplatform
from google.oauth2 import service_account
from PIL import Image
-from vertexai.generative_models import HarmBlockThreshold, HarmCategory
from core.model_runtime.entities.llm_entities import LLMResult, LLMResultChunk, LLMResultChunkDelta, LLMUsage
from core.model_runtime.entities.message_entities import (
@@ -34,6 +34,7 @@ from core.model_runtime.entities.message_entities import (
ToolPromptMessage,
UserPromptMessage,
)
+from core.model_runtime.entities.model_entities import PriceType
from core.model_runtime.errors.invoke import (
InvokeAuthorizationError,
InvokeBadRequestError,
@@ -503,20 +504,12 @@ class VertexAiLargeLanguageModel(LargeLanguageModel):
else:
history.append(content)
- safety_settings = {
- HarmCategory.HARM_CATEGORY_HARASSMENT: HarmBlockThreshold.BLOCK_NONE,
- HarmCategory.HARM_CATEGORY_HATE_SPEECH: HarmBlockThreshold.BLOCK_NONE,
- HarmCategory.HARM_CATEGORY_SEXUALLY_EXPLICIT: HarmBlockThreshold.BLOCK_NONE,
- HarmCategory.HARM_CATEGORY_DANGEROUS_CONTENT: HarmBlockThreshold.BLOCK_NONE,
- }
-
google_model = glm.GenerativeModel(model_name=model, system_instruction=system_instruction)
response = google_model.generate_content(
contents=history,
generation_config=glm.GenerationConfig(**config_kwargs),
stream=stream,
- safety_settings=safety_settings,
tools=self._convert_tools_to_glm_tool(tools) if tools else None,
)
diff --git a/api/core/model_runtime/model_providers/vertex_ai/text_embedding/text_embedding.py b/api/core/model_runtime/model_providers/vertex_ai/text_embedding/text_embedding.py
index 519373a7f3..fce9544df0 100644
--- a/api/core/model_runtime/model_providers/vertex_ai/text_embedding/text_embedding.py
+++ b/api/core/model_runtime/model_providers/vertex_ai/text_embedding/text_embedding.py
@@ -9,6 +9,7 @@ from google.cloud import aiplatform
from google.oauth2 import service_account
from vertexai.language_models import TextEmbeddingModel as VertexTextEmbeddingModel
+from core.embedding.embedding_constant import EmbeddingInputType
from core.model_runtime.entities.common_entities import I18nObject
from core.model_runtime.entities.model_entities import (
AIModelEntity,
@@ -30,7 +31,12 @@ class VertexAiTextEmbeddingModel(_CommonVertexAi, TextEmbeddingModel):
"""
def _invoke(
- self, model: str, credentials: dict, texts: list[str], user: Optional[str] = None
+ self,
+ model: str,
+ credentials: dict,
+ texts: list[str],
+ user: Optional[str] = None,
+ input_type: EmbeddingInputType = EmbeddingInputType.DOCUMENT,
) -> TextEmbeddingResult:
"""
Invoke text embedding model
@@ -38,6 +44,8 @@ class VertexAiTextEmbeddingModel(_CommonVertexAi, TextEmbeddingModel):
:param model: model name
:param credentials: model credentials
:param texts: texts to embed
+ :param user: unique user id
+ :param input_type: input type
:return: embeddings result
"""
service_account_info = json.loads(base64.b64decode(credentials["vertex_service_account_key"]))
diff --git a/api/core/model_runtime/model_providers/volcengine_maas/text_embedding/text_embedding.py b/api/core/model_runtime/model_providers/volcengine_maas/text_embedding/text_embedding.py
index 9cba2cb879..0dd4037c95 100644
--- a/api/core/model_runtime/model_providers/volcengine_maas/text_embedding/text_embedding.py
+++ b/api/core/model_runtime/model_providers/volcengine_maas/text_embedding/text_embedding.py
@@ -2,6 +2,7 @@ import time
from decimal import Decimal
from typing import Optional
+from core.embedding.embedding_constant import EmbeddingInputType
from core.model_runtime.entities.common_entities import I18nObject
from core.model_runtime.entities.model_entities import (
AIModelEntity,
@@ -41,7 +42,12 @@ class VolcengineMaaSTextEmbeddingModel(TextEmbeddingModel):
"""
def _invoke(
- self, model: str, credentials: dict, texts: list[str], user: Optional[str] = None
+ self,
+ model: str,
+ credentials: dict,
+ texts: list[str],
+ user: Optional[str] = None,
+ input_type: EmbeddingInputType = EmbeddingInputType.DOCUMENT,
) -> TextEmbeddingResult:
"""
Invoke text embedding model
@@ -50,6 +56,7 @@ class VolcengineMaaSTextEmbeddingModel(TextEmbeddingModel):
:param credentials: model credentials
:param texts: texts to embed
:param user: unique user id
+ :param input_type: input type
:return: embeddings result
"""
if ArkClientV3.is_legacy(credentials):
diff --git a/api/core/model_runtime/model_providers/wenxin/text_embedding/text_embedding.py b/api/core/model_runtime/model_providers/wenxin/text_embedding/text_embedding.py
index 4d6f6dccd0..c21d0c0552 100644
--- a/api/core/model_runtime/model_providers/wenxin/text_embedding/text_embedding.py
+++ b/api/core/model_runtime/model_providers/wenxin/text_embedding/text_embedding.py
@@ -7,6 +7,7 @@ from typing import Any, Optional
import numpy as np
from requests import Response, post
+from core.embedding.embedding_constant import EmbeddingInputType
from core.model_runtime.entities.model_entities import PriceType
from core.model_runtime.entities.text_embedding_entities import EmbeddingUsage, TextEmbeddingResult
from core.model_runtime.errors.invoke import InvokeError
@@ -70,7 +71,12 @@ class WenxinTextEmbeddingModel(TextEmbeddingModel):
return WenxinTextEmbedding(api_key, secret_key)
def _invoke(
- self, model: str, credentials: dict, texts: list[str], user: Optional[str] = None
+ self,
+ model: str,
+ credentials: dict,
+ texts: list[str],
+ user: Optional[str] = None,
+ input_type: EmbeddingInputType = EmbeddingInputType.DOCUMENT,
) -> TextEmbeddingResult:
"""
Invoke text embedding model
@@ -79,6 +85,7 @@ class WenxinTextEmbeddingModel(TextEmbeddingModel):
:param credentials: model credentials
:param texts: texts to embed
:param user: unique user id
+ :param input_type: input type
:return: embeddings result
"""
diff --git a/api/core/model_runtime/model_providers/xinference/text_embedding/text_embedding.py b/api/core/model_runtime/model_providers/xinference/text_embedding/text_embedding.py
index 8043af1d6c..1627239132 100644
--- a/api/core/model_runtime/model_providers/xinference/text_embedding/text_embedding.py
+++ b/api/core/model_runtime/model_providers/xinference/text_embedding/text_embedding.py
@@ -3,6 +3,7 @@ from typing import Optional
from xinference_client.client.restful.restful_client import Client, RESTfulEmbeddingModelHandle
+from core.embedding.embedding_constant import EmbeddingInputType
from core.model_runtime.entities.common_entities import I18nObject
from core.model_runtime.entities.model_entities import AIModelEntity, FetchFrom, ModelPropertyKey, ModelType, PriceType
from core.model_runtime.entities.text_embedding_entities import EmbeddingUsage, TextEmbeddingResult
@@ -25,7 +26,12 @@ class XinferenceTextEmbeddingModel(TextEmbeddingModel):
"""
def _invoke(
- self, model: str, credentials: dict, texts: list[str], user: Optional[str] = None
+ self,
+ model: str,
+ credentials: dict,
+ texts: list[str],
+ user: Optional[str] = None,
+ input_type: EmbeddingInputType = EmbeddingInputType.DOCUMENT,
) -> TextEmbeddingResult:
"""
Invoke text embedding model
@@ -40,6 +46,7 @@ class XinferenceTextEmbeddingModel(TextEmbeddingModel):
:param credentials: model credentials
:param texts: texts to embed
:param user: unique user id
+ :param input_type: input type
:return: embeddings result
"""
server_url = credentials["server_url"]
diff --git a/api/core/model_runtime/model_providers/zhipuai/text_embedding/text_embedding.py b/api/core/model_runtime/model_providers/zhipuai/text_embedding/text_embedding.py
index ee20954381..14a529dddf 100644
--- a/api/core/model_runtime/model_providers/zhipuai/text_embedding/text_embedding.py
+++ b/api/core/model_runtime/model_providers/zhipuai/text_embedding/text_embedding.py
@@ -1,6 +1,7 @@
import time
from typing import Optional
+from core.embedding.embedding_constant import EmbeddingInputType
from core.model_runtime.entities.model_entities import PriceType
from core.model_runtime.entities.text_embedding_entities import EmbeddingUsage, TextEmbeddingResult
from core.model_runtime.errors.validate import CredentialsValidateFailedError
@@ -15,7 +16,12 @@ class ZhipuAITextEmbeddingModel(_CommonZhipuaiAI, TextEmbeddingModel):
"""
def _invoke(
- self, model: str, credentials: dict, texts: list[str], user: Optional[str] = None
+ self,
+ model: str,
+ credentials: dict,
+ texts: list[str],
+ user: Optional[str] = None,
+ input_type: EmbeddingInputType = EmbeddingInputType.DOCUMENT,
) -> TextEmbeddingResult:
"""
Invoke text embedding model
@@ -24,6 +30,7 @@ class ZhipuAITextEmbeddingModel(_CommonZhipuaiAI, TextEmbeddingModel):
:param credentials: model credentials
:param texts: texts to embed
:param user: unique user id
+ :param input_type: input type
:return: embeddings result
"""
credentials_kwargs = self._to_credential_kwargs(credentials)
diff --git a/api/core/rag/datasource/vdb/analyticdb/analyticdb_vector.py b/api/core/rag/datasource/vdb/analyticdb/analyticdb_vector.py
index 612542dab1..6dcd98dcfd 100644
--- a/api/core/rag/datasource/vdb/analyticdb/analyticdb_vector.py
+++ b/api/core/rag/datasource/vdb/analyticdb/analyticdb_vector.py
@@ -40,19 +40,8 @@ class AnalyticdbConfig(BaseModel):
class AnalyticdbVector(BaseVector):
- _instance = None
- _init = False
-
- def __new__(cls, *args, **kwargs):
- if cls._instance is None:
- cls._instance = super().__new__(cls)
- return cls._instance
-
def __init__(self, collection_name: str, config: AnalyticdbConfig):
- # collection_name must be updated every time
self._collection_name = collection_name.lower()
- if AnalyticdbVector._init:
- return
try:
from alibabacloud_gpdb20160503.client import Client
from alibabacloud_tea_openapi import models as open_api_models
@@ -62,7 +51,6 @@ class AnalyticdbVector(BaseVector):
self._client_config = open_api_models.Config(user_agent="dify", **config.to_analyticdb_client_params())
self._client = Client(self._client_config)
self._initialize()
- AnalyticdbVector._init = True
def _initialize(self) -> None:
cache_key = f"vector_indexing_{self.config.instance_id}"
@@ -257,11 +245,14 @@ class AnalyticdbVector(BaseVector):
documents = []
for match in response.body.matches.match:
if match.score > score_threshold:
+ metadata = json.loads(match.metadata.get("metadata_"))
+ metadata["score"] = match.score
doc = Document(
page_content=match.metadata.get("page_content"),
- metadata=json.loads(match.metadata.get("metadata_")),
+ metadata=metadata,
)
documents.append(doc)
+ documents = sorted(documents, key=lambda x: x.metadata["score"], reverse=True)
return documents
def search_by_full_text(self, query: str, **kwargs: Any) -> list[Document]:
@@ -286,12 +277,14 @@ class AnalyticdbVector(BaseVector):
for match in response.body.matches.match:
if match.score > score_threshold:
metadata = json.loads(match.metadata.get("metadata_"))
+ metadata["score"] = match.score
doc = Document(
page_content=match.metadata.get("page_content"),
vector=match.metadata.get("vector"),
metadata=metadata,
)
documents.append(doc)
+ documents = sorted(documents, key=lambda x: x.metadata["score"], reverse=True)
return documents
def delete(self) -> None:
diff --git a/api/core/rag/datasource/vdb/vector_base.py b/api/core/rag/datasource/vdb/vector_base.py
index 1a0dc7f48b..22e191340d 100644
--- a/api/core/rag/datasource/vdb/vector_base.py
+++ b/api/core/rag/datasource/vdb/vector_base.py
@@ -45,6 +45,7 @@ class BaseVector(ABC):
def search_by_full_text(self, query: str, **kwargs: Any) -> list[Document]:
raise NotImplementedError
+ @abstractmethod
def delete(self) -> None:
raise NotImplementedError
diff --git a/api/core/rag/extractor/extract_processor.py b/api/core/rag/extractor/extract_processor.py
index fe7eaa32e6..0ffc89b214 100644
--- a/api/core/rag/extractor/extract_processor.py
+++ b/api/core/rag/extractor/extract_processor.py
@@ -124,7 +124,7 @@ class ExtractProcessor:
extractor = UnstructuredPPTXExtractor(file_path, unstructured_api_url)
elif file_extension == ".xml":
extractor = UnstructuredXmlExtractor(file_path, unstructured_api_url)
- elif file_extension == "epub":
+ elif file_extension == ".epub":
extractor = UnstructuredEpubExtractor(file_path, unstructured_api_url)
else:
# txt
@@ -146,7 +146,7 @@ class ExtractProcessor:
extractor = WordExtractor(file_path, upload_file.tenant_id, upload_file.created_by)
elif file_extension == ".csv":
extractor = CSVExtractor(file_path, autodetect_encoding=True)
- elif file_extension == "epub":
+ elif file_extension == ".epub":
extractor = UnstructuredEpubExtractor(file_path)
else:
# txt
diff --git a/api/core/tools/provider/_position.yaml b/api/core/tools/provider/_position.yaml
index 40c3356116..e0a8e7511e 100644
--- a/api/core/tools/provider/_position.yaml
+++ b/api/core/tools/provider/_position.yaml
@@ -34,5 +34,9 @@
- feishu_base
- feishu_document
- feishu_message
+- feishu_wiki
+- feishu_task
+- feishu_calendar
+- feishu_spreadsheet
- slack
- tianditu
diff --git a/api/core/tools/provider/builtin/comfyui/comfyui.yaml b/api/core/tools/provider/builtin/comfyui/comfyui.yaml
index 066fd85308..3891eebf3a 100644
--- a/api/core/tools/provider/builtin/comfyui/comfyui.yaml
+++ b/api/core/tools/provider/builtin/comfyui/comfyui.yaml
@@ -39,4 +39,4 @@ credentials_for_provider:
en_US: The checkpoint name of the ComfyUI server, e.g. xxx.safetensors
zh_Hans: ComfyUI服务器的模型名称, 比如 xxx.safetensors
pt_BR: The checkpoint name of the ComfyUI server, e.g. xxx.safetensors
- url: https://docs.dify.ai/tutorials/tool-configuration/comfyui
+ url: https://github.com/comfyanonymous/ComfyUI#installing
diff --git a/api/core/tools/provider/builtin/feishu_calendar/_assets/icon.png b/api/core/tools/provider/builtin/feishu_calendar/_assets/icon.png
new file mode 100644
index 0000000000..2a934747a9
Binary files /dev/null and b/api/core/tools/provider/builtin/feishu_calendar/_assets/icon.png differ
diff --git a/api/core/tools/provider/builtin/feishu_calendar/feishu_calendar.py b/api/core/tools/provider/builtin/feishu_calendar/feishu_calendar.py
new file mode 100644
index 0000000000..a46a9fa9e8
--- /dev/null
+++ b/api/core/tools/provider/builtin/feishu_calendar/feishu_calendar.py
@@ -0,0 +1,7 @@
+from core.tools.provider.builtin_tool_provider import BuiltinToolProviderController
+from core.tools.utils.feishu_api_utils import auth
+
+
+class FeishuCalendarProvider(BuiltinToolProviderController):
+ def _validate_credentials(self, credentials: dict) -> None:
+ auth(credentials)
diff --git a/api/core/tools/provider/builtin/feishu_calendar/feishu_calendar.yaml b/api/core/tools/provider/builtin/feishu_calendar/feishu_calendar.yaml
new file mode 100644
index 0000000000..db5bab5c10
--- /dev/null
+++ b/api/core/tools/provider/builtin/feishu_calendar/feishu_calendar.yaml
@@ -0,0 +1,36 @@
+identity:
+ author: Doug Lea
+ name: feishu_calendar
+ label:
+ en_US: Feishu Calendar
+ zh_Hans: 飞书日历
+ description:
+ en_US: |
+ Feishu calendar, requires the following permissions: calendar:calendar:read, calendar:calendar, contact:user.id:readonly.
+ zh_Hans: |
+ 飞书日历,需要开通以下权限: calendar:calendar:read、calendar:calendar、contact:user.id:readonly。
+ icon: icon.png
+ tags:
+ - social
+ - productivity
+credentials_for_provider:
+ app_id:
+ type: text-input
+ required: true
+ label:
+ en_US: APP ID
+ placeholder:
+ en_US: Please input your feishu app id
+ zh_Hans: 请输入你的飞书 app id
+ help:
+ en_US: Get your app_id and app_secret from Feishu
+ zh_Hans: 从飞书获取您的 app_id 和 app_secret
+ url: https://open.larkoffice.com/app
+ app_secret:
+ type: secret-input
+ required: true
+ label:
+ en_US: APP Secret
+ placeholder:
+ en_US: Please input your app secret
+ zh_Hans: 请输入你的飞书 app secret
diff --git a/api/core/tools/provider/builtin/feishu_calendar/tools/add_event_attendees.py b/api/core/tools/provider/builtin/feishu_calendar/tools/add_event_attendees.py
new file mode 100644
index 0000000000..8f83aea5ab
--- /dev/null
+++ b/api/core/tools/provider/builtin/feishu_calendar/tools/add_event_attendees.py
@@ -0,0 +1,20 @@
+from typing import Any
+
+from core.tools.entities.tool_entities import ToolInvokeMessage
+from core.tools.tool.builtin_tool import BuiltinTool
+from core.tools.utils.feishu_api_utils import FeishuRequest
+
+
+class AddEventAttendeesTool(BuiltinTool):
+ def _invoke(self, user_id: str, tool_parameters: dict[str, Any]) -> ToolInvokeMessage:
+ app_id = self.runtime.credentials.get("app_id")
+ app_secret = self.runtime.credentials.get("app_secret")
+ client = FeishuRequest(app_id, app_secret)
+
+ event_id = tool_parameters.get("event_id")
+ attendee_phone_or_email = tool_parameters.get("attendee_phone_or_email")
+ need_notification = tool_parameters.get("need_notification", True)
+
+ res = client.add_event_attendees(event_id, attendee_phone_or_email, need_notification)
+
+ return self.create_json_message(res)
diff --git a/api/core/tools/provider/builtin/feishu_calendar/tools/add_event_attendees.yaml b/api/core/tools/provider/builtin/feishu_calendar/tools/add_event_attendees.yaml
new file mode 100644
index 0000000000..b7744499b0
--- /dev/null
+++ b/api/core/tools/provider/builtin/feishu_calendar/tools/add_event_attendees.yaml
@@ -0,0 +1,54 @@
+identity:
+ name: add_event_attendees
+ author: Doug Lea
+ label:
+ en_US: Add Event Attendees
+ zh_Hans: 添加日程参会人
+description:
+ human:
+ en_US: Add Event Attendees
+ zh_Hans: 添加日程参会人
+ llm: A tool for adding attendees to events in Feishu. (在飞书中添加日程参会人)
+parameters:
+ - name: event_id
+ type: string
+ required: true
+ label:
+ en_US: Event ID
+ zh_Hans: 日程 ID
+ human_description:
+ en_US: |
+ The ID of the event, which will be returned when the event is created. For example: fb2a6406-26d6-4c8d-a487-6f0246c94d2f_0.
+ zh_Hans: |
+ 创建日程时会返回日程 ID。例如: fb2a6406-26d6-4c8d-a487-6f0246c94d2f_0。
+ llm_description: |
+ 日程 ID,创建日程时会返回日程 ID。例如: fb2a6406-26d6-4c8d-a487-6f0246c94d2f_0。
+ form: llm
+
+ - name: need_notification
+ type: boolean
+ required: false
+ default: true
+ label:
+ en_US: Need Notification
+ zh_Hans: 是否需要通知
+ human_description:
+ en_US: |
+ Whether to send a Bot notification to attendees. true: send, false: do not send.
+ zh_Hans: |
+ 是否给参与人发送 Bot 通知,true: 发送,false: 不发送。
+ llm_description: |
+ 是否给参与人发送 Bot 通知,true: 发送,false: 不发送。
+ form: form
+
+ - name: attendee_phone_or_email
+ type: string
+ required: true
+ label:
+ en_US: Attendee Phone or Email
+ zh_Hans: 参会人电话或邮箱
+ human_description:
+ en_US: The list of attendee emails or phone numbers, separated by commas.
+ zh_Hans: 日程参会人邮箱或者手机号列表,使用逗号分隔。
+ llm_description: 日程参会人邮箱或者手机号列表,使用逗号分隔。
+ form: llm
diff --git a/api/core/tools/provider/builtin/feishu_calendar/tools/create_event.py b/api/core/tools/provider/builtin/feishu_calendar/tools/create_event.py
new file mode 100644
index 0000000000..8820bebdbe
--- /dev/null
+++ b/api/core/tools/provider/builtin/feishu_calendar/tools/create_event.py
@@ -0,0 +1,26 @@
+from typing import Any
+
+from core.tools.entities.tool_entities import ToolInvokeMessage
+from core.tools.tool.builtin_tool import BuiltinTool
+from core.tools.utils.feishu_api_utils import FeishuRequest
+
+
+class CreateEventTool(BuiltinTool):
+ def _invoke(self, user_id: str, tool_parameters: dict[str, Any]) -> ToolInvokeMessage:
+ app_id = self.runtime.credentials.get("app_id")
+ app_secret = self.runtime.credentials.get("app_secret")
+ client = FeishuRequest(app_id, app_secret)
+
+ summary = tool_parameters.get("summary")
+ description = tool_parameters.get("description")
+ start_time = tool_parameters.get("start_time")
+ end_time = tool_parameters.get("end_time")
+ attendee_ability = tool_parameters.get("attendee_ability")
+ need_notification = tool_parameters.get("need_notification", True)
+ auto_record = tool_parameters.get("auto_record", False)
+
+ res = client.create_event(
+ summary, description, start_time, end_time, attendee_ability, need_notification, auto_record
+ )
+
+ return self.create_json_message(res)
diff --git a/api/core/tools/provider/builtin/feishu_calendar/tools/create_event.yaml b/api/core/tools/provider/builtin/feishu_calendar/tools/create_event.yaml
new file mode 100644
index 0000000000..f0784221ce
--- /dev/null
+++ b/api/core/tools/provider/builtin/feishu_calendar/tools/create_event.yaml
@@ -0,0 +1,119 @@
+identity:
+ name: create_event
+ author: Doug Lea
+ label:
+ en_US: Create Event
+ zh_Hans: 创建日程
+description:
+ human:
+ en_US: Create Event
+ zh_Hans: 创建日程
+ llm: A tool for creating events in Feishu.(创建飞书日程)
+parameters:
+ - name: summary
+ type: string
+ required: false
+ label:
+ en_US: Summary
+ zh_Hans: 日程标题
+ human_description:
+ en_US: The title of the event. If not filled, the event title will display (No Subject).
+ zh_Hans: 日程标题,若不填则日程标题显示 (无主题)。
+ llm_description: 日程标题,若不填则日程标题显示 (无主题)。
+ form: llm
+
+ - name: description
+ type: string
+ required: false
+ label:
+ en_US: Description
+ zh_Hans: 日程描述
+ human_description:
+ en_US: The description of the event.
+ zh_Hans: 日程描述。
+ llm_description: 日程描述。
+ form: llm
+
+ - name: need_notification
+ type: boolean
+ required: false
+ default: true
+ label:
+ en_US: Need Notification
+ zh_Hans: 是否发送通知
+ human_description:
+ en_US: |
+ Whether to send a bot message when the event is created, true: send, false: do not send.
+ zh_Hans: 创建日程时是否发送 bot 消息,true:发送,false:不发送。
+ llm_description: 创建日程时是否发送 bot 消息,true:发送,false:不发送。
+ form: form
+
+ - name: start_time
+ type: string
+ required: true
+ label:
+ en_US: Start Time
+ zh_Hans: 开始时间
+ human_description:
+ en_US: |
+ The start time of the event, format: 2006-01-02 15:04:05.
+ zh_Hans: 日程开始时间,格式:2006-01-02 15:04:05。
+ llm_description: 日程开始时间,格式:2006-01-02 15:04:05。
+ form: llm
+
+ - name: end_time
+ type: string
+ required: true
+ label:
+ en_US: End Time
+ zh_Hans: 结束时间
+ human_description:
+ en_US: |
+ The end time of the event, format: 2006-01-02 15:04:05.
+ zh_Hans: 日程结束时间,格式:2006-01-02 15:04:05。
+ llm_description: 日程结束时间,格式:2006-01-02 15:04:05。
+ form: llm
+
+ - name: attendee_ability
+ type: select
+ required: false
+ options:
+ - value: none
+ label:
+ en_US: none
+ zh_Hans: 无
+ - value: can_see_others
+ label:
+ en_US: can_see_others
+ zh_Hans: 可以查看参与人列表
+ - value: can_invite_others
+ label:
+ en_US: can_invite_others
+ zh_Hans: 可以邀请其它参与人
+ - value: can_modify_event
+ label:
+ en_US: can_modify_event
+ zh_Hans: 可以编辑日程
+ default: "none"
+ label:
+ en_US: attendee_ability
+ zh_Hans: 参会人权限
+ human_description:
+ en_US: Attendee ability, optional values are none, can_see_others, can_invite_others, can_modify_event, with a default value of none.
+ zh_Hans: 参会人权限,可选值有无、可以查看参与人列表、可以邀请其它参与人、可以编辑日程,默认值为无。
+ llm_description: 参会人权限,可选值有无、可以查看参与人列表、可以邀请其它参与人、可以编辑日程,默认值为无。
+ form: form
+
+ - name: auto_record
+ type: boolean
+ required: false
+ default: false
+ label:
+ en_US: Auto Record
+ zh_Hans: 自动录制
+ human_description:
+ en_US: |
+ Whether to enable automatic recording, true: enabled, automatically record when the meeting starts; false: not enabled.
+ zh_Hans: 是否开启自动录制,true:开启,会议开始后自动录制;false:不开启。
+ llm_description: 是否开启自动录制,true:开启,会议开始后自动录制;false:不开启。
+ form: form
diff --git a/api/core/tools/provider/builtin/feishu_calendar/tools/delete_event.py b/api/core/tools/provider/builtin/feishu_calendar/tools/delete_event.py
new file mode 100644
index 0000000000..144889692f
--- /dev/null
+++ b/api/core/tools/provider/builtin/feishu_calendar/tools/delete_event.py
@@ -0,0 +1,19 @@
+from typing import Any
+
+from core.tools.entities.tool_entities import ToolInvokeMessage
+from core.tools.tool.builtin_tool import BuiltinTool
+from core.tools.utils.feishu_api_utils import FeishuRequest
+
+
+class DeleteEventTool(BuiltinTool):
+ def _invoke(self, user_id: str, tool_parameters: dict[str, Any]) -> ToolInvokeMessage:
+ app_id = self.runtime.credentials.get("app_id")
+ app_secret = self.runtime.credentials.get("app_secret")
+ client = FeishuRequest(app_id, app_secret)
+
+ event_id = tool_parameters.get("event_id")
+ need_notification = tool_parameters.get("need_notification", True)
+
+ res = client.delete_event(event_id, need_notification)
+
+ return self.create_json_message(res)
diff --git a/api/core/tools/provider/builtin/feishu_calendar/tools/delete_event.yaml b/api/core/tools/provider/builtin/feishu_calendar/tools/delete_event.yaml
new file mode 100644
index 0000000000..54fdb04acc
--- /dev/null
+++ b/api/core/tools/provider/builtin/feishu_calendar/tools/delete_event.yaml
@@ -0,0 +1,38 @@
+identity:
+ name: delete_event
+ author: Doug Lea
+ label:
+ en_US: Delete Event
+ zh_Hans: 删除日程
+description:
+ human:
+ en_US: Delete Event
+ zh_Hans: 删除日程
+ llm: A tool for deleting events in Feishu.(在飞书中删除日程)
+parameters:
+ - name: event_id
+ type: string
+ required: true
+ label:
+ en_US: Event ID
+ zh_Hans: 日程 ID
+ human_description:
+ en_US: |
+ The ID of the event, for example: e8b9791c-39ae-4908-8ad8-66b13159b9fb_0.
+ zh_Hans: 日程 ID,例如:e8b9791c-39ae-4908-8ad8-66b13159b9fb_0。
+ llm_description: 日程 ID,例如:e8b9791c-39ae-4908-8ad8-66b13159b9fb_0。
+ form: llm
+
+ - name: need_notification
+ type: boolean
+ required: false
+ default: true
+ label:
+ en_US: Need Notification
+ zh_Hans: 是否需要通知
+ human_description:
+ en_US: |
+ Indicates whether to send bot notifications to event participants upon deletion. true: send, false: do not send.
+ zh_Hans: 删除日程是否给日程参与人发送 bot 通知,true:发送,false:不发送。
+ llm_description: 删除日程是否给日程参与人发送 bot 通知,true:发送,false:不发送。
+ form: form
diff --git a/api/core/tools/provider/builtin/feishu_calendar/tools/get_primary_calendar.py b/api/core/tools/provider/builtin/feishu_calendar/tools/get_primary_calendar.py
new file mode 100644
index 0000000000..a2cd5a8b17
--- /dev/null
+++ b/api/core/tools/provider/builtin/feishu_calendar/tools/get_primary_calendar.py
@@ -0,0 +1,18 @@
+from typing import Any
+
+from core.tools.entities.tool_entities import ToolInvokeMessage
+from core.tools.tool.builtin_tool import BuiltinTool
+from core.tools.utils.feishu_api_utils import FeishuRequest
+
+
+class GetPrimaryCalendarTool(BuiltinTool):
+ def _invoke(self, user_id: str, tool_parameters: dict[str, Any]) -> ToolInvokeMessage:
+ app_id = self.runtime.credentials.get("app_id")
+ app_secret = self.runtime.credentials.get("app_secret")
+ client = FeishuRequest(app_id, app_secret)
+
+ user_id_type = tool_parameters.get("user_id_type", "open_id")
+
+ res = client.get_primary_calendar(user_id_type)
+
+ return self.create_json_message(res)
diff --git a/api/core/tools/provider/builtin/feishu_calendar/tools/get_primary_calendar.yaml b/api/core/tools/provider/builtin/feishu_calendar/tools/get_primary_calendar.yaml
new file mode 100644
index 0000000000..3440c85d4a
--- /dev/null
+++ b/api/core/tools/provider/builtin/feishu_calendar/tools/get_primary_calendar.yaml
@@ -0,0 +1,37 @@
+identity:
+ name: get_primary_calendar
+ author: Doug Lea
+ label:
+ en_US: Get Primary Calendar
+ zh_Hans: 查询主日历信息
+description:
+ human:
+ en_US: Get Primary Calendar
+ zh_Hans: 查询主日历信息
+ llm: A tool for querying primary calendar information in Feishu.(在飞书中查询主日历信息)
+parameters:
+ - name: user_id_type
+ type: select
+ required: false
+ options:
+ - value: open_id
+ label:
+ en_US: open_id
+ zh_Hans: open_id
+ - value: union_id
+ label:
+ en_US: union_id
+ zh_Hans: union_id
+ - value: user_id
+ label:
+ en_US: user_id
+ zh_Hans: user_id
+ default: "open_id"
+ label:
+ en_US: user_id_type
+ zh_Hans: 用户 ID 类型
+ human_description:
+ en_US: User ID type, optional values are open_id, union_id, user_id, with a default value of open_id.
+ zh_Hans: 用户 ID 类型,可选值有 open_id、union_id、user_id,默认值为 open_id。
+ llm_description: 用户 ID 类型,可选值有 open_id、union_id、user_id,默认值为 open_id。
+ form: form
diff --git a/api/core/tools/provider/builtin/feishu_calendar/tools/list_events.py b/api/core/tools/provider/builtin/feishu_calendar/tools/list_events.py
new file mode 100644
index 0000000000..8815b4c9c8
--- /dev/null
+++ b/api/core/tools/provider/builtin/feishu_calendar/tools/list_events.py
@@ -0,0 +1,21 @@
+from typing import Any
+
+from core.tools.entities.tool_entities import ToolInvokeMessage
+from core.tools.tool.builtin_tool import BuiltinTool
+from core.tools.utils.feishu_api_utils import FeishuRequest
+
+
+class ListEventsTool(BuiltinTool):
+ def _invoke(self, user_id: str, tool_parameters: dict[str, Any]) -> ToolInvokeMessage:
+ app_id = self.runtime.credentials.get("app_id")
+ app_secret = self.runtime.credentials.get("app_secret")
+ client = FeishuRequest(app_id, app_secret)
+
+ start_time = tool_parameters.get("start_time")
+ end_time = tool_parameters.get("end_time")
+ page_token = tool_parameters.get("page_token")
+ page_size = tool_parameters.get("page_size")
+
+ res = client.list_events(start_time, end_time, page_token, page_size)
+
+ return self.create_json_message(res)
diff --git a/api/core/tools/provider/builtin/feishu_calendar/tools/list_events.yaml b/api/core/tools/provider/builtin/feishu_calendar/tools/list_events.yaml
new file mode 100644
index 0000000000..f4a5bfe6ba
--- /dev/null
+++ b/api/core/tools/provider/builtin/feishu_calendar/tools/list_events.yaml
@@ -0,0 +1,62 @@
+identity:
+ name: list_events
+ author: Doug Lea
+ label:
+ en_US: List Events
+ zh_Hans: 获取日程列表
+description:
+ human:
+ en_US: List Events
+ zh_Hans: 获取日程列表
+ llm: A tool for listing events in Feishu.(在飞书中获取日程列表)
+parameters:
+ - name: start_time
+ type: string
+ required: false
+ label:
+ en_US: Start Time
+ zh_Hans: 开始时间
+ human_description:
+ en_US: |
+ The start time, defaults to 0:00 of the current day if not provided, format: 2006-01-02 15:04:05.
+ zh_Hans: 开始时间,不传值时默认当天 0 点时间,格式为:2006-01-02 15:04:05。
+ llm_description: 开始时间,不传值时默认当天 0 点时间,格式为:2006-01-02 15:04:05。
+ form: llm
+
+ - name: end_time
+ type: string
+ required: false
+ label:
+ en_US: End Time
+ zh_Hans: 结束时间
+ human_description:
+ en_US: |
+ The end time, defaults to 23:59 of the current day if not provided, format: 2006-01-02 15:04:05.
+ zh_Hans: 结束时间,不传值时默认当天 23:59 分时间,格式为:2006-01-02 15:04:05。
+ llm_description: 结束时间,不传值时默认当天 23:59 分时间,格式为:2006-01-02 15:04:05。
+ form: llm
+
+ - name: page_size
+ type: number
+ required: false
+ default: 50
+ label:
+ en_US: Page Size
+ zh_Hans: 分页大小
+ human_description:
+ en_US: The page size, i.e., the number of data entries returned in a single request. The default value is 50, and the value range is [50,1000].
+ zh_Hans: 分页大小,即单次请求所返回的数据条目数。默认值为 50,取值范围为 [50,1000]。
+ llm_description: 分页大小,即单次请求所返回的数据条目数。默认值为 50,取值范围为 [50,1000]。
+ form: llm
+
+ - name: page_token
+ type: string
+ required: false
+ label:
+ en_US: Page Token
+ zh_Hans: 分页标记
+ human_description:
+ en_US: The pagination token. Leave it blank for the first request, indicating to start traversing from the beginning; when the pagination query result has more items, a new page_token will be returned simultaneously, which can be used to obtain the query result in the next traversal.
+ zh_Hans: 分页标记,第一次请求不填,表示从头开始遍历;分页查询结果还有更多项时会同时返回新的 page_token,下次遍历可采用该 page_token 获取查询结果。
+ llm_description: 分页标记,第一次请求不填,表示从头开始遍历;分页查询结果还有更多项时会同时返回新的 page_token,下次遍历可采用该 page_token 获取查询结果。
+ form: llm
diff --git a/api/core/tools/provider/builtin/feishu_calendar/tools/search_events.py b/api/core/tools/provider/builtin/feishu_calendar/tools/search_events.py
new file mode 100644
index 0000000000..dc365205a4
--- /dev/null
+++ b/api/core/tools/provider/builtin/feishu_calendar/tools/search_events.py
@@ -0,0 +1,23 @@
+from typing import Any
+
+from core.tools.entities.tool_entities import ToolInvokeMessage
+from core.tools.tool.builtin_tool import BuiltinTool
+from core.tools.utils.feishu_api_utils import FeishuRequest
+
+
+class SearchEventsTool(BuiltinTool):
+ def _invoke(self, user_id: str, tool_parameters: dict[str, Any]) -> ToolInvokeMessage:
+ app_id = self.runtime.credentials.get("app_id")
+ app_secret = self.runtime.credentials.get("app_secret")
+ client = FeishuRequest(app_id, app_secret)
+
+ query = tool_parameters.get("query")
+ start_time = tool_parameters.get("start_time")
+ end_time = tool_parameters.get("end_time")
+ page_token = tool_parameters.get("page_token")
+ user_id_type = tool_parameters.get("user_id_type", "open_id")
+ page_size = tool_parameters.get("page_size", 20)
+
+ res = client.search_events(query, start_time, end_time, page_token, user_id_type, page_size)
+
+ return self.create_json_message(res)
diff --git a/api/core/tools/provider/builtin/feishu_calendar/tools/search_events.yaml b/api/core/tools/provider/builtin/feishu_calendar/tools/search_events.yaml
new file mode 100644
index 0000000000..e92a282091
--- /dev/null
+++ b/api/core/tools/provider/builtin/feishu_calendar/tools/search_events.yaml
@@ -0,0 +1,100 @@
+identity:
+ name: search_events
+ author: Doug Lea
+ label:
+ en_US: Search Events
+ zh_Hans: 搜索日程
+description:
+ human:
+ en_US: Search Events
+ zh_Hans: 搜索日程
+ llm: A tool for searching events in Feishu.(在飞书中搜索日程)
+parameters:
+ - name: user_id_type
+ type: select
+ required: false
+ options:
+ - value: open_id
+ label:
+ en_US: open_id
+ zh_Hans: open_id
+ - value: union_id
+ label:
+ en_US: union_id
+ zh_Hans: union_id
+ - value: user_id
+ label:
+ en_US: user_id
+ zh_Hans: user_id
+ default: "open_id"
+ label:
+ en_US: user_id_type
+ zh_Hans: 用户 ID 类型
+ human_description:
+ en_US: User ID type, optional values are open_id, union_id, user_id, with a default value of open_id.
+ zh_Hans: 用户 ID 类型,可选值有 open_id、union_id、user_id,默认值为 open_id。
+ llm_description: 用户 ID 类型,可选值有 open_id、union_id、user_id,默认值为 open_id。
+ form: form
+
+ - name: query
+ type: string
+ required: true
+ label:
+ en_US: Query
+ zh_Hans: 搜索关键字
+ human_description:
+ en_US: The search keyword used for fuzzy searching event names, with a maximum input of 200 characters.
+ zh_Hans: 用于模糊查询日程名称的搜索关键字,最大输入 200 字符。
+ llm_description: 用于模糊查询日程名称的搜索关键字,最大输入 200 字符。
+ form: llm
+
+ - name: start_time
+ type: string
+ required: false
+ label:
+ en_US: Start Time
+ zh_Hans: 开始时间
+ human_description:
+ en_US: |
+ The start time, defaults to 0:00 of the current day if not provided, format: 2006-01-02 15:04:05.
+ zh_Hans: 开始时间,不传值时默认当天 0 点时间,格式为:2006-01-02 15:04:05。
+ llm_description: 开始时间,不传值时默认当天 0 点时间,格式为:2006-01-02 15:04:05。
+ form: llm
+
+ - name: end_time
+ type: string
+ required: false
+ label:
+ en_US: End Time
+ zh_Hans: 结束时间
+ human_description:
+ en_US: |
+ The end time, defaults to 23:59 of the current day if not provided, format: 2006-01-02 15:04:05.
+ zh_Hans: 结束时间,不传值时默认当天 23:59 分时间,格式为:2006-01-02 15:04:05。
+ llm_description: 结束时间,不传值时默认当天 23:59 分时间,格式为:2006-01-02 15:04:05。
+ form: llm
+
+ - name: page_size
+ type: number
+ required: false
+ default: 20
+ label:
+ en_US: Page Size
+ zh_Hans: 分页大小
+ human_description:
+ en_US: The page size, i.e., the number of data entries returned in a single request. The default value is 20, and the value range is [10,100].
+ zh_Hans: 分页大小,即单次请求所返回的数据条目数。默认值为 20,取值范围为 [10,100]。
+ llm_description: 分页大小,即单次请求所返回的数据条目数。默认值为 20,取值范围为 [10,100]。
+ form: llm
+
+ - name: page_token
+ type: string
+ required: false
+ label:
+ en_US: Page Token
+ zh_Hans: 分页标记
+ human_description:
+ en_US: The pagination token. Leave it blank for the first request, indicating to start traversing from the beginning; when the pagination query result has more items, a new page_token will be returned simultaneously, which can be used to obtain the query result in the next traversal.
+ zh_Hans: 分页标记,第一次请求不填,表示从头开始遍历;分页查询结果还有更多项时会同时返回新的 page_token,下次遍历可采用该 page_token 获取查询结果。
+ llm_description: 分页标记,第一次请求不填,表示从头开始遍历;分页查询结果还有更多项时会同时返回新的 page_token,下次遍历可采用该 page_token 获取查询结果。
+ form: llm
diff --git a/api/core/tools/provider/builtin/feishu_calendar/tools/update_event.py b/api/core/tools/provider/builtin/feishu_calendar/tools/update_event.py
new file mode 100644
index 0000000000..85bcb1d3f6
--- /dev/null
+++ b/api/core/tools/provider/builtin/feishu_calendar/tools/update_event.py
@@ -0,0 +1,24 @@
+from typing import Any
+
+from core.tools.entities.tool_entities import ToolInvokeMessage
+from core.tools.tool.builtin_tool import BuiltinTool
+from core.tools.utils.feishu_api_utils import FeishuRequest
+
+
+class UpdateEventTool(BuiltinTool):
+ def _invoke(self, user_id: str, tool_parameters: dict[str, Any]) -> ToolInvokeMessage:
+ app_id = self.runtime.credentials.get("app_id")
+ app_secret = self.runtime.credentials.get("app_secret")
+ client = FeishuRequest(app_id, app_secret)
+
+ event_id = tool_parameters.get("event_id")
+ summary = tool_parameters.get("summary")
+ description = tool_parameters.get("description")
+ need_notification = tool_parameters.get("need_notification", True)
+ start_time = tool_parameters.get("start_time")
+ end_time = tool_parameters.get("end_time")
+ auto_record = tool_parameters.get("auto_record", False)
+
+ res = client.update_event(event_id, summary, description, need_notification, start_time, end_time, auto_record)
+
+ return self.create_json_message(res)
diff --git a/api/core/tools/provider/builtin/feishu_calendar/tools/update_event.yaml b/api/core/tools/provider/builtin/feishu_calendar/tools/update_event.yaml
new file mode 100644
index 0000000000..4d60dbf8c8
--- /dev/null
+++ b/api/core/tools/provider/builtin/feishu_calendar/tools/update_event.yaml
@@ -0,0 +1,100 @@
+identity:
+ name: update_event
+ author: Doug Lea
+ label:
+ en_US: Update Event
+ zh_Hans: 更新日程
+description:
+ human:
+ en_US: Update Event
+ zh_Hans: 更新日程
+ llm: A tool for updating events in Feishu.(更新飞书中的日程)
+parameters:
+ - name: event_id
+ type: string
+ required: true
+ label:
+ en_US: Event ID
+ zh_Hans: 日程 ID
+ human_description:
+ en_US: |
+ The ID of the event, for example: e8b9791c-39ae-4908-8ad8-66b13159b9fb_0.
+ zh_Hans: 日程 ID,例如:e8b9791c-39ae-4908-8ad8-66b13159b9fb_0。
+ llm_description: 日程 ID,例如:e8b9791c-39ae-4908-8ad8-66b13159b9fb_0。
+ form: llm
+
+ - name: summary
+ type: string
+ required: false
+ label:
+ en_US: Summary
+ zh_Hans: 日程标题
+ human_description:
+ en_US: The title of the event.
+ zh_Hans: 日程标题。
+ llm_description: 日程标题。
+ form: llm
+
+ - name: description
+ type: string
+ required: false
+ label:
+ en_US: Description
+ zh_Hans: 日程描述
+ human_description:
+ en_US: The description of the event.
+ zh_Hans: 日程描述。
+ llm_description: 日程描述。
+ form: llm
+
+ - name: need_notification
+ type: boolean
+ required: false
+ label:
+ en_US: Need Notification
+ zh_Hans: 是否发送通知
+ human_description:
+ en_US: |
+ Whether to send a bot message when the event is updated, true: send, false: do not send.
+ zh_Hans: 更新日程时是否发送 bot 消息,true:发送,false:不发送。
+ llm_description: 更新日程时是否发送 bot 消息,true:发送,false:不发送。
+ form: form
+
+ - name: start_time
+ type: string
+ required: false
+ label:
+ en_US: Start Time
+ zh_Hans: 开始时间
+ human_description:
+ en_US: |
+ The start time of the event, format: 2006-01-02 15:04:05.
+ zh_Hans: 日程开始时间,格式:2006-01-02 15:04:05。
+ llm_description: 日程开始时间,格式:2006-01-02 15:04:05。
+ form: llm
+
+ - name: end_time
+ type: string
+ required: false
+ label:
+ en_US: End Time
+ zh_Hans: 结束时间
+ human_description:
+ en_US: |
+ The end time of the event, format: 2006-01-02 15:04:05.
+ zh_Hans: 日程结束时间,格式:2006-01-02 15:04:05。
+ llm_description: 日程结束时间,格式:2006-01-02 15:04:05。
+ form: llm
+
+ - name: auto_record
+ type: boolean
+ required: false
+ label:
+ en_US: Auto Record
+ zh_Hans: 自动录制
+ human_description:
+ en_US: |
+ Whether to enable automatic recording, true: enabled, automatically record when the meeting starts; false: not enabled.
+ zh_Hans: 是否开启自动录制,true:开启,会议开始后自动录制;false:不开启。
+ llm_description: 是否开启自动录制,true:开启,会议开始后自动录制;false:不开启。
+ form: form
diff --git a/api/core/tools/provider/builtin/feishu_document/feishu_document.py b/api/core/tools/provider/builtin/feishu_document/feishu_document.py
index b0a1e393eb..217ae52082 100644
--- a/api/core/tools/provider/builtin/feishu_document/feishu_document.py
+++ b/api/core/tools/provider/builtin/feishu_document/feishu_document.py
@@ -1,15 +1,7 @@
-from core.tools.errors import ToolProviderCredentialValidationError
from core.tools.provider.builtin_tool_provider import BuiltinToolProviderController
-from core.tools.utils.feishu_api_utils import FeishuRequest
+from core.tools.utils.feishu_api_utils import auth
class FeishuDocumentProvider(BuiltinToolProviderController):
def _validate_credentials(self, credentials: dict) -> None:
- app_id = credentials.get("app_id")
- app_secret = credentials.get("app_secret")
- if not app_id or not app_secret:
- raise ToolProviderCredentialValidationError("app_id and app_secret is required")
- try:
- assert FeishuRequest(app_id, app_secret).tenant_access_token is not None
- except Exception as e:
- raise ToolProviderCredentialValidationError(str(e))
+ auth(credentials)
diff --git a/api/core/tools/provider/builtin/feishu_document/feishu_document.yaml b/api/core/tools/provider/builtin/feishu_document/feishu_document.yaml
index 8eaa6b2704..8f9afa6149 100644
--- a/api/core/tools/provider/builtin/feishu_document/feishu_document.yaml
+++ b/api/core/tools/provider/builtin/feishu_document/feishu_document.yaml
@@ -5,8 +5,10 @@ identity:
en_US: Lark Cloud Document
zh_Hans: 飞书云文档
description:
- en_US: Lark Cloud Document
- zh_Hans: 飞书云文档
+ en_US: |
+ Lark cloud document, requires the following permissions: docx:document、drive:drive、docs:document.content:read.
+ zh_Hans: |
+ 飞书云文档,需要开通以下权限: docx:document、drive:drive、docs:document.content:read。
icon: icon.svg
tags:
- social
@@ -23,7 +25,7 @@ credentials_for_provider:
help:
en_US: Get your app_id and app_secret from Feishu
zh_Hans: 从飞书获取您的 app_id 和 app_secret
- url: https://open.feishu.cn
+ url: https://open.larkoffice.com/app
app_secret:
type: secret-input
required: true
diff --git a/api/core/tools/provider/builtin/feishu_document/tools/create_document.yaml b/api/core/tools/provider/builtin/feishu_document/tools/create_document.yaml
index ddf2729f0e..85382e9d8e 100644
--- a/api/core/tools/provider/builtin/feishu_document/tools/create_document.yaml
+++ b/api/core/tools/provider/builtin/feishu_document/tools/create_document.yaml
@@ -7,7 +7,7 @@ identity:
description:
human:
en_US: Create Lark document
- zh_Hans: 创建飞书文档,支持创建空文档和带内容的文档,支持 markdown 语法创建。
+ zh_Hans: 创建飞书文档,支持创建空文档和带内容的文档,支持 markdown 语法创建。应用需要开启机器人能力(https://open.feishu.cn/document/faq/trouble-shooting/how-to-enable-bot-ability)。
llm: A tool for creating Feishu documents.
parameters:
- name: title
@@ -41,7 +41,8 @@ parameters:
en_US: folder_token
zh_Hans: 文档所在文件夹的 Token
human_description:
- en_US: The token of the folder where the document is located. If it is not passed or is empty, it means the root directory.
- zh_Hans: 文档所在文件夹的 Token,不传或传空表示根目录。
- llm_description: 文档所在文件夹的 Token,不传或传空表示根目录。
+ en_US: |
+ The token of the folder where the document is located. If it is not passed or is empty, it means the root directory. For Example: https://svi136aogf123.feishu.cn/drive/folder/JgR9fiG9AlPt8EdsSNpcGjIInbf
+ zh_Hans: 文档所在文件夹的 Token,不传或传空表示根目录。例如:https://svi136aogf123.feishu.cn/drive/folder/JgR9fiG9AlPt8EdsSNpcGjIInbf。
+ llm_description: 文档所在文件夹的 Token,不传或传空表示根目录。例如:https://svi136aogf123.feishu.cn/drive/folder/JgR9fiG9AlPt8EdsSNpcGjIInbf。
form: llm
diff --git a/api/core/tools/provider/builtin/feishu_document/tools/get_document_content.py b/api/core/tools/provider/builtin/feishu_document/tools/get_document_content.py
index c94a5f70ed..e67a017fac 100644
--- a/api/core/tools/provider/builtin/feishu_document/tools/get_document_content.py
+++ b/api/core/tools/provider/builtin/feishu_document/tools/get_document_content.py
@@ -12,8 +12,8 @@ class GetDocumentRawContentTool(BuiltinTool):
client = FeishuRequest(app_id, app_secret)
document_id = tool_parameters.get("document_id")
- mode = tool_parameters.get("mode")
- lang = tool_parameters.get("lang", 0)
+ mode = tool_parameters.get("mode", "markdown")
+ lang = tool_parameters.get("lang", "0")
res = client.get_document_content(document_id, mode, lang)
return self.create_json_message(res)
diff --git a/api/core/tools/provider/builtin/feishu_document/tools/get_document_content.yaml b/api/core/tools/provider/builtin/feishu_document/tools/get_document_content.yaml
index 51eda73a60..15e827cde9 100644
--- a/api/core/tools/provider/builtin/feishu_document/tools/get_document_content.yaml
+++ b/api/core/tools/provider/builtin/feishu_document/tools/get_document_content.yaml
@@ -23,8 +23,18 @@ parameters:
form: llm
- name: mode
- type: string
+ type: select
required: false
+ options:
+ - value: text
+ label:
+ en_US: text
+ zh_Hans: text
+ - value: markdown
+ label:
+ en_US: markdown
+ zh_Hans: markdown
+ default: "markdown"
label:
en_US: mode
zh_Hans: 文档返回格式
@@ -32,18 +42,29 @@ parameters:
en_US: Format of the document return, optional values are text, markdown, can be empty, default is markdown.
zh_Hans: 文档返回格式,可选值有 text、markdown,可以为空,默认值为 markdown。
llm_description: 文档返回格式,可选值有 text、markdown,可以为空,默认值为 markdown。
- form: llm
+ form: form
- name: lang
- type: number
+ type: select
required: false
- default: 0
+ options:
+ - value: "0"
+ label:
+ en_US: User's default name
+ zh_Hans: 用户的默认名称
+ - value: "1"
+ label:
+ en_US: User's English name
+ zh_Hans: 用户的英文名称
+ default: "0"
label:
en_US: lang
zh_Hans: 指定@用户的语言
human_description:
en_US: |
Specifies the language for MentionUser, optional values are [0, 1]. 0: User's default name, 1: User's English name, default is 0.
- zh_Hans: 指定返回的 MentionUser,即 @用户 的语言,可选值有 [0,1]。0:该用户的默认名称,1:该用户的英文名称,默认值为 0。
- llm_description: 指定返回的 MentionUser,即 @用户 的语言,可选值有 [0,1]。0:该用户的默认名称,1:该用户的英文名称,默认值为 0。
- form: llm
+ zh_Hans: |
+ 指定返回的 MentionUser,即@用户的语言,可选值有 [0,1]。0: 该用户的默认名称,1: 该用户的英文名称,默认值为 0。
+ llm_description: |
+ 指定返回的 MentionUser,即@用户的语言,可选值有 [0,1]。0: 该用户的默认名称,1: 该用户的英文名称,默认值为 0。
+ form: form
diff --git a/api/core/tools/provider/builtin/feishu_document/tools/list_document_blocks.py b/api/core/tools/provider/builtin/feishu_document/tools/list_document_blocks.py
index 572a7abf28..dd57c6870d 100644
--- a/api/core/tools/provider/builtin/feishu_document/tools/list_document_blocks.py
+++ b/api/core/tools/provider/builtin/feishu_document/tools/list_document_blocks.py
@@ -12,8 +12,9 @@ class ListDocumentBlockTool(BuiltinTool):
client = FeishuRequest(app_id, app_secret)
document_id = tool_parameters.get("document_id")
- page_size = tool_parameters.get("page_size", 500)
page_token = tool_parameters.get("page_token", "")
+ user_id_type = tool_parameters.get("user_id_type", "open_id")
+ page_size = tool_parameters.get("page_size", 500)
- res = client.list_document_blocks(document_id, page_token, page_size)
+ res = client.list_document_blocks(document_id, page_token, user_id_type, page_size)
return self.create_json_message(res)
diff --git a/api/core/tools/provider/builtin/feishu_document/tools/list_document_blocks.yaml b/api/core/tools/provider/builtin/feishu_document/tools/list_document_blocks.yaml
index 019ac98390..5b8ef7d53c 100644
--- a/api/core/tools/provider/builtin/feishu_document/tools/list_document_blocks.yaml
+++ b/api/core/tools/provider/builtin/feishu_document/tools/list_document_blocks.yaml
@@ -46,12 +46,12 @@ parameters:
en_US: User ID type, optional values are open_id, union_id, user_id, with a default value of open_id.
zh_Hans: 用户 ID 类型,可选值有 open_id、union_id、user_id,默认值为 open_id。
llm_description: 用户 ID 类型,可选值有 open_id、union_id、user_id,默认值为 open_id。
- form: llm
+ form: form
- name: page_size
type: number
required: false
- default: "500"
+ default: 500
label:
en_US: page_size
zh_Hans: 分页大小
diff --git a/api/core/tools/provider/builtin/feishu_document/tools/write_document.py b/api/core/tools/provider/builtin/feishu_document/tools/write_document.py
index 6061250e48..59f08f53dc 100644
--- a/api/core/tools/provider/builtin/feishu_document/tools/write_document.py
+++ b/api/core/tools/provider/builtin/feishu_document/tools/write_document.py
@@ -13,7 +13,7 @@ class CreateDocumentTool(BuiltinTool):
document_id = tool_parameters.get("document_id")
content = tool_parameters.get("content")
- position = tool_parameters.get("position")
+ position = tool_parameters.get("position", "end")
res = client.write_document(document_id, content, position)
return self.create_json_message(res)
diff --git a/api/core/tools/provider/builtin/feishu_document/tools/write_document.yaml b/api/core/tools/provider/builtin/feishu_document/tools/write_document.yaml
index 4282e3dcf3..de70f4e772 100644
--- a/api/core/tools/provider/builtin/feishu_document/tools/write_document.yaml
+++ b/api/core/tools/provider/builtin/feishu_document/tools/write_document.yaml
@@ -35,25 +35,23 @@ parameters:
form: llm
- name: position
- type: string
+ type: select
required: false
- label:
- en_US: position
- zh_Hans: 添加位置
- human_description:
- en_US: |
- Enumeration values: start or end. Use 'start' to add content at the beginning of the document, and 'end' to add content at the end. The default value is 'end'.
- zh_Hans: 枚举值:start 或 end。使用 'start' 在文档开头添加内容,使用 'end' 在文档结尾添加内容,默认值为 'end'。
- llm_description: |
- 枚举值 start、end,start: 在文档开头添加内容;end: 在文档结尾添加内容,默认值为 end。
- form: llm
options:
- value: start
label:
- en_US: start
- zh_Hans: 在文档开头添加内容
+ en_US: document start
+ zh_Hans: 文档开始
- value: end
label:
- en_US: end
- zh_Hans: 在文档结尾添加内容
- default: start
+ en_US: document end
+ zh_Hans: 文档结束
+ default: "end"
+ label:
+ en_US: position
+ zh_Hans: 内容添加位置
+ human_description:
+ en_US: Content insertion position, optional values are start, end. 'start' means adding content at the beginning of the document; 'end' means adding content at the end of the document. The default value is end.
+ zh_Hans: 内容添加位置,可选值有 start、end。start 表示在文档开头添加内容;end 表示在文档结尾添加内容,默认值为 end。
+ llm_description: 内容添加位置,可选值有 start、end。start 表示在文档开头添加内容;end 表示在文档结尾添加内容,默认值为 end。
+ form: form
diff --git a/api/core/tools/provider/builtin/feishu_message/feishu_message.py b/api/core/tools/provider/builtin/feishu_message/feishu_message.py
index 7b3adb9293..a3b5473769 100644
--- a/api/core/tools/provider/builtin/feishu_message/feishu_message.py
+++ b/api/core/tools/provider/builtin/feishu_message/feishu_message.py
@@ -1,15 +1,7 @@
-from core.tools.errors import ToolProviderCredentialValidationError
from core.tools.provider.builtin_tool_provider import BuiltinToolProviderController
-from core.tools.utils.feishu_api_utils import FeishuRequest
+from core.tools.utils.feishu_api_utils import auth
class FeishuMessageProvider(BuiltinToolProviderController):
def _validate_credentials(self, credentials: dict) -> None:
- app_id = credentials.get("app_id")
- app_secret = credentials.get("app_secret")
- if not app_id or not app_secret:
- raise ToolProviderCredentialValidationError("app_id and app_secret is required")
- try:
- assert FeishuRequest(app_id, app_secret).tenant_access_token is not None
- except Exception as e:
- raise ToolProviderCredentialValidationError(str(e))
+ auth(credentials)
diff --git a/api/core/tools/provider/builtin/feishu_message/feishu_message.yaml b/api/core/tools/provider/builtin/feishu_message/feishu_message.yaml
index 1bd8953ddd..56683ec168 100644
--- a/api/core/tools/provider/builtin/feishu_message/feishu_message.yaml
+++ b/api/core/tools/provider/builtin/feishu_message/feishu_message.yaml
@@ -5,8 +5,10 @@ identity:
en_US: Lark Message
zh_Hans: 飞书消息
description:
- en_US: Lark Message
- zh_Hans: 飞书消息
+ en_US: |
+ Lark message, requires the following permissions: im:message、im:message.group_msg.
+ zh_Hans: |
+ 飞书消息,需要开通以下权限: im:message、im:message.group_msg。
icon: icon.svg
tags:
- social
@@ -23,7 +25,7 @@ credentials_for_provider:
help:
en_US: Get your app_id and app_secret from Feishu
zh_Hans: 从飞书获取您的 app_id 和 app_secret
- url: https://open.feishu.cn
+ url: https://open.larkoffice.com/app
app_secret:
type: secret-input
required: true
diff --git a/api/core/tools/provider/builtin/feishu_message/tools/get_chat_messages.py b/api/core/tools/provider/builtin/feishu_message/tools/get_chat_messages.py
new file mode 100644
index 0000000000..7eb29230b2
--- /dev/null
+++ b/api/core/tools/provider/builtin/feishu_message/tools/get_chat_messages.py
@@ -0,0 +1,23 @@
+from typing import Any
+
+from core.tools.entities.tool_entities import ToolInvokeMessage
+from core.tools.tool.builtin_tool import BuiltinTool
+from core.tools.utils.feishu_api_utils import FeishuRequest
+
+
+class GetChatMessagesTool(BuiltinTool):
+ def _invoke(self, user_id: str, tool_parameters: dict[str, Any]) -> ToolInvokeMessage:
+ app_id = self.runtime.credentials.get("app_id")
+ app_secret = self.runtime.credentials.get("app_secret")
+ client = FeishuRequest(app_id, app_secret)
+
+ container_id = tool_parameters.get("container_id")
+ start_time = tool_parameters.get("start_time")
+ end_time = tool_parameters.get("end_time")
+ page_token = tool_parameters.get("page_token")
+ sort_type = tool_parameters.get("sort_type", "ByCreateTimeAsc")
+ page_size = tool_parameters.get("page_size", 20)
+
+ res = client.get_chat_messages(container_id, start_time, end_time, page_token, sort_type, page_size)
+
+ return self.create_json_message(res)
diff --git a/api/core/tools/provider/builtin/feishu_message/tools/get_chat_messages.yaml b/api/core/tools/provider/builtin/feishu_message/tools/get_chat_messages.yaml
new file mode 100644
index 0000000000..153c8c80e5
--- /dev/null
+++ b/api/core/tools/provider/builtin/feishu_message/tools/get_chat_messages.yaml
@@ -0,0 +1,96 @@
+identity:
+ name: get_chat_messages
+ author: Doug Lea
+ label:
+ en_US: Get Chat Messages
+ zh_Hans: 获取指定单聊、群聊的消息历史
+description:
+ human:
+ en_US: Get Chat Messages
+ zh_Hans: 获取指定单聊、群聊的消息历史
+ llm: A tool for getting chat messages from specific one-on-one chats or group chats.(获取指定单聊、群聊的消息历史)
+parameters:
+ - name: container_id
+ type: string
+ required: true
+ label:
+ en_US: Container Id
+ zh_Hans: 群聊或单聊的 ID
+ human_description:
+ en_US: The ID of the group chat or single chat. Refer to the group ID description for how to obtain it. https://open.feishu.cn/document/server-docs/group/chat/chat-id-description
+ zh_Hans: 群聊或单聊的 ID,获取方式参见群 ID 说明。https://open.feishu.cn/document/server-docs/group/chat/chat-id-description
+ llm_description: 群聊或单聊的 ID,获取方式参见群 ID 说明。https://open.feishu.cn/document/server-docs/group/chat/chat-id-description
+ form: llm
+
+ - name: start_time
+ type: string
+ required: false
+ label:
+ en_US: Start Time
+ zh_Hans: 起始时间
+ human_description:
+ en_US: The start time for querying historical messages, formatted as "2006-01-02 15:04:05".
+ zh_Hans: 待查询历史信息的起始时间,格式为 "2006-01-02 15:04:05"。
+ llm_description: 待查询历史信息的起始时间,格式为 "2006-01-02 15:04:05"。
+ form: llm
+
+ - name: end_time
+ type: string
+ required: false
+ label:
+ en_US: End Time
+ zh_Hans: 结束时间
+ human_description:
+ en_US: The end time for querying historical messages, formatted as "2006-01-02 15:04:05".
+ zh_Hans: 待查询历史信息的结束时间,格式为 "2006-01-02 15:04:05"。
+ llm_description: 待查询历史信息的结束时间,格式为 "2006-01-02 15:04:05"。
+ form: llm
+
+ - name: sort_type
+ type: select
+ required: false
+ options:
+ - value: ByCreateTimeAsc
+ label:
+ en_US: ByCreateTimeAsc
+ zh_Hans: ByCreateTimeAsc
+ - value: ByCreateTimeDesc
+ label:
+ en_US: ByCreateTimeDesc
+ zh_Hans: ByCreateTimeDesc
+ default: "ByCreateTimeAsc"
+ label:
+ en_US: Sort Type
+ zh_Hans: 排序方式
+ human_description:
+ en_US: |
+ The message sorting method. Optional values are ByCreateTimeAsc: sorted in ascending order by message creation time; ByCreateTimeDesc: sorted in descending order by message creation time. The default value is ByCreateTimeAsc. Note: When using page_token for pagination requests, the sorting method (sort_type) is consistent with the first request and cannot be changed midway.
+ zh_Hans: |
+ 消息排序方式,可选值有 ByCreateTimeAsc:按消息创建时间升序排列;ByCreateTimeDesc:按消息创建时间降序排列。默认值为:ByCreateTimeAsc。注意:使用 page_token 分页请求时,排序方式(sort_type)均与第一次请求一致,不支持中途改换排序方式。
+ llm_description: 消息排序方式,可选值有 ByCreateTimeAsc:按消息创建时间升序排列;ByCreateTimeDesc:按消息创建时间降序排列。默认值为:ByCreateTimeAsc。注意:使用 page_token 分页请求时,排序方式(sort_type)均与第一次请求一致,不支持中途改换排序方式。
+ form: form
+
+ - name: page_size
+ type: number
+ required: false
+ default: 20
+ label:
+ en_US: Page Size
+ zh_Hans: 分页大小
+ human_description:
+ en_US: The page size, i.e., the number of data entries returned in a single request. The default value is 20, and the value range is [1,50].
+ zh_Hans: 分页大小,即单次请求所返回的数据条目数。默认值为 20,取值范围为 [1,50]。
+ llm_description: 分页大小,即单次请求所返回的数据条目数。默认值为 20,取值范围为 [1,50]。
+ form: llm
+
+ - name: page_token
+ type: string
+ required: false
+ label:
+ en_US: Page Token
+ zh_Hans: 分页标记
+ human_description:
+ en_US: The pagination token. Leave it blank for the first request, indicating to start traversing from the beginning; when the pagination query result has more items, a new page_token will be returned simultaneously, which can be used to obtain the query result in the next traversal.
+ zh_Hans: 分页标记,第一次请求不填,表示从头开始遍历;分页查询结果还有更多项时会同时返回新的 page_token,下次遍历可采用该 page_token 获取查询结果。
+ llm_description: 分页标记,第一次请求不填,表示从头开始遍历;分页查询结果还有更多项时会同时返回新的 page_token,下次遍历可采用该 page_token 获取查询结果。
+ form: llm
diff --git a/api/core/tools/provider/builtin/feishu_message/tools/get_thread_messages.py b/api/core/tools/provider/builtin/feishu_message/tools/get_thread_messages.py
new file mode 100644
index 0000000000..3b14f46e00
--- /dev/null
+++ b/api/core/tools/provider/builtin/feishu_message/tools/get_thread_messages.py
@@ -0,0 +1,21 @@
+from typing import Any
+
+from core.tools.entities.tool_entities import ToolInvokeMessage
+from core.tools.tool.builtin_tool import BuiltinTool
+from core.tools.utils.feishu_api_utils import FeishuRequest
+
+
+class GetThreadMessagesTool(BuiltinTool):
+ def _invoke(self, user_id: str, tool_parameters: dict[str, Any]) -> ToolInvokeMessage:
+ app_id = self.runtime.credentials.get("app_id")
+ app_secret = self.runtime.credentials.get("app_secret")
+ client = FeishuRequest(app_id, app_secret)
+
+ container_id = tool_parameters.get("container_id")
+ page_token = tool_parameters.get("page_token")
+ sort_type = tool_parameters.get("sort_type", "ByCreateTimeAsc")
+ page_size = tool_parameters.get("page_size", 20)
+
+ res = client.get_thread_messages(container_id, page_token, sort_type, page_size)
+
+ return self.create_json_message(res)
diff --git a/api/core/tools/provider/builtin/feishu_message/tools/get_thread_messages.yaml b/api/core/tools/provider/builtin/feishu_message/tools/get_thread_messages.yaml
new file mode 100644
index 0000000000..8d5fed9d0b
--- /dev/null
+++ b/api/core/tools/provider/builtin/feishu_message/tools/get_thread_messages.yaml
@@ -0,0 +1,72 @@
+identity:
+ name: get_thread_messages
+ author: Doug Lea
+ label:
+ en_US: Get Thread Messages
+ zh_Hans: 获取指定话题的消息历史
+description:
+ human:
+ en_US: Get Thread Messages
+ zh_Hans: 获取指定话题的消息历史
+ llm: A tool for getting chat messages from specific threads.(获取指定话题的消息历史)
+parameters:
+ - name: container_id
+ type: string
+ required: true
+ label:
+ en_US: Thread Id
+ zh_Hans: 话题 ID
+ human_description:
+ en_US: The ID of the thread. Refer to the thread overview on how to obtain the thread_id. https://open.feishu.cn/document/uAjLw4CM/ukTMukTMukTM/reference/im-v1/message/thread-introduction
+ zh_Hans: 话题 ID,获取方式参见话题概述的如何获取 thread_id 章节。https://open.feishu.cn/document/uAjLw4CM/ukTMukTMukTM/reference/im-v1/message/thread-introduction
+ llm_description: 话题 ID,获取方式参见话题概述的如何获取 thread_id 章节。https://open.feishu.cn/document/uAjLw4CM/ukTMukTMukTM/reference/im-v1/message/thread-introduction
+ form: llm
+
+ - name: sort_type
+ type: select
+ required: false
+ options:
+ - value: ByCreateTimeAsc
+ label:
+ en_US: ByCreateTimeAsc
+ zh_Hans: ByCreateTimeAsc
+ - value: ByCreateTimeDesc
+ label:
+ en_US: ByCreateTimeDesc
+ zh_Hans: ByCreateTimeDesc
+ default: "ByCreateTimeAsc"
+ label:
+ en_US: Sort Type
+ zh_Hans: 排序方式
+ human_description:
+ en_US: |
+ The message sorting method. Optional values are ByCreateTimeAsc: sorted in ascending order by message creation time; ByCreateTimeDesc: sorted in descending order by message creation time. The default value is ByCreateTimeAsc. Note: When using page_token for pagination requests, the sorting method (sort_type) is consistent with the first request and cannot be changed midway.
+ zh_Hans: |
+ 消息排序方式,可选值有 ByCreateTimeAsc:按消息创建时间升序排列;ByCreateTimeDesc:按消息创建时间降序排列。默认值为:ByCreateTimeAsc。注意:使用 page_token 分页请求时,排序方式(sort_type)均与第一次请求一致,不支持中途改换排序方式。
+ llm_description: 消息排序方式,可选值有 ByCreateTimeAsc:按消息创建时间升序排列;ByCreateTimeDesc:按消息创建时间降序排列。默认值为:ByCreateTimeAsc。注意:使用 page_token 分页请求时,排序方式(sort_type)均与第一次请求一致,不支持中途改换排序方式。
+ form: form
+
+ - name: page_size
+ type: number
+ required: false
+ default: 20
+ label:
+ en_US: Page Size
+ zh_Hans: 分页大小
+ human_description:
+ en_US: The page size, i.e., the number of data entries returned in a single request. The default value is 20, and the value range is [1,50].
+ zh_Hans: 分页大小,即单次请求所返回的数据条目数。默认值为 20,取值范围为 [1,50]。
+ llm_description: 分页大小,即单次请求所返回的数据条目数。默认值为 20,取值范围为 [1,50]。
+ form: llm
+
+ - name: page_token
+ type: string
+ required: false
+ label:
+ en_US: Page Token
+ zh_Hans: 分页标记
+ human_description:
+ en_US: The pagination token. Leave it blank for the first request, indicating to start traversing from the beginning; when the pagination query result has more items, a new page_token will be returned simultaneously, which can be used to obtain the query result in the next traversal.
+ zh_Hans: 分页标记,第一次请求不填,表示从头开始遍历;分页查询结果还有更多项时会同时返回新的 page_token,下次遍历可采用该 page_token 获取查询结果。
+ llm_description: 分页标记,第一次请求不填,表示从头开始遍历;分页查询结果还有更多项时会同时返回新的 page_token,下次遍历可采用该 page_token 获取查询结果。
+ form: llm
diff --git a/api/core/tools/provider/builtin/feishu_message/tools/send_bot_message.yaml b/api/core/tools/provider/builtin/feishu_message/tools/send_bot_message.yaml
index 6e398b18ab..4f7f65a8a7 100644
--- a/api/core/tools/provider/builtin/feishu_message/tools/send_bot_message.yaml
+++ b/api/core/tools/provider/builtin/feishu_message/tools/send_bot_message.yaml
@@ -10,53 +10,53 @@ description:
zh_Hans: 发送飞书应用消息
llm: A tool for sending Feishu application messages.
parameters:
+ - name: receive_id
+ type: string
+ required: true
+ label:
+ en_US: receive_id
+ zh_Hans: 消息接收者的 ID
+ human_description:
+ en_US: The ID of the message receiver, the ID type is consistent with the value of the query parameter receive_id_type.
+ zh_Hans: 消息接收者的 ID,ID 类型与查询参数 receive_id_type 的取值一致。
+ llm_description: 消息接收者的 ID,ID 类型与查询参数 receive_id_type 的取值一致。
+ form: llm
+
- name: receive_id_type
type: select
required: true
options:
- value: open_id
label:
- en_US: open id
- zh_Hans: open id
+ en_US: open_id
+ zh_Hans: open_id
- value: union_id
label:
- en_US: union id
- zh_Hans: union id
+ en_US: union_id
+ zh_Hans: union_id
- value: user_id
label:
- en_US: user id
- zh_Hans: user id
+ en_US: user_id
+ zh_Hans: user_id
- value: email
label:
en_US: email
zh_Hans: email
- value: chat_id
label:
- en_US: chat id
- zh_Hans: chat id
+ en_US: chat_id
+ zh_Hans: chat_id
label:
- en_US: User ID Type
- zh_Hans: 用户 ID 类型
+ en_US: receive_id_type
+ zh_Hans: 消息接收者的 ID 类型
human_description:
- en_US: User ID Type
- zh_Hans: 用户 ID 类型,可选值有 open_id、union_id、user_id、email、chat_id。
- llm_description: 用户 ID 类型,可选值有 open_id、union_id、user_id、email、chat_id。
- form: llm
-
- - name: receive_id
- type: string
- required: true
- label:
- en_US: Receive Id
- zh_Hans: 消息接收者的 ID
- human_description:
- en_US: The ID of the message receiver. The ID type should correspond to the query parameter receive_id_type.
- zh_Hans: 消息接收者的 ID,ID 类型应与查询参数 receive_id_type 对应。
- llm_description: 消息接收者的 ID,ID 类型应与查询参数 receive_id_type 对应。
- form: llm
+ en_US: The ID type of the message receiver, optional values are open_id, union_id, user_id, email, chat_id, with a default value of open_id.
+ zh_Hans: 消息接收者的 ID 类型,可选值有 open_id、union_id、user_id、email、chat_id,默认值为 open_id。
+ llm_description: 消息接收者的 ID 类型,可选值有 open_id、union_id、user_id、email、chat_id,默认值为 open_id。
+ form: form
- name: msg_type
- type: string
+ type: select
required: true
options:
- value: text
@@ -65,27 +65,61 @@ parameters:
zh_Hans: 文本
- value: interactive
label:
- en_US: message card
- zh_Hans: 消息卡片
+ en_US: interactive
+ zh_Hans: 卡片
+ - value: post
+ label:
+ en_US: post
+ zh_Hans: 富文本
+ - value: image
+ label:
+ en_US: image
+ zh_Hans: 图片
+ - value: file
+ label:
+ en_US: file
+ zh_Hans: 文件
+ - value: audio
+ label:
+ en_US: audio
+ zh_Hans: 语音
+ - value: media
+ label:
+ en_US: media
+ zh_Hans: 视频
+ - value: sticker
+ label:
+ en_US: sticker
+ zh_Hans: 表情包
+ - value: share_chat
+ label:
+ en_US: share_chat
+ zh_Hans: 分享群名片
+ - value: share_user
+ label:
+ en_US: share_user
+ zh_Hans: 分享个人名片
+ - value: system
+ label:
+ en_US: system
+ zh_Hans: 系统消息
label:
- en_US: Message type
+ en_US: msg_type
zh_Hans: 消息类型
human_description:
- en_US: Message type, optional values are, text (text), interactive (message card).
- zh_Hans: 消息类型,可选值有:text(文本)、interactive(消息卡片)。
- llm_description: 消息类型,可选值有:text(文本)、interactive(消息卡片)。
- form: llm
+ en_US: Message type. Optional values are text, post, image, file, audio, media, sticker, interactive, share_chat, share_user, system. For detailed introduction of different message types, refer to the message content(https://open.larkoffice.com/document/server-docs/im-v1/message-content-description/create_json).
+ zh_Hans: 消息类型。可选值有:text、post、image、file、audio、media、sticker、interactive、share_chat、share_user、system。不同消息类型的详细介绍,参见发送消息内容(https://open.larkoffice.com/document/server-docs/im-v1/message-content-description/create_json)。
+ llm_description: 消息类型。可选值有:text、post、image、file、audio、media、sticker、interactive、share_chat、share_user、system。不同消息类型的详细介绍,参见发送消息内容(https://open.larkoffice.com/document/server-docs/im-v1/message-content-description/create_json)。
+ form: form
- name: content
type: string
required: true
label:
- en_US: Message content
+ en_US: content
zh_Hans: 消息内容
human_description:
- en_US: Message content
- zh_Hans: |
- 消息内容,JSON 结构序列化后的字符串。不同 msg_type 对应不同内容,
- 具体格式说明参考:https://open.larkoffice.com/document/server-docs/im-v1/message-content-description/create_json
- llm_description: 消息内容,JSON 结构序列化后的字符串。不同 msg_type 对应不同内容。
+ en_US: Message content, a JSON structure serialized string. The value of this parameter corresponds to msg_type. For example, if msg_type is text, this parameter needs to pass in text type content. To understand the format and usage limitations of different message types, refer to the message content(https://open.larkoffice.com/document/server-docs/im-v1/message-content-description/create_json).
+ zh_Hans: 消息内容,JSON 结构序列化后的字符串。该参数的取值与 msg_type 对应,例如 msg_type 取值为 text,则该参数需要传入文本类型的内容。了解不同类型的消息内容格式、使用限制,可参见发送消息内容(https://open.larkoffice.com/document/server-docs/im-v1/message-content-description/create_json)。
+ llm_description: 消息内容,JSON 结构序列化后的字符串。该参数的取值与 msg_type 对应,例如 msg_type 取值为 text,则该参数需要传入文本类型的内容。了解不同类型的消息内容格式、使用限制,可参见发送消息内容(https://open.larkoffice.com/document/server-docs/im-v1/message-content-description/create_json)。
form: llm
diff --git a/api/core/tools/provider/builtin/feishu_message/tools/send_webhook_message.yaml b/api/core/tools/provider/builtin/feishu_message/tools/send_webhook_message.yaml
index 8b39ce4874..eeeae8b29c 100644
--- a/api/core/tools/provider/builtin/feishu_message/tools/send_webhook_message.yaml
+++ b/api/core/tools/provider/builtin/feishu_message/tools/send_webhook_message.yaml
@@ -15,15 +15,18 @@ parameters:
required: true
label:
en_US: webhook
- zh_Hans: webhook 的地址
+ zh_Hans: webhook
human_description:
- en_US: The address of the webhook
- zh_Hans: webhook 的地址
- llm_description: webhook 的地址
+ en_US: |
+ The address of the webhook, the format of the webhook address corresponding to the bot is as follows: https://open.feishu.cn/open-apis/bot/v2/hook/xxxxxxxxxxxxxxxxx. For details, please refer to: Feishu Custom Bot Usage Guide(https://open.larkoffice.com/document/client-docs/bot-v3/add-custom-bot)
+ zh_Hans: |
+ webhook 的地址,机器人对应的 webhook 地址格式如下: https://open.feishu.cn/open-apis/bot/v2/hook/xxxxxxxxxxxxxxxxx,详情可参考: 飞书自定义机器人使用指南(https://open.larkoffice.com/document/client-docs/bot-v3/add-custom-bot)
+ llm_description: |
+ webhook 的地址,机器人对应的 webhook 地址格式如下: https://open.feishu.cn/open-apis/bot/v2/hook/xxxxxxxxxxxxxxxxx,详情可参考: 飞书自定义机器人使用指南(https://open.larkoffice.com/document/client-docs/bot-v3/add-custom-bot)
form: llm
- name: msg_type
- type: string
+ type: select
required: true
options:
- value: text
@@ -32,27 +35,34 @@ parameters:
zh_Hans: 文本
- value: interactive
label:
- en_US: message card
- zh_Hans: 消息卡片
+ en_US: interactive
+ zh_Hans: 卡片
+ - value: image
+ label:
+ en_US: image
+ zh_Hans: 图片
+ - value: share_chat
+ label:
+ en_US: share_chat
+ zh_Hans: 分享群名片
label:
- en_US: Message type
+ en_US: msg_type
zh_Hans: 消息类型
human_description:
- en_US: Message type, optional values are, text (text), interactive (message card).
- zh_Hans: 消息类型,可选值有:text(文本)、interactive(消息卡片)。
- llm_description: 消息类型,可选值有:text(文本)、interactive(消息卡片)。
- form: llm
+ en_US: Message type. Optional values are text, image, interactive, share_chat. For detailed introduction of different message types, refer to the message content(https://open.larkoffice.com/document/server-docs/im-v1/message-content-description/create_json).
+ zh_Hans: 消息类型。可选值有:text、image、interactive、share_chat。不同消息类型的详细介绍,参见发送消息内容(https://open.larkoffice.com/document/server-docs/im-v1/message-content-description/create_json)。
+ llm_description: 消息类型。可选值有:text、image、interactive、share_chat。不同消息类型的详细介绍,参见发送消息内容(https://open.larkoffice.com/document/server-docs/im-v1/message-content-description/create_json)。
+ form: form
+
- name: content
type: string
required: true
label:
- en_US: Message content
+ en_US: content
zh_Hans: 消息内容
human_description:
- en_US: Message content
- zh_Hans: |
- 消息内容,JSON 结构序列化后的字符串。不同 msg_type 对应不同内容,
- 具体格式说明参考:https://open.larkoffice.com/document/server-docs/im-v1/message-content-description/create_json
- llm_description: 消息内容,JSON 结构序列化后的字符串。不同 msg_type 对应不同内容。
+ en_US: Message content, a JSON structure serialized string. The value of this parameter corresponds to msg_type. For example, if msg_type is text, this parameter needs to pass in text type content. To understand the format and usage limitations of different message types, refer to the message content(https://open.larkoffice.com/document/server-docs/im-v1/message-content-description/create_json).
+ zh_Hans: 消息内容,JSON 结构序列化后的字符串。该参数的取值与 msg_type 对应,例如 msg_type 取值为 text,则该参数需要传入文本类型的内容。了解不同类型的消息内容格式、使用限制,可参见发送消息内容(https://open.larkoffice.com/document/server-docs/im-v1/message-content-description/create_json)。
+ llm_description: 消息内容,JSON 结构序列化后的字符串。该参数的取值与 msg_type 对应,例如 msg_type 取值为 text,则该参数需要传入文本类型的内容。了解不同类型的消息内容格式、使用限制,可参见发送消息内容(https://open.larkoffice.com/document/server-docs/im-v1/message-content-description/create_json)。
form: llm
diff --git a/api/core/tools/provider/builtin/feishu_spreadsheet/_assets/icon.png b/api/core/tools/provider/builtin/feishu_spreadsheet/_assets/icon.png
new file mode 100644
index 0000000000..258b361261
Binary files /dev/null and b/api/core/tools/provider/builtin/feishu_spreadsheet/_assets/icon.png differ
diff --git a/api/core/tools/provider/builtin/feishu_spreadsheet/feishu_spreadsheet.py b/api/core/tools/provider/builtin/feishu_spreadsheet/feishu_spreadsheet.py
new file mode 100644
index 0000000000..a3b5473769
--- /dev/null
+++ b/api/core/tools/provider/builtin/feishu_spreadsheet/feishu_spreadsheet.py
@@ -0,0 +1,7 @@
+from core.tools.provider.builtin_tool_provider import BuiltinToolProviderController
+from core.tools.utils.feishu_api_utils import auth
+
+
+class FeishuSpreadsheetProvider(BuiltinToolProviderController):
+ def _validate_credentials(self, credentials: dict) -> None:
+ auth(credentials)
diff --git a/api/core/tools/provider/builtin/feishu_spreadsheet/feishu_spreadsheet.yaml b/api/core/tools/provider/builtin/feishu_spreadsheet/feishu_spreadsheet.yaml
new file mode 100644
index 0000000000..29e448d730
--- /dev/null
+++ b/api/core/tools/provider/builtin/feishu_spreadsheet/feishu_spreadsheet.yaml
@@ -0,0 +1,36 @@
+identity:
+ author: Doug Lea
+ name: feishu_spreadsheet
+ label:
+ en_US: Feishu Spreadsheet
+ zh_Hans: 飞书电子表格
+ description:
+ en_US: |
+ Feishu Spreadsheet, requires the following permissions: sheets:spreadsheet.
+ zh_Hans: |
+ 飞书电子表格,需要开通以下权限: sheets:spreadsheet。
+ icon: icon.png
+ tags:
+ - social
+ - productivity
+credentials_for_provider:
+ app_id:
+ type: text-input
+ required: true
+ label:
+ en_US: APP ID
+ placeholder:
+ en_US: Please input your feishu app id
+ zh_Hans: 请输入你的飞书 app id
+ help:
+ en_US: Get your app_id and app_secret from Feishu
+ zh_Hans: 从飞书获取您的 app_id 和 app_secret
+ url: https://open.larkoffice.com/app
+ app_secret:
+ type: secret-input
+ required: true
+ label:
+ en_US: APP Secret
+ placeholder:
+ en_US: Please input your app secret
+ zh_Hans: 请输入你的飞书 app secret
diff --git a/api/core/tools/provider/builtin/feishu_spreadsheet/tools/add_cols.py b/api/core/tools/provider/builtin/feishu_spreadsheet/tools/add_cols.py
new file mode 100644
index 0000000000..44d062f9bd
--- /dev/null
+++ b/api/core/tools/provider/builtin/feishu_spreadsheet/tools/add_cols.py
@@ -0,0 +1,22 @@
+from typing import Any
+
+from core.tools.entities.tool_entities import ToolInvokeMessage
+from core.tools.tool.builtin_tool import BuiltinTool
+from core.tools.utils.feishu_api_utils import FeishuRequest
+
+
+class AddColsTool(BuiltinTool):
+ def _invoke(self, user_id: str, tool_parameters: dict[str, Any]) -> ToolInvokeMessage:
+ app_id = self.runtime.credentials.get("app_id")
+ app_secret = self.runtime.credentials.get("app_secret")
+ client = FeishuRequest(app_id, app_secret)
+
+ spreadsheet_token = tool_parameters.get("spreadsheet_token")
+ sheet_id = tool_parameters.get("sheet_id")
+ sheet_name = tool_parameters.get("sheet_name")
+ length = tool_parameters.get("length")
+ values = tool_parameters.get("values")
+
+ res = client.add_cols(spreadsheet_token, sheet_id, sheet_name, length, values)
+
+ return self.create_json_message(res)
diff --git a/api/core/tools/provider/builtin/feishu_spreadsheet/tools/add_cols.yaml b/api/core/tools/provider/builtin/feishu_spreadsheet/tools/add_cols.yaml
new file mode 100644
index 0000000000..ef457f8e00
--- /dev/null
+++ b/api/core/tools/provider/builtin/feishu_spreadsheet/tools/add_cols.yaml
@@ -0,0 +1,72 @@
+identity:
+ name: add_cols
+ author: Doug Lea
+ label:
+ en_US: Add Cols
+ zh_Hans: 新增多列至工作表最后
+description:
+ human:
+ en_US: Add Cols
+ zh_Hans: 新增多列至工作表最后
+ llm: A tool for adding multiple columns to the end of a spreadsheet. (新增多列至工作表最后)
+parameters:
+ - name: spreadsheet_token
+ type: string
+ required: true
+ label:
+ en_US: spreadsheet_token
+ zh_Hans: 电子表格 token
+ human_description:
+ en_US: Spreadsheet token, supports input of spreadsheet URL.
+ zh_Hans: 电子表格 token,支持输入电子表格 url。
+ llm_description: 电子表格 token,支持输入电子表格 url。
+ form: llm
+
+ - name: sheet_id
+ type: string
+ required: false
+ label:
+ en_US: sheet_id
+ zh_Hans: 工作表 ID
+ human_description:
+ en_US: Sheet ID, either sheet_id or sheet_name must be filled.
+ zh_Hans: 工作表 ID,与 sheet_name 二者其一必填。
+ llm_description: 工作表 ID,与 sheet_name 二者其一必填。
+ form: llm
+
+ - name: sheet_name
+ type: string
+ required: false
+ label:
+ en_US: sheet_name
+ zh_Hans: 工作表名称
+ human_description:
+ en_US: Sheet name, either sheet_id or sheet_name must be filled.
+ zh_Hans: 工作表名称,与 sheet_id 二者其一必填。
+ llm_description: 工作表名称,与 sheet_id 二者其一必填。
+ form: llm
+
+ - name: length
+ type: number
+ required: true
+ label:
+ en_US: length
+ zh_Hans: 要增加的列数
+ human_description:
+ en_US: Number of columns to add, range (0-5000].
+ zh_Hans: 要增加的列数,范围(0-5000]。
+ llm_description: 要增加的列数,范围(0-5000]。
+ form: llm
+
+ - name: values
+ type: string
+ required: false
+ label:
+ en_US: values
+ zh_Hans: 新增列的单元格内容
+ human_description:
+ en_US: |
+ Content of the new columns, array of objects in string format, each array represents a row of table data, format like: [ [ "ID","Name","Age" ],[ 1,"Zhang San",10 ],[ 2,"Li Si",11 ] ].
+ zh_Hans: 新增列的单元格内容,数组对象字符串,每个数组一行表格数据,格式:[["编号","姓名","年龄"],[1,"张三",10],[2,"李四",11]]。
+ llm_description: 新增列的单元格内容,数组对象字符串,每个数组一行表格数据,格式:[["编号","姓名","年龄"],[1,"张三",10],[2,"李四",11]]。
+ form: llm
diff --git a/api/core/tools/provider/builtin/feishu_spreadsheet/tools/add_rows.py b/api/core/tools/provider/builtin/feishu_spreadsheet/tools/add_rows.py
new file mode 100644
index 0000000000..3a85b7b46c
--- /dev/null
+++ b/api/core/tools/provider/builtin/feishu_spreadsheet/tools/add_rows.py
@@ -0,0 +1,22 @@
+from typing import Any
+
+from core.tools.entities.tool_entities import ToolInvokeMessage
+from core.tools.tool.builtin_tool import BuiltinTool
+from core.tools.utils.feishu_api_utils import FeishuRequest
+
+
+class AddRowsTool(BuiltinTool):
+ def _invoke(self, user_id: str, tool_parameters: dict[str, Any]) -> ToolInvokeMessage:
+ app_id = self.runtime.credentials.get("app_id")
+ app_secret = self.runtime.credentials.get("app_secret")
+ client = FeishuRequest(app_id, app_secret)
+
+ spreadsheet_token = tool_parameters.get("spreadsheet_token")
+ sheet_id = tool_parameters.get("sheet_id")
+ sheet_name = tool_parameters.get("sheet_name")
+ length = tool_parameters.get("length")
+ values = tool_parameters.get("values")
+
+ res = client.add_rows(spreadsheet_token, sheet_id, sheet_name, length, values)
+
+ return self.create_json_message(res)
diff --git a/api/core/tools/provider/builtin/feishu_spreadsheet/tools/add_rows.yaml b/api/core/tools/provider/builtin/feishu_spreadsheet/tools/add_rows.yaml
new file mode 100644
index 0000000000..37653325ae
--- /dev/null
+++ b/api/core/tools/provider/builtin/feishu_spreadsheet/tools/add_rows.yaml
@@ -0,0 +1,72 @@
+identity:
+ name: add_rows
+ author: Doug Lea
+ label:
+ en_US: Add Rows
+ zh_Hans: 新增多行至工作表最后
+description:
+ human:
+ en_US: Add Rows
+ zh_Hans: 新增多行至工作表最后
+ llm: A tool for adding multiple rows to the end of a spreadsheet. (新增多行至工作表最后)
+parameters:
+ - name: spreadsheet_token
+ type: string
+ required: true
+ label:
+ en_US: spreadsheet_token
+ zh_Hans: 电子表格 token
+ human_description:
+ en_US: Spreadsheet token, supports input of spreadsheet URL.
+ zh_Hans: 电子表格 token,支持输入电子表格 url。
+ llm_description: 电子表格 token,支持输入电子表格 url。
+ form: llm
+
+ - name: sheet_id
+ type: string
+ required: false
+ label:
+ en_US: sheet_id
+ zh_Hans: 工作表 ID
+ human_description:
+ en_US: Sheet ID, either sheet_id or sheet_name must be filled.
+ zh_Hans: 工作表 ID,与 sheet_name 二者其一必填。
+ llm_description: 工作表 ID,与 sheet_name 二者其一必填。
+ form: llm
+
+ - name: sheet_name
+ type: string
+ required: false
+ label:
+ en_US: sheet_name
+ zh_Hans: 工作表名称
+ human_description:
+ en_US: Sheet name, either sheet_id or sheet_name must be filled.
+ zh_Hans: 工作表名称,与 sheet_id 二者其一必填。
+ llm_description: 工作表名称,与 sheet_id 二者其一必填。
+ form: llm
+
+ - name: length
+ type: number
+ required: true
+ label:
+ en_US: length
+ zh_Hans: 要增加的行数
+ human_description:
+ en_US: Number of rows to add, range (0-5000].
+ zh_Hans: 要增加的行数,范围(0-5000]。
+ llm_description: 要增加的行数,范围(0-5000]。
+ form: llm
+
+ - name: values
+ type: string
+ required: false
+ label:
+ en_US: values
+ zh_Hans: 新增行的表格内容
+ human_description:
+ en_US: |
+ Content of the new rows, array of objects in string format, each array represents a row of table data, format like: [ [ "ID","Name","Age" ],[ 1,"Zhang San",10 ],[ 2,"Li Si",11 ] ].
+ zh_Hans: 新增行的表格内容,数组对象字符串,每个数组一行表格数据,格式,如:[["编号","姓名","年龄"],[1,"张三",10],[2,"李四",11]]。
+ llm_description: 新增行的表格内容,数组对象字符串,每个数组一行表格数据,格式,如:[["编号","姓名","年龄"],[1,"张三",10],[2,"李四",11]]。
+ form: llm
diff --git a/api/core/tools/provider/builtin/feishu_spreadsheet/tools/create_spreadsheet.py b/api/core/tools/provider/builtin/feishu_spreadsheet/tools/create_spreadsheet.py
new file mode 100644
index 0000000000..647364fab0
--- /dev/null
+++ b/api/core/tools/provider/builtin/feishu_spreadsheet/tools/create_spreadsheet.py
@@ -0,0 +1,19 @@
+from typing import Any
+
+from core.tools.entities.tool_entities import ToolInvokeMessage
+from core.tools.tool.builtin_tool import BuiltinTool
+from core.tools.utils.feishu_api_utils import FeishuRequest
+
+
+class CreateSpreadsheetTool(BuiltinTool):
+ def _invoke(self, user_id: str, tool_parameters: dict[str, Any]) -> ToolInvokeMessage:
+ app_id = self.runtime.credentials.get("app_id")
+ app_secret = self.runtime.credentials.get("app_secret")
+ client = FeishuRequest(app_id, app_secret)
+
+ title = tool_parameters.get("title")
+ folder_token = tool_parameters.get("folder_token")
+
+ res = client.create_spreadsheet(title, folder_token)
+
+ return self.create_json_message(res)
diff --git a/api/core/tools/provider/builtin/feishu_spreadsheet/tools/create_spreadsheet.yaml b/api/core/tools/provider/builtin/feishu_spreadsheet/tools/create_spreadsheet.yaml
new file mode 100644
index 0000000000..931310e631
--- /dev/null
+++ b/api/core/tools/provider/builtin/feishu_spreadsheet/tools/create_spreadsheet.yaml
@@ -0,0 +1,35 @@
+identity:
+ name: create_spreadsheet
+ author: Doug Lea
+ label:
+ en_US: Create Spreadsheet
+ zh_Hans: 创建电子表格
+description:
+ human:
+ en_US: Create Spreadsheet
+ zh_Hans: 创建电子表格
+ llm: A tool for creating spreadsheets. (创建电子表格)
+parameters:
+ - name: title
+ type: string
+ required: false
+ label:
+ en_US: Spreadsheet Title
+ zh_Hans: 电子表格标题
+ human_description:
+ en_US: The title of the spreadsheet
+ zh_Hans: 电子表格的标题
+ llm_description: 电子表格的标题
+ form: llm
+
+ - name: folder_token
+ type: string
+ required: false
+ label:
+ en_US: Folder Token
+ zh_Hans: 文件夹 token
+ human_description:
+ en_US: The token of the folder, supports folder URL input, e.g., https://bytedance.larkoffice.com/drive/folder/CxHEf4DCSlNkL2dUTCJcPRgentg
+ zh_Hans: 文件夹 token,支持文件夹 URL 输入,如:https://bytedance.larkoffice.com/drive/folder/CxHEf4DCSlNkL2dUTCJcPRgentg
+ llm_description: 文件夹 token,支持文件夹 URL 输入,如:https://bytedance.larkoffice.com/drive/folder/CxHEf4DCSlNkL2dUTCJcPRgentg
+ form: llm
diff --git a/api/core/tools/provider/builtin/feishu_spreadsheet/tools/get_spreadsheet.py b/api/core/tools/provider/builtin/feishu_spreadsheet/tools/get_spreadsheet.py
new file mode 100644
index 0000000000..dda8c59daf
--- /dev/null
+++ b/api/core/tools/provider/builtin/feishu_spreadsheet/tools/get_spreadsheet.py
@@ -0,0 +1,19 @@
+from typing import Any
+
+from core.tools.entities.tool_entities import ToolInvokeMessage
+from core.tools.tool.builtin_tool import BuiltinTool
+from core.tools.utils.feishu_api_utils import FeishuRequest
+
+
+class GetSpreadsheetTool(BuiltinTool):
+ def _invoke(self, user_id: str, tool_parameters: dict[str, Any]) -> ToolInvokeMessage:
+ app_id = self.runtime.credentials.get("app_id")
+ app_secret = self.runtime.credentials.get("app_secret")
+ client = FeishuRequest(app_id, app_secret)
+
+ spreadsheet_token = tool_parameters.get("spreadsheet_token")
+ user_id_type = tool_parameters.get("user_id_type", "open_id")
+
+ res = client.get_spreadsheet(spreadsheet_token, user_id_type)
+
+ return self.create_json_message(res)
diff --git a/api/core/tools/provider/builtin/feishu_spreadsheet/tools/get_spreadsheet.yaml b/api/core/tools/provider/builtin/feishu_spreadsheet/tools/get_spreadsheet.yaml
new file mode 100644
index 0000000000..c519938617
--- /dev/null
+++ b/api/core/tools/provider/builtin/feishu_spreadsheet/tools/get_spreadsheet.yaml
@@ -0,0 +1,49 @@
+identity:
+ name: get_spreadsheet
+ author: Doug Lea
+ label:
+ en_US: Get Spreadsheet
+ zh_Hans: 获取电子表格信息
+description:
+ human:
+ en_US: Get Spreadsheet
+ zh_Hans: 获取电子表格信息
+ llm: A tool for getting information from spreadsheets. (获取电子表格信息)
+parameters:
+ - name: spreadsheet_token
+ type: string
+ required: true
+ label:
+ en_US: Spreadsheet Token
+ zh_Hans: 电子表格 token
+ human_description:
+ en_US: Spreadsheet token, supports input of spreadsheet URL.
+ zh_Hans: 电子表格 token,支持输入电子表格 URL。
+ llm_description: 电子表格 token,支持输入电子表格 URL。
+ form: llm
+
+ - name: user_id_type
+ type: select
+ required: false
+ options:
+ - value: open_id
+ label:
+ en_US: open_id
+ zh_Hans: open_id
+ - value: union_id
+ label:
+ en_US: union_id
+ zh_Hans: union_id
+ - value: user_id
+ label:
+ en_US: user_id
+ zh_Hans: user_id
+ default: "open_id"
+ label:
+ en_US: user_id_type
+ zh_Hans: 用户 ID 类型
+ human_description:
+ en_US: User ID type, optional values are open_id, union_id, user_id, with a default value of open_id.
+ zh_Hans: 用户 ID 类型,可选值有 open_id、union_id、user_id,默认值为 open_id。
+ llm_description: 用户 ID 类型,可选值有 open_id、union_id、user_id,默认值为 open_id。
+ form: form
diff --git a/api/core/tools/provider/builtin/feishu_spreadsheet/tools/list_spreadsheet_sheets.py b/api/core/tools/provider/builtin/feishu_spreadsheet/tools/list_spreadsheet_sheets.py
new file mode 100644
index 0000000000..98497791c0
--- /dev/null
+++ b/api/core/tools/provider/builtin/feishu_spreadsheet/tools/list_spreadsheet_sheets.py
@@ -0,0 +1,18 @@
+from typing import Any
+
+from core.tools.entities.tool_entities import ToolInvokeMessage
+from core.tools.tool.builtin_tool import BuiltinTool
+from core.tools.utils.feishu_api_utils import FeishuRequest
+
+
+class ListSpreadsheetSheetsTool(BuiltinTool):
+ def _invoke(self, user_id: str, tool_parameters: dict[str, Any]) -> ToolInvokeMessage:
+ app_id = self.runtime.credentials.get("app_id")
+ app_secret = self.runtime.credentials.get("app_secret")
+ client = FeishuRequest(app_id, app_secret)
+
+ spreadsheet_token = tool_parameters.get("spreadsheet_token")
+
+ res = client.list_spreadsheet_sheets(spreadsheet_token)
+
+ return self.create_json_message(res)
diff --git a/api/core/tools/provider/builtin/feishu_spreadsheet/tools/list_spreadsheet_sheets.yaml b/api/core/tools/provider/builtin/feishu_spreadsheet/tools/list_spreadsheet_sheets.yaml
new file mode 100644
index 0000000000..c6a7ef45d4
--- /dev/null
+++ b/api/core/tools/provider/builtin/feishu_spreadsheet/tools/list_spreadsheet_sheets.yaml
@@ -0,0 +1,23 @@
+identity:
+ name: list_spreadsheet_sheets
+ author: Doug Lea
+ label:
+ en_US: List Spreadsheet Sheets
+ zh_Hans: 列出电子表格所有工作表
+description:
+ human:
+ en_US: List Spreadsheet Sheets
+ zh_Hans: 列出电子表格所有工作表
+ llm: A tool for listing all sheets in a spreadsheet. (列出电子表格所有工作表)
+parameters:
+ - name: spreadsheet_token
+ type: string
+ required: true
+ label:
+ en_US: Spreadsheet Token
+ zh_Hans: 电子表格 token
+ human_description:
+ en_US: Spreadsheet token, supports input of spreadsheet URL.
+ zh_Hans: 电子表格 token,支持输入电子表格 URL。
+ llm_description: 电子表格 token,支持输入电子表格 URL。
+ form: llm
diff --git a/api/core/tools/provider/builtin/feishu_spreadsheet/tools/read_cols.py b/api/core/tools/provider/builtin/feishu_spreadsheet/tools/read_cols.py
new file mode 100644
index 0000000000..ebe3f619d0
--- /dev/null
+++ b/api/core/tools/provider/builtin/feishu_spreadsheet/tools/read_cols.py
@@ -0,0 +1,23 @@
+from typing import Any
+
+from core.tools.entities.tool_entities import ToolInvokeMessage
+from core.tools.tool.builtin_tool import BuiltinTool
+from core.tools.utils.feishu_api_utils import FeishuRequest
+
+
+class ReadColsTool(BuiltinTool):
+ def _invoke(self, user_id: str, tool_parameters: dict[str, Any]) -> ToolInvokeMessage:
+ app_id = self.runtime.credentials.get("app_id")
+ app_secret = self.runtime.credentials.get("app_secret")
+ client = FeishuRequest(app_id, app_secret)
+
+ spreadsheet_token = tool_parameters.get("spreadsheet_token")
+ sheet_id = tool_parameters.get("sheet_id")
+ sheet_name = tool_parameters.get("sheet_name")
+ start_col = tool_parameters.get("start_col")
+ num_cols = tool_parameters.get("num_cols")
+ user_id_type = tool_parameters.get("user_id_type", "open_id")
+
+ res = client.read_cols(spreadsheet_token, sheet_id, sheet_name, start_col, num_cols, user_id_type)
+
+ return self.create_json_message(res)
diff --git a/api/core/tools/provider/builtin/feishu_spreadsheet/tools/read_cols.yaml b/api/core/tools/provider/builtin/feishu_spreadsheet/tools/read_cols.yaml
new file mode 100644
index 0000000000..3273857b70
--- /dev/null
+++ b/api/core/tools/provider/builtin/feishu_spreadsheet/tools/read_cols.yaml
@@ -0,0 +1,97 @@
+identity:
+ name: read_cols
+ author: Doug Lea
+ label:
+ en_US: Read Cols
+ zh_Hans: 读取工作表列数据
+description:
+ human:
+ en_US: Read Cols
+ zh_Hans: 读取工作表列数据
+ llm: A tool for reading column data from a spreadsheet. (读取工作表列数据)
+parameters:
+ - name: spreadsheet_token
+ type: string
+ required: true
+ label:
+ en_US: spreadsheet_token
+ zh_Hans: 电子表格 token
+ human_description:
+ en_US: Spreadsheet token, supports input of spreadsheet URL.
+ zh_Hans: 电子表格 token,支持输入电子表格 url。
+ llm_description: 电子表格 token,支持输入电子表格 url。
+ form: llm
+
+ - name: sheet_id
+ type: string
+ required: false
+ label:
+ en_US: sheet_id
+ zh_Hans: 工作表 ID
+ human_description:
+ en_US: Sheet ID, either sheet_id or sheet_name must be filled.
+ zh_Hans: 工作表 ID,与 sheet_name 二者其一必填。
+ llm_description: 工作表 ID,与 sheet_name 二者其一必填。
+ form: llm
+
+ - name: sheet_name
+ type: string
+ required: false
+ label:
+ en_US: sheet_name
+ zh_Hans: 工作表名称
+ human_description:
+ en_US: Sheet name, either sheet_id or sheet_name must be filled.
+ zh_Hans: 工作表名称,与 sheet_id 二者其一必填。
+ llm_description: 工作表名称,与 sheet_id 二者其一必填。
+ form: llm
+
+ - name: user_id_type
+ type: select
+ required: false
+ options:
+ - value: open_id
+ label:
+ en_US: open_id
+ zh_Hans: open_id
+ - value: union_id
+ label:
+ en_US: union_id
+ zh_Hans: union_id
+ - value: user_id
+ label:
+ en_US: user_id
+ zh_Hans: user_id
+ default: "open_id"
+ label:
+ en_US: user_id_type
+ zh_Hans: 用户 ID 类型
+ human_description:
+ en_US: User ID type, optional values are open_id, union_id, user_id, with a default value of open_id.
+ zh_Hans: 用户 ID 类型,可选值有 open_id、union_id、user_id,默认值为 open_id。
+ llm_description: 用户 ID 类型,可选值有 open_id、union_id、user_id,默认值为 open_id。
+ form: form
+
+ - name: start_col
+ type: number
+ required: false
+ label:
+ en_US: start_col
+ zh_Hans: 起始列号
+ human_description:
+ en_US: Starting column number, starting from 1.
+ zh_Hans: 起始列号,从 1 开始。
+ llm_description: 起始列号,从 1 开始。
+ form: llm
+
+ - name: num_cols
+ type: number
+ required: true
+ label:
+ en_US: num_cols
+ zh_Hans: 读取列数
+ human_description:
+ en_US: Number of columns to read.
+ zh_Hans: 读取列数
+ llm_description: 读取列数
+ form: llm
diff --git a/api/core/tools/provider/builtin/feishu_spreadsheet/tools/read_rows.py b/api/core/tools/provider/builtin/feishu_spreadsheet/tools/read_rows.py
new file mode 100644
index 0000000000..86b91b104b
--- /dev/null
+++ b/api/core/tools/provider/builtin/feishu_spreadsheet/tools/read_rows.py
@@ -0,0 +1,23 @@
+from typing import Any
+
+from core.tools.entities.tool_entities import ToolInvokeMessage
+from core.tools.tool.builtin_tool import BuiltinTool
+from core.tools.utils.feishu_api_utils import FeishuRequest
+
+
+class ReadRowsTool(BuiltinTool):
+ def _invoke(self, user_id: str, tool_parameters: dict[str, Any]) -> ToolInvokeMessage:
+ app_id = self.runtime.credentials.get("app_id")
+ app_secret = self.runtime.credentials.get("app_secret")
+ client = FeishuRequest(app_id, app_secret)
+
+ spreadsheet_token = tool_parameters.get("spreadsheet_token")
+ sheet_id = tool_parameters.get("sheet_id")
+ sheet_name = tool_parameters.get("sheet_name")
+ start_row = tool_parameters.get("start_row")
+ num_rows = tool_parameters.get("num_rows")
+ user_id_type = tool_parameters.get("user_id_type", "open_id")
+
+ res = client.read_rows(spreadsheet_token, sheet_id, sheet_name, start_row, num_rows, user_id_type)
+
+ return self.create_json_message(res)
diff --git a/api/core/tools/provider/builtin/feishu_spreadsheet/tools/read_rows.yaml b/api/core/tools/provider/builtin/feishu_spreadsheet/tools/read_rows.yaml
new file mode 100644
index 0000000000..3e9206e8ef
--- /dev/null
+++ b/api/core/tools/provider/builtin/feishu_spreadsheet/tools/read_rows.yaml
@@ -0,0 +1,97 @@
+identity:
+ name: read_rows
+ author: Doug Lea
+ label:
+ en_US: Read Rows
+ zh_Hans: 读取工作表行数据
+description:
+ human:
+ en_US: Read Rows
+ zh_Hans: 读取工作表行数据
+ llm: A tool for reading row data from a spreadsheet. (读取工作表行数据)
+parameters:
+ - name: spreadsheet_token
+ type: string
+ required: true
+ label:
+ en_US: spreadsheet_token
+ zh_Hans: 电子表格 token
+ human_description:
+ en_US: Spreadsheet token, supports input of spreadsheet URL.
+ zh_Hans: 电子表格 token,支持输入电子表格 url。
+ llm_description: 电子表格 token,支持输入电子表格 url。
+ form: llm
+
+ - name: sheet_id
+ type: string
+ required: false
+ label:
+ en_US: sheet_id
+ zh_Hans: 工作表 ID
+ human_description:
+ en_US: Sheet ID, either sheet_id or sheet_name must be filled.
+ zh_Hans: 工作表 ID,与 sheet_name 二者其一必填。
+ llm_description: 工作表 ID,与 sheet_name 二者其一必填。
+ form: llm
+
+ - name: sheet_name
+ type: string
+ required: false
+ label:
+ en_US: sheet_name
+ zh_Hans: 工作表名称
+ human_description:
+ en_US: Sheet name, either sheet_id or sheet_name must be filled.
+ zh_Hans: 工作表名称,与 sheet_id 二者其一必填。
+ llm_description: 工作表名称,与 sheet_id 二者其一必填。
+ form: llm
+
+ - name: user_id_type
+ type: select
+ required: false
+ options:
+ - value: open_id
+ label:
+ en_US: open_id
+ zh_Hans: open_id
+ - value: union_id
+ label:
+ en_US: union_id
+ zh_Hans: union_id
+ - value: user_id
+ label:
+ en_US: user_id
+ zh_Hans: user_id
+ default: "open_id"
+ label:
+ en_US: user_id_type
+ zh_Hans: 用户 ID 类型
+ human_description:
+ en_US: User ID type, optional values are open_id, union_id, user_id, with a default value of open_id.
+ zh_Hans: 用户 ID 类型,可选值有 open_id、union_id、user_id,默认值为 open_id。
+ llm_description: 用户 ID 类型,可选值有 open_id、union_id、user_id,默认值为 open_id。
+ form: form
+
+ - name: start_row
+ type: number
+ required: false
+ label:
+ en_US: start_row
+ zh_Hans: 起始行号
+ human_description:
+ en_US: Starting row number, starting from 1.
+ zh_Hans: 起始行号,从 1 开始。
+ llm_description: 起始行号,从 1 开始。
+ form: llm
+
+ - name: num_rows
+ type: number
+ required: true
+ label:
+ en_US: num_rows
+ zh_Hans: 读取行数
+ human_description:
+ en_US: Number of rows to read.
+ zh_Hans: 读取行数
+ llm_description: 读取行数
+ form: llm
diff --git a/api/core/tools/provider/builtin/feishu_spreadsheet/tools/read_table.py b/api/core/tools/provider/builtin/feishu_spreadsheet/tools/read_table.py
new file mode 100644
index 0000000000..ddd607d878
--- /dev/null
+++ b/api/core/tools/provider/builtin/feishu_spreadsheet/tools/read_table.py
@@ -0,0 +1,23 @@
+from typing import Any
+
+from core.tools.entities.tool_entities import ToolInvokeMessage
+from core.tools.tool.builtin_tool import BuiltinTool
+from core.tools.utils.feishu_api_utils import FeishuRequest
+
+
+class ReadTableTool(BuiltinTool):
+ def _invoke(self, user_id: str, tool_parameters: dict[str, Any]) -> ToolInvokeMessage:
+ app_id = self.runtime.credentials.get("app_id")
+ app_secret = self.runtime.credentials.get("app_secret")
+ client = FeishuRequest(app_id, app_secret)
+
+ spreadsheet_token = tool_parameters.get("spreadsheet_token")
+ sheet_id = tool_parameters.get("sheet_id")
+ sheet_name = tool_parameters.get("sheet_name")
+ num_range = tool_parameters.get("num_range")
+ query = tool_parameters.get("query")
+ user_id_type = tool_parameters.get("user_id_type", "open_id")
+
+ res = client.read_table(spreadsheet_token, sheet_id, sheet_name, num_range, query, user_id_type)
+
+ return self.create_json_message(res)
diff --git a/api/core/tools/provider/builtin/feishu_spreadsheet/tools/read_table.yaml b/api/core/tools/provider/builtin/feishu_spreadsheet/tools/read_table.yaml
new file mode 100644
index 0000000000..e3dc80e1eb
--- /dev/null
+++ b/api/core/tools/provider/builtin/feishu_spreadsheet/tools/read_table.yaml
@@ -0,0 +1,122 @@
+identity:
+ name: read_table
+ author: Doug Lea
+ label:
+ en_US: Read Table
+ zh_Hans: 自定义读取电子表格行列数据
+description:
+ human:
+ en_US: Read Table
+ zh_Hans: 自定义读取电子表格行列数据
+ llm: A tool for custom reading of row and column data from a spreadsheet. (自定义读取电子表格行列数据)
+parameters:
+ - name: spreadsheet_token
+ type: string
+ required: true
+ label:
+ en_US: spreadsheet_token
+ zh_Hans: 电子表格 token
+ human_description:
+ en_US: Spreadsheet token, supports input of spreadsheet URL.
+ zh_Hans: 电子表格 token,支持输入电子表格 url。
+ llm_description: 电子表格 token,支持输入电子表格 url。
+ form: llm
+
+ - name: sheet_id
+ type: string
+ required: false
+ label:
+ en_US: sheet_id
+ zh_Hans: 工作表 ID
+ human_description:
+ en_US: Sheet ID, either sheet_id or sheet_name must be filled.
+ zh_Hans: 工作表 ID,与 sheet_name 二者其一必填。
+ llm_description: 工作表 ID,与 sheet_name 二者其一必填。
+ form: llm
+
+ - name: sheet_name
+ type: string
+ required: false
+ label:
+ en_US: sheet_name
+ zh_Hans: 工作表名称
+ human_description:
+ en_US: Sheet name, either sheet_id or sheet_name must be filled.
+ zh_Hans: 工作表名称,与 sheet_id 二者其一必填。
+ llm_description: 工作表名称,与 sheet_id 二者其一必填。
+ form: llm
+
+ - name: user_id_type
+ type: select
+ required: false
+ options:
+ - value: open_id
+ label:
+ en_US: open_id
+ zh_Hans: open_id
+ - value: union_id
+ label:
+ en_US: union_id
+ zh_Hans: union_id
+ - value: user_id
+ label:
+ en_US: user_id
+ zh_Hans: user_id
+ default: "open_id"
+ label:
+ en_US: user_id_type
+ zh_Hans: 用户 ID 类型
+ human_description:
+ en_US: User ID type, optional values are open_id, union_id, user_id, with a default value of open_id.
+ zh_Hans: 用户 ID 类型,可选值有 open_id、union_id、user_id,默认值为 open_id。
+ llm_description: 用户 ID 类型,可选值有 open_id、union_id、user_id,默认值为 open_id。
+ form: form
+
+ - name: start_row
+ type: number
+ required: false
+ label:
+ en_US: start_row
+ zh_Hans: 起始行号
+ human_description:
+ en_US: Starting row number, starting from 1.
+ zh_Hans: 起始行号,从 1 开始。
+ llm_description: 起始行号,从 1 开始。
+ form: llm
+
+ - name: num_rows
+ type: number
+ required: false
+ label:
+ en_US: num_rows
+ zh_Hans: 读取行数
+ human_description:
+ en_US: Number of rows to read.
+ zh_Hans: 读取行数
+ llm_description: 读取行数
+ form: llm
+
+ - name: num_range
+ type: string
+ required: false
+ label:
+ en_US: num_range
+ zh_Hans: 取数范围
+ human_description:
+ en_US: |
+ Data range, format like: A1:B2, can be empty when query=all.
+ zh_Hans: 取数范围,格式如:A1:B2,query=all 时可为空。
+ llm_description: 取数范围,格式如:A1:B2,query=all 时可为空。
+ form: llm
+
+ - name: query
+ type: string
+ required: false
+ label:
+ en_US: query
+ zh_Hans: 查询
+ human_description:
+ en_US: Pass "all" to query all data in the table, but no more than 100 columns.
+ zh_Hans: 传 all,表示查询表格所有数据,但最多查询 100 列数据。
+ llm_description: 传 all,表示查询表格所有数据,但最多查询 100 列数据。
+ form: llm
diff --git a/api/core/tools/provider/builtin/feishu_task/_assets/icon.png b/api/core/tools/provider/builtin/feishu_task/_assets/icon.png
new file mode 100644
index 0000000000..3485be0d0f
Binary files /dev/null and b/api/core/tools/provider/builtin/feishu_task/_assets/icon.png differ
diff --git a/api/core/tools/provider/builtin/feishu_task/feishu_task.py b/api/core/tools/provider/builtin/feishu_task/feishu_task.py
new file mode 100644
index 0000000000..6df05968d8
--- /dev/null
+++ b/api/core/tools/provider/builtin/feishu_task/feishu_task.py
@@ -0,0 +1,7 @@
+from core.tools.provider.builtin_tool_provider import BuiltinToolProviderController
+from core.tools.utils.feishu_api_utils import auth
+
+
+class FeishuTaskProvider(BuiltinToolProviderController):
+ def _validate_credentials(self, credentials: dict) -> None:
+ auth(credentials)
diff --git a/api/core/tools/provider/builtin/feishu_task/feishu_task.yaml b/api/core/tools/provider/builtin/feishu_task/feishu_task.yaml
new file mode 100644
index 0000000000..88736f79a0
--- /dev/null
+++ b/api/core/tools/provider/builtin/feishu_task/feishu_task.yaml
@@ -0,0 +1,36 @@
+identity:
+ author: Doug Lea
+ name: feishu_task
+ label:
+ en_US: Feishu Task
+ zh_Hans: 飞书任务
+ description:
+ en_US: |
+ Feishu Task, requires the following permissions: task:task:write、contact:user.id:readonly.
+ zh_Hans: |
+ 飞书任务,需要开通以下权限: task:task:write、contact:user.id:readonly。
+ icon: icon.png
+ tags:
+ - social
+ - productivity
+credentials_for_provider:
+ app_id:
+ type: text-input
+ required: true
+ label:
+ en_US: APP ID
+ placeholder:
+ en_US: Please input your feishu app id
+ zh_Hans: 请输入你的飞书 app id
+ help:
+ en_US: Get your app_id and app_secret from Feishu
+ zh_Hans: 从飞书获取您的 app_id 和 app_secret
+ url: https://open.larkoffice.com/app
+ app_secret:
+ type: secret-input
+ required: true
+ label:
+ en_US: APP Secret
+ placeholder:
+ en_US: Please input your app secret
+ zh_Hans: 请输入你的飞书 app secret
diff --git a/api/core/tools/provider/builtin/feishu_task/tools/add_members.py b/api/core/tools/provider/builtin/feishu_task/tools/add_members.py
new file mode 100644
index 0000000000..e58ed22e0f
--- /dev/null
+++ b/api/core/tools/provider/builtin/feishu_task/tools/add_members.py
@@ -0,0 +1,20 @@
+from typing import Any
+
+from core.tools.entities.tool_entities import ToolInvokeMessage
+from core.tools.tool.builtin_tool import BuiltinTool
+from core.tools.utils.feishu_api_utils import FeishuRequest
+
+
+class AddMembersTool(BuiltinTool):
+ def _invoke(self, user_id: str, tool_parameters: dict[str, Any]) -> ToolInvokeMessage:
+ app_id = self.runtime.credentials.get("app_id")
+ app_secret = self.runtime.credentials.get("app_secret")
+ client = FeishuRequest(app_id, app_secret)
+
+ task_guid = tool_parameters.get("task_guid")
+ member_phone_or_email = tool_parameters.get("member_phone_or_email")
+ member_role = tool_parameters.get("member_role", "follower")
+
+ res = client.add_members(task_guid, member_phone_or_email, member_role)
+
+ return self.create_json_message(res)
diff --git a/api/core/tools/provider/builtin/feishu_task/tools/add_members.yaml b/api/core/tools/provider/builtin/feishu_task/tools/add_members.yaml
new file mode 100644
index 0000000000..063c0f7f04
--- /dev/null
+++ b/api/core/tools/provider/builtin/feishu_task/tools/add_members.yaml
@@ -0,0 +1,58 @@
+identity:
+ name: add_members
+ author: Doug Lea
+ label:
+ en_US: Add Members
+ zh_Hans: 添加任务成员
+description:
+ human:
+ en_US: Add Members
+ zh_Hans: 添加任务成员
+ llm: A tool for adding members to a Feishu task.(添加任务成员)
+parameters:
+ - name: task_guid
+ type: string
+ required: true
+ label:
+ en_US: Task GUID
+ zh_Hans: 任务 GUID
+ human_description:
+ en_US: |
+ The GUID of the task to be added, supports passing either the Task ID or the Task link URL. Example of Task ID: 8b5425ec-9f2a-43bd-a3ab-01912f50282b; Example of Task link URL: https://applink.feishu-pre.net/client/todo/detail?guid=8c6bf822-e4da-449a-b82a-dc44020f9be9&suite_entity_num=t21587362
+ zh_Hans: 要添加的任务的 GUID,支持传任务 ID 和任务链接 URL。任务 ID 示例:8b5425ec-9f2a-43bd-a3ab-01912f50282b;任务链接 URL 示例:https://applink.feishu-pre.net/client/todo/detail?guid=8c6bf822-e4da-449a-b82a-dc44020f9be9&suite_entity_num=t21587362
+ llm_description: 要添加的任务的 GUID,支持传任务 ID 和任务链接 URL。任务 ID 示例:8b5425ec-9f2a-43bd-a3ab-01912f50282b;任务链接 URL 示例:https://applink.feishu-pre.net/client/todo/detail?guid=8c6bf822-e4da-449a-b82a-dc44020f9be9&suite_entity_num=t21587362
+ form: llm
+
+ - name: member_phone_or_email
+ type: string
+ required: true
+ label:
+ en_US: Task Member Phone Or Email
+ zh_Hans: 任务成员的电话或邮箱
+ human_description:
+ en_US: A list of member emails or phone numbers, separated by commas.
+ zh_Hans: 任务成员邮箱或者手机号列表,使用逗号分隔。
+ llm_description: 任务成员邮箱或者手机号列表,使用逗号分隔。
+ form: llm
+
+ - name: member_role
+ type: select
+ required: true
+ options:
+ - value: assignee
+ label:
+ en_US: assignee
+ zh_Hans: 负责人
+ - value: follower
+ label:
+ en_US: follower
+ zh_Hans: 关注人
+ default: "follower"
+ label:
+ en_US: member_role
+ zh_Hans: 成员的角色
+ human_description:
+ en_US: Member role, optional values are "assignee" (responsible person) and "follower" (observer), with a default value of "follower".
+ zh_Hans: 成员的角色,可选值有 "assignee"(负责人)和 "follower"(关注人),默认值为 "follower"。
+ llm_description: 成员的角色,可选值有 "assignee"(负责人)和 "follower"(关注人),默认值为 "follower"。
+ form: form
diff --git a/api/core/tools/provider/builtin/feishu_task/tools/create_task.py b/api/core/tools/provider/builtin/feishu_task/tools/create_task.py
new file mode 100644
index 0000000000..96cdcd71f6
--- /dev/null
+++ b/api/core/tools/provider/builtin/feishu_task/tools/create_task.py
@@ -0,0 +1,22 @@
+from typing import Any
+
+from core.tools.entities.tool_entities import ToolInvokeMessage
+from core.tools.tool.builtin_tool import BuiltinTool
+from core.tools.utils.feishu_api_utils import FeishuRequest
+
+
+class CreateTaskTool(BuiltinTool):
+ def _invoke(self, user_id: str, tool_parameters: dict[str, Any]) -> ToolInvokeMessage:
+ app_id = self.runtime.credentials.get("app_id")
+ app_secret = self.runtime.credentials.get("app_secret")
+ client = FeishuRequest(app_id, app_secret)
+
+ summary = tool_parameters.get("summary")
+ start_time = tool_parameters.get("start_time")
+ end_time = tool_parameters.get("end_time")
+ completed_time = tool_parameters.get("completed_time")
+ description = tool_parameters.get("description")
+
+ res = client.create_task(summary, start_time, end_time, completed_time, description)
+
+ return self.create_json_message(res)
diff --git a/api/core/tools/provider/builtin/feishu_task/tools/create_task.yaml b/api/core/tools/provider/builtin/feishu_task/tools/create_task.yaml
new file mode 100644
index 0000000000..7eb4af168b
--- /dev/null
+++ b/api/core/tools/provider/builtin/feishu_task/tools/create_task.yaml
@@ -0,0 +1,74 @@
+identity:
+ name: create_task
+ author: Doug Lea
+ label:
+ en_US: Create Task
+ zh_Hans: 创建飞书任务
+description:
+ human:
+ en_US: Create Feishu Task
+ zh_Hans: 创建飞书任务
+ llm: A tool for creating tasks in Feishu.(创建飞书任务)
+parameters:
+ - name: summary
+ type: string
+ required: true
+ label:
+ en_US: Task Title
+ zh_Hans: 任务标题
+ human_description:
+ en_US: The title of the task.
+ zh_Hans: 任务标题
+ llm_description: 任务标题
+ form: llm
+
+ - name: description
+ type: string
+ required: false
+ label:
+ en_US: Task Description
+ zh_Hans: 任务备注
+ human_description:
+ en_US: The description or notes for the task.
+ zh_Hans: 任务备注
+ llm_description: 任务备注
+ form: llm
+
+ - name: start_time
+ type: string
+ required: false
+ label:
+ en_US: Start Time
+ zh_Hans: 任务开始时间
+ human_description:
+ en_US: |
+ The start time of the task, in the format: 2006-01-02 15:04:05
+ zh_Hans: 任务开始时间,格式为:2006-01-02 15:04:05
+ llm_description: 任务开始时间,格式为:2006-01-02 15:04:05
+ form: llm
+
+ - name: end_time
+ type: string
+ required: false
+ label:
+ en_US: End Time
+ zh_Hans: 任务结束时间
+ human_description:
+ en_US: |
+ The end time of the task, in the format: 2006-01-02 15:04:05
+ zh_Hans: 任务结束时间,格式为:2006-01-02 15:04:05
+ llm_description: 任务结束时间,格式为:2006-01-02 15:04:05
+ form: llm
+
+ - name: completed_time
+ type: string
+ required: false
+ label:
+ en_US: Completed Time
+ zh_Hans: 任务完成时间
+ human_description:
+ en_US: |
+ The completion time of the task, in the format: 2006-01-02 15:04:05. Leave empty to create an incomplete task; fill in a specific time to create a completed task.
+ zh_Hans: 任务完成时间,格式为:2006-01-02 15:04:05,不填写表示创建一个未完成任务;填写一个具体的时间表示创建一个已完成任务。
+ llm_description: 任务完成时间,格式为:2006-01-02 15:04:05,不填写表示创建一个未完成任务;填写一个具体的时间表示创建一个已完成任务。
+ form: llm
diff --git a/api/core/tools/provider/builtin/feishu_task/tools/delete_task.py b/api/core/tools/provider/builtin/feishu_task/tools/delete_task.py
new file mode 100644
index 0000000000..dee036fee5
--- /dev/null
+++ b/api/core/tools/provider/builtin/feishu_task/tools/delete_task.py
@@ -0,0 +1,18 @@
+from typing import Any
+
+from core.tools.entities.tool_entities import ToolInvokeMessage
+from core.tools.tool.builtin_tool import BuiltinTool
+from core.tools.utils.feishu_api_utils import FeishuRequest
+
+
+class DeleteTaskTool(BuiltinTool):
+ def _invoke(self, user_id: str, tool_parameters: dict[str, Any]) -> ToolInvokeMessage:
+ app_id = self.runtime.credentials.get("app_id")
+ app_secret = self.runtime.credentials.get("app_secret")
+ client = FeishuRequest(app_id, app_secret)
+
+ task_guid = tool_parameters.get("task_guid")
+
+ res = client.delete_task(task_guid)
+
+ return self.create_json_message(res)
diff --git a/api/core/tools/provider/builtin/feishu_task/tools/delete_task.yaml b/api/core/tools/provider/builtin/feishu_task/tools/delete_task.yaml
new file mode 100644
index 0000000000..d3f9741367
--- /dev/null
+++ b/api/core/tools/provider/builtin/feishu_task/tools/delete_task.yaml
@@ -0,0 +1,24 @@
+identity:
+ name: delete_task
+ author: Doug Lea
+ label:
+ en_US: Delete Task
+ zh_Hans: 删除飞书任务
+description:
+ human:
+ en_US: Delete Task
+ zh_Hans: 删除飞书任务
+ llm: A tool for deleting tasks in Feishu.(删除飞书任务)
+parameters:
+ - name: task_guid
+ type: string
+ required: true
+ label:
+ en_US: Task GUID
+ zh_Hans: 任务 GUID
+ human_description:
+ en_US: |
+ The GUID of the task to be deleted, supports passing either the Task ID or the Task link URL. Example of Task ID: 8b5425ec-9f2a-43bd-a3ab-01912f50282b; Example of Task link URL: https://applink.feishu-pre.net/client/todo/detail?guid=8c6bf822-e4da-449a-b82a-dc44020f9be9&suite_entity_num=t21587362
+ zh_Hans: 要删除的任务的 GUID,支持传任务 ID 和任务链接 URL。任务 ID 示例:8b5425ec-9f2a-43bd-a3ab-01912f50282b;任务链接 URL 示例:https://applink.feishu-pre.net/client/todo/detail?guid=8c6bf822-e4da-449a-b82a-dc44020f9be9&suite_entity_num=t21587362
+ llm_description: 要删除的任务的 GUID,支持传任务 ID 和任务链接 URL。任务 ID 示例:8b5425ec-9f2a-43bd-a3ab-01912f50282b;任务链接 URL 示例:https://applink.feishu-pre.net/client/todo/detail?guid=8c6bf822-e4da-449a-b82a-dc44020f9be9&suite_entity_num=t21587362
+ form: llm
diff --git a/api/core/tools/provider/builtin/feishu_task/tools/update_task.py b/api/core/tools/provider/builtin/feishu_task/tools/update_task.py
new file mode 100644
index 0000000000..4a48cd283a
--- /dev/null
+++ b/api/core/tools/provider/builtin/feishu_task/tools/update_task.py
@@ -0,0 +1,23 @@
+from typing import Any
+
+from core.tools.entities.tool_entities import ToolInvokeMessage
+from core.tools.tool.builtin_tool import BuiltinTool
+from core.tools.utils.feishu_api_utils import FeishuRequest
+
+
+class UpdateTaskTool(BuiltinTool):
+ def _invoke(self, user_id: str, tool_parameters: dict[str, Any]) -> ToolInvokeMessage:
+ app_id = self.runtime.credentials.get("app_id")
+ app_secret = self.runtime.credentials.get("app_secret")
+ client = FeishuRequest(app_id, app_secret)
+
+ task_guid = tool_parameters.get("task_guid")
+ summary = tool_parameters.get("summary")
+ start_time = tool_parameters.get("start_time")
+ end_time = tool_parameters.get("end_time")
+ completed_time = tool_parameters.get("completed_time")
+ description = tool_parameters.get("description")
+
+ res = client.update_task(task_guid, summary, start_time, end_time, completed_time, description)
+
+ return self.create_json_message(res)
diff --git a/api/core/tools/provider/builtin/feishu_task/tools/update_task.yaml b/api/core/tools/provider/builtin/feishu_task/tools/update_task.yaml
new file mode 100644
index 0000000000..83c9bcb1c4
--- /dev/null
+++ b/api/core/tools/provider/builtin/feishu_task/tools/update_task.yaml
@@ -0,0 +1,89 @@
+identity:
+ name: update_task
+ author: Doug Lea
+ label:
+ en_US: Update Task
+ zh_Hans: 更新飞书任务
+description:
+ human:
+ en_US: Update Feishu Task
+ zh_Hans: 更新飞书任务
+ llm: A tool for updating tasks in Feishu.(更新飞书任务)
+parameters:
+ - name: task_guid
+ type: string
+ required: true
+ label:
+ en_US: Task GUID
+ zh_Hans: 任务 GUID
+ human_description:
+ en_US: |
+ The task ID, supports inputting either the Task ID or the Task link URL. Example of Task ID: 42cad8a0-f8c8-4344-9be2-d1d7e8e91b64; Example of Task link URL: https://applink.feishu-pre.net/client/todo/detail?guid=42cad8a0-f8c8-4344-9be2-d1d7e8e91b64&suite_entity_num=t21700217
+ zh_Hans: |
+ 任务ID,支持传入任务 ID 和任务链接 URL。任务 ID 示例: 42cad8a0-f8c8-4344-9be2-d1d7e8e91b64;任务链接 URL 示例: https://applink.feishu-pre.net/client/todo/detail?guid=42cad8a0-f8c8-4344-9be2-d1d7e8e91b64&suite_entity_num=t21700217
+ llm_description: |
+ 任务ID,支持传入任务 ID 和任务链接 URL。任务 ID 示例: 42cad8a0-f8c8-4344-9be2-d1d7e8e91b64;任务链接 URL 示例: https://applink.feishu-pre.net/client/todo/detail?guid=42cad8a0-f8c8-4344-9be2-d1d7e8e91b64&suite_entity_num=t21700217
+ form: llm
+
+ - name: summary
+ type: string
+ required: true
+ label:
+ en_US: Task Title
+ zh_Hans: 任务标题
+ human_description:
+ en_US: The title of the task.
+ zh_Hans: 任务标题
+ llm_description: 任务标题
+ form: llm
+
+ - name: description
+ type: string
+ required: false
+ label:
+ en_US: Task Description
+ zh_Hans: 任务备注
+ human_description:
+ en_US: The description or notes for the task.
+ zh_Hans: 任务备注
+ llm_description: 任务备注
+ form: llm
+
+ - name: start_time
+ type: string
+ required: false
+ label:
+ en_US: Start Time
+ zh_Hans: 任务开始时间
+ human_description:
+ en_US: |
+ The start time of the task, in the format: 2006-01-02 15:04:05
+ zh_Hans: 任务开始时间,格式为:2006-01-02 15:04:05
+ llm_description: 任务开始时间,格式为:2006-01-02 15:04:05
+ form: llm
+
+ - name: end_time
+ type: string
+ required: false
+ label:
+ en_US: End Time
+ zh_Hans: 任务结束时间
+ human_description:
+ en_US: |
+ The end time of the task, in the format: 2006-01-02 15:04:05
+ zh_Hans: 任务结束时间,格式为:2006-01-02 15:04:05
+ llm_description: 任务结束时间,格式为:2006-01-02 15:04:05
+ form: llm
+
+ - name: completed_time
+ type: string
+ required: false
+ label:
+ en_US: Completed Time
+ zh_Hans: 任务完成时间
+ human_description:
+ en_US: |
+ The completion time of the task, in the format: 2006-01-02 15:04:05
+ zh_Hans: 任务完成时间,格式为:2006-01-02 15:04:05
+ llm_description: 任务完成时间,格式为:2006-01-02 15:04:05
+ form: llm
diff --git a/api/core/tools/provider/builtin/feishu_wiki/_assets/icon.png b/api/core/tools/provider/builtin/feishu_wiki/_assets/icon.png
new file mode 100644
index 0000000000..878672c9ae
Binary files /dev/null and b/api/core/tools/provider/builtin/feishu_wiki/_assets/icon.png differ
diff --git a/api/core/tools/provider/builtin/feishu_wiki/feishu_wiki.py b/api/core/tools/provider/builtin/feishu_wiki/feishu_wiki.py
new file mode 100644
index 0000000000..6c5fccb1a3
--- /dev/null
+++ b/api/core/tools/provider/builtin/feishu_wiki/feishu_wiki.py
@@ -0,0 +1,7 @@
+from core.tools.provider.builtin_tool_provider import BuiltinToolProviderController
+from core.tools.utils.feishu_api_utils import auth
+
+
+class FeishuWikiProvider(BuiltinToolProviderController):
+ def _validate_credentials(self, credentials: dict) -> None:
+ auth(credentials)
diff --git a/api/core/tools/provider/builtin/feishu_wiki/feishu_wiki.yaml b/api/core/tools/provider/builtin/feishu_wiki/feishu_wiki.yaml
new file mode 100644
index 0000000000..1fb5f71cbc
--- /dev/null
+++ b/api/core/tools/provider/builtin/feishu_wiki/feishu_wiki.yaml
@@ -0,0 +1,36 @@
+identity:
+ author: Doug Lea
+ name: feishu_wiki
+ label:
+ en_US: Feishu Wiki
+ zh_Hans: 飞书知识库
+ description:
+ en_US: |
+ Feishu Wiki, requires the following permissions: wiki:wiki:readonly.
+ zh_Hans: |
+ 飞书知识库,需要开通以下权限: wiki:wiki:readonly。
+ icon: icon.png
+ tags:
+ - social
+ - productivity
+credentials_for_provider:
+ app_id:
+ type: text-input
+ required: true
+ label:
+ en_US: APP ID
+ placeholder:
+ en_US: Please input your feishu app id
+ zh_Hans: 请输入你的飞书 app id
+ help:
+ en_US: Get your app_id and app_secret from Feishu
+ zh_Hans: 从飞书获取您的 app_id 和 app_secret
+ url: https://open.larkoffice.com/app
+ app_secret:
+ type: secret-input
+ required: true
+ label:
+ en_US: APP Secret
+ placeholder:
+ en_US: Please input your app secret
+ zh_Hans: 请输入你的飞书 app secret
diff --git a/api/core/tools/provider/builtin/feishu_wiki/tools/get_wiki_nodes.py b/api/core/tools/provider/builtin/feishu_wiki/tools/get_wiki_nodes.py
new file mode 100644
index 0000000000..374b4c9a7d
--- /dev/null
+++ b/api/core/tools/provider/builtin/feishu_wiki/tools/get_wiki_nodes.py
@@ -0,0 +1,21 @@
+from typing import Any
+
+from core.tools.entities.tool_entities import ToolInvokeMessage
+from core.tools.tool.builtin_tool import BuiltinTool
+from core.tools.utils.feishu_api_utils import FeishuRequest
+
+
+class GetWikiNodesTool(BuiltinTool):
+ def _invoke(self, user_id: str, tool_parameters: dict[str, Any]) -> ToolInvokeMessage:
+ app_id = self.runtime.credentials.get("app_id")
+ app_secret = self.runtime.credentials.get("app_secret")
+ client = FeishuRequest(app_id, app_secret)
+
+ space_id = tool_parameters.get("space_id")
+ parent_node_token = tool_parameters.get("parent_node_token")
+ page_token = tool_parameters.get("page_token")
+ page_size = tool_parameters.get("page_size")
+
+ res = client.get_wiki_nodes(space_id, parent_node_token, page_token, page_size)
+
+ return self.create_json_message(res)
diff --git a/api/core/tools/provider/builtin/feishu_wiki/tools/get_wiki_nodes.yaml b/api/core/tools/provider/builtin/feishu_wiki/tools/get_wiki_nodes.yaml
new file mode 100644
index 0000000000..7d6ac3c824
--- /dev/null
+++ b/api/core/tools/provider/builtin/feishu_wiki/tools/get_wiki_nodes.yaml
@@ -0,0 +1,63 @@
+identity:
+ name: get_wiki_nodes
+ author: Doug Lea
+ label:
+ en_US: Get Wiki Nodes
+ zh_Hans: 获取知识空间子节点列表
+description:
+ human:
+ en_US: |
+ Get the list of child nodes in Wiki, make sure the app/bot is a member of the wiki space. See How to add an app as a wiki base administrator (member). https://open.feishu.cn/document/server-docs/docs/wiki-v2/wiki-qa
+ zh_Hans: |
+ 获取知识库全部子节点列表,请确保应用/机器人为知识空间成员。参阅如何将应用添加为知识库管理员(成员)。https://open.feishu.cn/document/server-docs/docs/wiki-v2/wiki-qa
+ llm: A tool for getting all sub-nodes of a knowledge base.(获取知识空间子节点列表)
+parameters:
+ - name: space_id
+ type: string
+ required: true
+ label:
+ en_US: Space Id
+ zh_Hans: 知识空间 ID
+ human_description:
+ en_US: |
+ The ID of the knowledge space. Supports space link URL, for example: https://svi136aogf123.feishu.cn/wiki/settings/7166950623940706332
+ zh_Hans: 知识空间 ID,支持空间链接 URL,例如:https://svi136aogf123.feishu.cn/wiki/settings/7166950623940706332
+ llm_description: 知识空间 ID,支持空间链接 URL,例如:https://svi136aogf123.feishu.cn/wiki/settings/7166950623940706332
+ form: llm
+
+ - name: page_size
+ type: number
+ required: false
+ default: 10
+ label:
+ en_US: Page Size
+ zh_Hans: 分页大小
+ human_description:
+ en_US: The size of each page, with a maximum value of 50.
+ zh_Hans: 分页大小,最大值 50。
+ llm_description: 分页大小,最大值 50。
+ form: llm
+
+ - name: page_token
+ type: string
+ required: false
+ label:
+ en_US: Page Token
+ zh_Hans: 分页标记
+ human_description:
+ en_US: The pagination token. Leave empty for the first request to start from the beginning; if the paginated query result has more items, a new page_token will be returned, which can be used to get the next set of results.
+ zh_Hans: 分页标记,第一次请求不填,表示从头开始遍历;分页查询结果还有更多项时会同时返回新的 page_token,下次遍历可采用该 page_token 获取查询结果。
+ llm_description: 分页标记,第一次请求不填,表示从头开始遍历;分页查询结果还有更多项时会同时返回新的 page_token,下次遍历可采用该 page_token 获取查询结果。
+ form: llm
+
+ - name: parent_node_token
+ type: string
+ required: false
+ label:
+ en_US: Parent Node Token
+ zh_Hans: 父节点 token
+ human_description:
+ en_US: The token of the parent node.
+ zh_Hans: 父节点 token
+ llm_description: 父节点 token
+ form: llm
diff --git a/api/core/tools/provider/builtin/jina/tools/jina_reader.yaml b/api/core/tools/provider/builtin/jina/tools/jina_reader.yaml
index 58ad6d8694..589bc3433d 100644
--- a/api/core/tools/provider/builtin/jina/tools/jina_reader.yaml
+++ b/api/core/tools/provider/builtin/jina/tools/jina_reader.yaml
@@ -2,14 +2,14 @@ identity:
name: jina_reader
author: Dify
label:
- en_US: JinaReader
- zh_Hans: JinaReader
- pt_BR: JinaReader
+ en_US: Fetch Single Page
+ zh_Hans: 获取单页面
+ pt_BR: Fetch Single Page
description:
human:
- en_US: Convert any URL to an LLM-friendly input. Experience improved output for your agent and RAG systems at no cost.
- zh_Hans: 将任何 URL 转换为 LLM 友好的输入。无需付费即可体验为您的 Agent 和 RAG 系统提供的改进输出。
- pt_BR: Converta qualquer URL em uma entrada amigável ao LLM. Experimente uma saída aprimorada para seus sistemas de agente e RAG sem custo.
+ en_US: Fetch the target URL (can be a PDF) and convert it into a LLM-friendly markdown.
+ zh_Hans: 获取目标网址(可以是 PDF),并将其转换为适合大模型处理的 Markdown 格式。
+ pt_BR: Busque a URL de destino (que pode ser um PDF) e converta em um Markdown LLM-friendly.
llm: A tool for scraping webpages. Input should be a URL.
parameters:
- name: url
@@ -17,13 +17,13 @@ parameters:
required: true
label:
en_US: URL
- zh_Hans: 网页链接
+ zh_Hans: 网址
pt_BR: URL
human_description:
- en_US: used for linking to webpages
- zh_Hans: 用于链接到网页
- pt_BR: used for linking to webpages
- llm_description: url for scraping
+ en_US: Web link
+ zh_Hans: 网页链接
+ pt_BR: URL da web
+ llm_description: url para scraping
form: llm
- name: request_params
type: string
@@ -31,14 +31,14 @@ parameters:
label:
en_US: Request params
zh_Hans: 请求参数
- pt_BR: Request params
+ pt_BR: Parâmetros de solicitação
human_description:
en_US: |
request parameters, format: {"key1": "value1", "key2": "value2"}
zh_Hans: |
请求参数,格式:{"key1": "value1", "key2": "value2"}
pt_BR: |
- request parameters, format: {"key1": "value1", "key2": "value2"}
+ parâmetros de solicitação, formato: {"key1": "value1", "key2": "value2"}
llm_description: request parameters
form: llm
- name: target_selector
@@ -51,7 +51,7 @@ parameters:
human_description:
en_US: css selector for scraping specific elements
zh_Hans: css 选择器用于抓取特定元素
- pt_BR: css selector for scraping specific elements
+ pt_BR: css selector para scraping de elementos específicos
llm_description: css selector of the target element to scrape
form: form
- name: wait_for_selector
@@ -64,7 +64,7 @@ parameters:
human_description:
en_US: css selector for waiting for specific elements
zh_Hans: css 选择器用于等待特定元素
- pt_BR: css selector for waiting for specific elements
+ pt_BR: css selector para aguardar elementos específicos
llm_description: css selector of the target element to wait for
form: form
- name: image_caption
@@ -77,8 +77,8 @@ parameters:
pt_BR: Legenda da imagem
human_description:
en_US: "Captions all images at the specified URL, adding 'Image [idx]: [caption]' as an alt tag for those without one. This allows downstream LLMs to interact with the images in activities such as reasoning and summarizing."
- zh_Hans: "为指定 URL 上的所有图像添加标题,为没有标题的图像添加“Image [idx]: [caption]”作为 alt 标签。这允许下游 LLM 在推理和总结等活动中与图像进行交互。"
- pt_BR: "Captions all images at the specified URL, adding 'Image [idx]: [caption]' as an alt tag for those without one. This allows downstream LLMs to interact with the images in activities such as reasoning and summarizing."
+ zh_Hans: "为指定 URL 上的所有图像添加标题,为没有标题的图像添加“Image [idx]: [caption]”作为 alt 标签,以支持下游模型的图像交互。"
+ pt_BR: "Adiciona legendas a todas as imagens na URL especificada, adicionando 'Imagem [idx]: [legenda]' como uma tag alt para aquelas que não têm uma. Isso permite que os modelos LLM inferiores interajam com as imagens em atividades como raciocínio e resumo."
llm_description: Captions all images at the specified URL
form: form
- name: gather_all_links_at_the_end
@@ -91,8 +91,8 @@ parameters:
pt_BR: Coletar todos os links ao final
human_description:
en_US: A "Buttons & Links" section will be created at the end. This helps the downstream LLMs or web agents navigating the page or take further actions.
- zh_Hans: 最后会创建一个“按钮和链接”部分。这可以帮助下游 LLM 或 Web 代理浏览页面或采取进一步的行动。
- pt_BR: A "Buttons & Links" section will be created at the end. This helps the downstream LLMs or web agents navigating the page or take further actions.
+ zh_Hans: 末尾将添加“按钮和链接”部分,方便下游模型或网络代理做页面导航或执行进一步操作。
+ pt_BR: Uma seção "Botões & Links" será criada no final. Isso ajuda os LLMs ou agentes da web a navegar pela página ou executar ações adicionais.
llm_description: Gather all links at the end
form: form
- name: gather_all_images_at_the_end
@@ -105,8 +105,8 @@ parameters:
pt_BR: Coletar todas as imagens ao final
human_description:
en_US: An "Images" section will be created at the end. This gives the downstream LLMs an overview of all visuals on the page, which may improve reasoning.
- zh_Hans: 最后会创建一个“图像”部分。这可以让下游的 LLM 概览页面上的所有视觉效果,从而提高推理能力。
- pt_BR: An "Images" section will be created at the end. This gives the downstream LLMs an overview of all visuals on the page, which may improve reasoning.
+ zh_Hans: 末尾会新增“图片”部分,方便下游模型全面了解页面的视觉内容,提升推理效果。
+ pt_BR: Uma seção "Imagens" será criada no final. Isso dá aos LLMs uma visão geral de todos os elementos visuais da página, o que pode melhorar o raciocínio.
llm_description: Gather all images at the end
form: form
- name: proxy_server
diff --git a/api/core/tools/provider/builtin/jina/tools/jina_search.yaml b/api/core/tools/provider/builtin/jina/tools/jina_search.yaml
index 2bc70e1be1..e58c639e56 100644
--- a/api/core/tools/provider/builtin/jina/tools/jina_search.yaml
+++ b/api/core/tools/provider/builtin/jina/tools/jina_search.yaml
@@ -2,13 +2,14 @@ identity:
name: jina_search
author: Dify
label:
- en_US: JinaSearch
- zh_Hans: JinaSearch
- pt_BR: JinaSearch
+ en_US: Search the web
+ zh_Hans: 联网搜索
+ pt_BR: Search the web
description:
human:
- en_US: Search on the web and get the top 5 results. Useful for grounding using information from the web.
- zh_Hans: 在网络上搜索返回前 5 个结果。
+ en_US: Search on the public web of a given query and return the top results as LLM-friendly markdown.
+ zh_Hans: 针对给定的查询在互联网上进行搜索,并以适合大模型处理的 Markdown 格式返回最相关的结果。
+ pt_BR: Procurar na web pública de uma consulta fornecida e retornar os melhores resultados como markdown para LLMs.
llm: A tool for searching results on the web for grounding. Input should be a simple question.
parameters:
- name: query
@@ -16,11 +17,13 @@ parameters:
required: true
label:
en_US: Question (Query)
- zh_Hans: 信息查询
+ zh_Hans: 查询
+ pt_BR: Pergunta (Consulta)
human_description:
en_US: used to find information on the web
zh_Hans: 在网络上搜索信息
- llm_description: simple question to ask on the web
+ pt_BR: Usado para encontrar informações na web
+ llm_description: Pergunta simples para fazer na web
form: llm
- name: image_caption
type: boolean
@@ -32,7 +35,7 @@ parameters:
pt_BR: Legenda da imagem
human_description:
en_US: "Captions all images at the specified URL, adding 'Image [idx]: [caption]' as an alt tag for those without one. This allows downstream LLMs to interact with the images in activities such as reasoning and summarizing."
- zh_Hans: "为指定 URL 上的所有图像添加标题,为没有标题的图像添加“Image [idx]: [caption]”作为 alt 标签。这允许下游 LLM 在推理和总结等活动中与图像进行交互。"
+ zh_Hans: "为指定 URL 上的所有图像添加标题,为没有标题的图像添加“Image [idx]: [caption]”作为 alt 标签,以支持下游模型的图像交互。"
pt_BR: "Captions all images at the specified URL, adding 'Image [idx]: [caption]' as an alt tag for those without one. This allows downstream LLMs to interact with the images in activities such as reasoning and summarizing."
llm_description: Captions all images at the specified URL
form: form
@@ -46,8 +49,8 @@ parameters:
pt_BR: Coletar todos os links ao final
human_description:
en_US: A "Buttons & Links" section will be created at the end. This helps the downstream LLMs or web agents navigating the page or take further actions.
- zh_Hans: 最后会创建一个“按钮和链接”部分。这可以帮助下游 LLM 或 Web 代理浏览页面或采取进一步的行动。
- pt_BR: A "Buttons & Links" section will be created at the end. This helps the downstream LLMs or web agents navigating the page or take further actions.
+ zh_Hans: 末尾将添加“按钮和链接”部分,汇总页面上的所有链接。方便下游模型或网络代理做页面导航或执行进一步操作。
+ pt_BR: Uma seção "Botões & Links" será criada no final. Isso ajuda os LLMs ou agentes da web a navegar pela página ou executar ações adicionais.
llm_description: Gather all links at the end
form: form
- name: gather_all_images_at_the_end
@@ -60,8 +63,8 @@ parameters:
pt_BR: Coletar todas as imagens ao final
human_description:
en_US: An "Images" section will be created at the end. This gives the downstream LLMs an overview of all visuals on the page, which may improve reasoning.
- zh_Hans: 最后会创建一个“图像”部分。这可以让下游的 LLM 概览页面上的所有视觉效果,从而提高推理能力。
- pt_BR: An "Images" section will be created at the end. This gives the downstream LLMs an overview of all visuals on the page, which may improve reasoning.
+ zh_Hans: 末尾会新增“图片”部分,汇总页面上的所有图片。方便下游模型概览页面的视觉内容,提升推理效果。
+ pt_BR: Uma seção "Imagens" será criada no final. Isso dá aos LLMs uma visão geral de todos os elementos visuais da página, o que pode melhorar o raciocínio.
llm_description: Gather all images at the end
form: form
- name: proxy_server
@@ -74,7 +77,7 @@ parameters:
human_description:
en_US: Use proxy to access URLs
zh_Hans: 利用代理访问 URL
- pt_BR: Use proxy to access URLs
+ pt_BR: Usar proxy para acessar URLs
llm_description: Use proxy to access URLs
form: form
- name: no_cache
@@ -83,7 +86,7 @@ parameters:
default: false
label:
en_US: Bypass the Cache
- zh_Hans: 绕过缓存
+ zh_Hans: 是否绕过缓存
pt_BR: Ignorar o cache
human_description:
en_US: Bypass the Cache
diff --git a/api/core/tools/provider/builtin/jina/tools/jina_tokenizer.yaml b/api/core/tools/provider/builtin/jina/tools/jina_tokenizer.yaml
index 62a5c7e7ba..74885cdf9a 100644
--- a/api/core/tools/provider/builtin/jina/tools/jina_tokenizer.yaml
+++ b/api/core/tools/provider/builtin/jina/tools/jina_tokenizer.yaml
@@ -2,11 +2,14 @@ identity:
name: jina_tokenizer
author: hjlarry
label:
- en_US: JinaTokenizer
+ en_US: Segment
+ zh_Hans: 切分器
+ pt_BR: Segment
description:
human:
- en_US: Free API to tokenize text and segment long text into chunks.
- zh_Hans: 免费的API可以将文本tokenize,也可以将长文本分割成多个部分。
+ en_US: Split long text into chunks and do tokenization.
+ zh_Hans: 将长文本拆分成小段落,并做分词处理。
+ pt_BR: Dividir o texto longo em pedaços e fazer tokenização.
llm: Free API to tokenize text and segment long text into chunks.
parameters:
- name: content
@@ -15,6 +18,7 @@ parameters:
label:
en_US: Content
zh_Hans: 内容
+ pt_BR: Conteúdo
llm_description: the content which need to tokenize or segment
form: llm
- name: return_tokens
@@ -23,18 +27,22 @@ parameters:
label:
en_US: Return the tokens
zh_Hans: 是否返回tokens
+ pt_BR: Retornar os tokens
human_description:
en_US: Return the tokens and their corresponding ids in the response.
zh_Hans: 返回tokens及其对应的ids。
+ pt_BR: Retornar os tokens e seus respectivos ids na resposta.
form: form
- name: return_chunks
type: boolean
label:
en_US: Return the chunks
zh_Hans: 是否分块
+ pt_BR: Retornar os chunks
human_description:
en_US: Chunking the input into semantically meaningful segments while handling a wide variety of text types and edge cases based on common structural cues.
- zh_Hans: 将输入分块为具有语义意义的片段,同时根据常见的结构线索处理各种文本类型和边缘情况。
+ zh_Hans: 将输入文本分块为语义有意义的片段,同时基于常见的结构线索处理各种文本类型和特殊情况。
+ pt_BR: Dividir o texto de entrada em segmentos semanticamente significativos, enquanto lida com uma ampla variedade de tipos de texto e casos de borda com base em pistas estruturais comuns.
form: form
- name: tokenizer
type: select
diff --git a/api/core/tools/provider/builtin/stepfun/stepfun.py b/api/core/tools/provider/builtin/stepfun/stepfun.py
index b24f730c95..239db85b11 100644
--- a/api/core/tools/provider/builtin/stepfun/stepfun.py
+++ b/api/core/tools/provider/builtin/stepfun/stepfun.py
@@ -16,7 +16,7 @@ class StepfunProvider(BuiltinToolProviderController):
user_id="",
tool_parameters={
"prompt": "cute girl, blue eyes, white hair, anime style",
- "size": "1024x1024",
+ "size": "256x256",
"n": 1,
},
)
diff --git a/api/core/tools/provider/builtin/stepfun/stepfun.yaml b/api/core/tools/provider/builtin/stepfun/stepfun.yaml
index 1f841ec369..e8139a4d7d 100644
--- a/api/core/tools/provider/builtin/stepfun/stepfun.yaml
+++ b/api/core/tools/provider/builtin/stepfun/stepfun.yaml
@@ -4,11 +4,9 @@ identity:
label:
en_US: Image-1X
zh_Hans: 阶跃星辰绘画
- pt_BR: Image-1X
description:
en_US: Image-1X
zh_Hans: 阶跃星辰绘画
- pt_BR: Image-1X
icon: icon.png
tags:
- image
@@ -20,27 +18,16 @@ credentials_for_provider:
label:
en_US: Stepfun API key
zh_Hans: 阶跃星辰API key
- pt_BR: Stepfun API key
- help:
- en_US: Please input your stepfun API key
- zh_Hans: 请输入你的阶跃星辰 API key
- pt_BR: Please input your stepfun API key
placeholder:
- en_US: Please input your stepfun API key
+ en_US: Please input your Stepfun API key
zh_Hans: 请输入你的阶跃星辰 API key
- pt_BR: Please input your stepfun API key
+ url: https://platform.stepfun.com/interface-key
stepfun_base_url:
type: text-input
required: false
label:
en_US: Stepfun base URL
zh_Hans: 阶跃星辰 base URL
- pt_BR: Stepfun base URL
- help:
- en_US: Please input your Stepfun base URL
- zh_Hans: 请输入你的阶跃星辰 base URL
- pt_BR: Please input your Stepfun base URL
placeholder:
en_US: Please input your Stepfun base URL
zh_Hans: 请输入你的阶跃星辰 base URL
- pt_BR: Please input your Stepfun base URL
diff --git a/api/core/tools/provider/builtin/stepfun/tools/image.py b/api/core/tools/provider/builtin/stepfun/tools/image.py
index 0b92b122bf..eb55dae518 100644
--- a/api/core/tools/provider/builtin/stepfun/tools/image.py
+++ b/api/core/tools/provider/builtin/stepfun/tools/image.py
@@ -1,4 +1,3 @@
-import random
from typing import Any, Union
from openai import OpenAI
@@ -19,7 +18,7 @@ class StepfunTool(BuiltinTool):
"""
invoke tools
"""
- base_url = self.runtime.credentials.get("stepfun_base_url", "https://api.stepfun.com")
+ base_url = self.runtime.credentials.get("stepfun_base_url") or "https://api.stepfun.com"
base_url = str(URL(base_url) / "v1")
client = OpenAI(
@@ -28,9 +27,7 @@ class StepfunTool(BuiltinTool):
)
extra_body = {}
- model = tool_parameters.get("model", "step-1x-medium")
- if not model:
- return self.create_text_message("Please input model name")
+ model = "step-1x-medium"
# prompt
prompt = tool_parameters.get("prompt", "")
if not prompt:
@@ -67,9 +64,3 @@ class StepfunTool(BuiltinTool):
)
)
return result
-
- @staticmethod
- def _generate_random_id(length=8):
- characters = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789"
- random_id = "".join(random.choices(characters, k=length))
- return random_id
diff --git a/api/core/tools/provider/builtin/stepfun/tools/image.yaml b/api/core/tools/provider/builtin/stepfun/tools/image.yaml
index dcc5bd2db2..8d7c9b6586 100644
--- a/api/core/tools/provider/builtin/stepfun/tools/image.yaml
+++ b/api/core/tools/provider/builtin/stepfun/tools/image.yaml
@@ -29,35 +29,6 @@ parameters:
pt_BR: Image prompt, you can check the official documentation of step-1x
llm_description: Image prompt of step-1x you should describe the image you want to generate as a list of words as possible as detailed
form: llm
- - name: model
- type: select
- required: false
- human_description:
- en_US: used for selecting the model name
- zh_Hans: 用于选择模型的名字
- pt_BR: used for selecting the model name
- label:
- en_US: Model Name
- zh_Hans: 模型名字
- pt_BR: Model Name
- form: form
- options:
- - value: step-1x-turbo
- label:
- en_US: turbo
- zh_Hans: turbo
- pt_BR: turbo
- - value: step-1x-medium
- label:
- en_US: medium
- zh_Hans: medium
- pt_BR: medium
- - value: step-1x-large
- label:
- en_US: large
- zh_Hans: large
- pt_BR: large
- default: step-1x-medium
- name: size
type: select
required: false
diff --git a/api/core/tools/provider/builtin/tavily/tavily.yaml b/api/core/tools/provider/builtin/tavily/tavily.yaml
index 7b25a81848..95820f4d18 100644
--- a/api/core/tools/provider/builtin/tavily/tavily.yaml
+++ b/api/core/tools/provider/builtin/tavily/tavily.yaml
@@ -28,4 +28,4 @@ credentials_for_provider:
en_US: Get your Tavily API key from Tavily
zh_Hans: 从 TavilyApi 获取您的 Tavily API key
pt_BR: Get your Tavily API key from Tavily
- url: https://docs.tavily.com/docs/tavily-api/introduction
+ url: https://docs.tavily.com/docs/welcome
diff --git a/api/core/tools/provider/builtin/youtube/youtube.py b/api/core/tools/provider/builtin/youtube/youtube.py
index aad876491c..07e430bcbf 100644
--- a/api/core/tools/provider/builtin/youtube/youtube.py
+++ b/api/core/tools/provider/builtin/youtube/youtube.py
@@ -13,7 +13,7 @@ class YahooFinanceProvider(BuiltinToolProviderController):
).invoke(
user_id="",
tool_parameters={
- "channel": "TOKYO GIRLS COLLECTION",
+ "channel": "UC2JZCsZSOudXA08cMMRCL9g",
"start_date": "2020-01-01",
"end_date": "2024-12-31",
},
diff --git a/api/core/tools/provider/tool_provider.py b/api/core/tools/provider/tool_provider.py
index 05c88b904e..321b212014 100644
--- a/api/core/tools/provider/tool_provider.py
+++ b/api/core/tools/provider/tool_provider.py
@@ -153,6 +153,9 @@ class ToolProviderController(BaseModel, ABC):
# check type
credential_schema = credentials_need_to_validate[credential_name]
+ if not credential_schema.required and credentials[credential_name] is None:
+ continue
+
if credential_schema.type in {
ToolProviderCredentials.CredentialsType.SECRET_INPUT,
ToolProviderCredentials.CredentialsType.TEXT_INPUT,
diff --git a/api/core/tools/utils/feishu_api_utils.py b/api/core/tools/utils/feishu_api_utils.py
index ffdb06498f..ce1fd7dc19 100644
--- a/api/core/tools/utils/feishu_api_utils.py
+++ b/api/core/tools/utils/feishu_api_utils.py
@@ -1,9 +1,23 @@
import httpx
+from core.tools.errors import ToolProviderCredentialValidationError
from extensions.ext_redis import redis_client
+def auth(credentials):
+ app_id = credentials.get("app_id")
+ app_secret = credentials.get("app_secret")
+ if not app_id or not app_secret:
+ raise ToolProviderCredentialValidationError("app_id and app_secret is required")
+ try:
+ assert FeishuRequest(app_id, app_secret).tenant_access_token is not None
+ except Exception as e:
+ raise ToolProviderCredentialValidationError(str(e))
+
+
class FeishuRequest:
+ API_BASE_URL = "https://lark-plugin-api.solutionsuite.cn/lark-plugin"
+
def __init__(self, app_id: str, app_secret: str):
self.app_id = app_id
self.app_secret = app_secret
@@ -42,7 +56,7 @@ class FeishuRequest:
"expire": 7200
}
"""
- url = "https://lark-plugin-api.solutionsuite.cn/lark-plugin/access_token/get_tenant_access_token"
+ url = f"{self.API_BASE_URL}/access_token/get_tenant_access_token"
payload = {"app_id": app_id, "app_secret": app_secret}
res = self._send_request(url, require_token=False, payload=payload)
return res
@@ -63,7 +77,7 @@ class FeishuRequest:
"msg": "创建飞书文档成功,请查看"
}
"""
- url = "https://lark-plugin-api.solutionsuite.cn/lark-plugin/document/create_document"
+ url = f"{self.API_BASE_URL}/document/create_document"
payload = {
"title": title,
"content": content,
@@ -72,13 +86,13 @@ class FeishuRequest:
res = self._send_request(url, payload=payload)
return res.get("data")
- def write_document(self, document_id: str, content: str, position: str = "start") -> dict:
- url = "https://lark-plugin-api.solutionsuite.cn/lark-plugin/document/write_document"
+ def write_document(self, document_id: str, content: str, position: str = "end") -> dict:
+ url = f"{self.API_BASE_URL}/document/write_document"
payload = {"document_id": document_id, "content": content, "position": position}
res = self._send_request(url, payload=payload)
return res
- def get_document_content(self, document_id: str, mode: str, lang: int = 0) -> dict:
+ def get_document_content(self, document_id: str, mode: str = "markdown", lang: str = "0") -> dict:
"""
API url: https://open.larkoffice.com/document/server-docs/docs/docs/docx-v1/document/raw_content
Example Response:
@@ -95,45 +109,404 @@ class FeishuRequest:
"mode": mode,
"lang": lang,
}
- url = "https://lark-plugin-api.solutionsuite.cn/lark-plugin/document/get_document_content"
- res = self._send_request(url, method="get", params=params)
+ url = f"{self.API_BASE_URL}/document/get_document_content"
+ res = self._send_request(url, method="GET", params=params)
return res.get("data").get("content")
- def list_document_blocks(self, document_id: str, page_token: str, page_size: int = 500) -> dict:
+ def list_document_blocks(
+ self, document_id: str, page_token: str, user_id_type: str = "open_id", page_size: int = 500
+ ) -> dict:
"""
API url: https://open.larkoffice.com/document/server-docs/docs/docs/docx-v1/document/list
"""
- url = "https://lark-plugin-api.solutionsuite.cn/lark-plugin/document/list_document_blocks"
params = {
+ "user_id_type": user_id_type,
"document_id": document_id,
"page_size": page_size,
"page_token": page_token,
}
- res = self._send_request(url, method="get", params=params)
+ url = f"{self.API_BASE_URL}/document/list_document_blocks"
+ res = self._send_request(url, method="GET", params=params)
return res.get("data")
def send_bot_message(self, receive_id_type: str, receive_id: str, msg_type: str, content: str) -> dict:
"""
API url: https://open.larkoffice.com/document/server-docs/im-v1/message/create
"""
- url = "https://lark-plugin-api.solutionsuite.cn/lark-plugin/message/send_bot_message"
+ url = f"{self.API_BASE_URL}/message/send_bot_message"
params = {
"receive_id_type": receive_id_type,
}
payload = {
"receive_id": receive_id,
"msg_type": msg_type,
- "content": content,
+ "content": content.strip('"').replace(r"\"", '"').replace(r"\\", "\\"),
}
res = self._send_request(url, params=params, payload=payload)
return res.get("data")
def send_webhook_message(self, webhook: str, msg_type: str, content: str) -> dict:
- url = "https://lark-plugin-api.solutionsuite.cn/lark-plugin/message/send_webhook_message"
+ url = f"{self.API_BASE_URL}/message/send_webhook_message"
payload = {
"webhook": webhook,
"msg_type": msg_type,
- "content": content,
+ "content": content.strip('"').replace(r"\"", '"').replace(r"\\", "\\"),
}
res = self._send_request(url, require_token=False, payload=payload)
return res
+
+ def get_chat_messages(
+ self,
+ container_id: str,
+ start_time: str,
+ end_time: str,
+ page_token: str,
+ sort_type: str = "ByCreateTimeAsc",
+ page_size: int = 20,
+ ) -> dict:
+ """
+ API url: https://open.larkoffice.com/document/server-docs/im-v1/message/list
+ """
+ url = f"{self.API_BASE_URL}/message/get_chat_messages"
+ params = {
+ "container_id": container_id,
+ "start_time": start_time,
+ "end_time": end_time,
+ "sort_type": sort_type,
+ "page_token": page_token,
+ "page_size": page_size,
+ }
+ res = self._send_request(url, method="GET", params=params)
+ return res.get("data")
+
+ def get_thread_messages(
+ self, container_id: str, page_token: str, sort_type: str = "ByCreateTimeAsc", page_size: int = 20
+ ) -> dict:
+ """
+ API url: https://open.larkoffice.com/document/server-docs/im-v1/message/list
+ """
+ url = f"{self.API_BASE_URL}/message/get_thread_messages"
+ params = {
+ "container_id": container_id,
+ "sort_type": sort_type,
+ "page_token": page_token,
+ "page_size": page_size,
+ }
+ res = self._send_request(url, method="GET", params=params)
+ return res.get("data")
+
+ def create_task(self, summary: str, start_time: str, end_time: str, completed_time: str, description: str) -> dict:
+ # 创建任务
+ url = f"{self.API_BASE_URL}/task/create_task"
+ payload = {
+ "summary": summary,
+ "start_time": start_time,
+ "end_time": end_time,
+ "completed_at": completed_time,
+ "description": description,
+ }
+ res = self._send_request(url, payload=payload)
+ return res.get("data")
+
+ def update_task(
+ self, task_guid: str, summary: str, start_time: str, end_time: str, completed_time: str, description: str
+ ) -> dict:
+ # 更新任务
+ url = f"{self.API_BASE_URL}/task/update_task"
+ payload = {
+ "task_guid": task_guid,
+ "summary": summary,
+ "start_time": start_time,
+ "end_time": end_time,
+ "completed_time": completed_time,
+ "description": description,
+ }
+ res = self._send_request(url, method="PATCH", payload=payload)
+ return res.get("data")
+
+ def delete_task(self, task_guid: str) -> dict:
+ # 删除任务
+ url = f"{self.API_BASE_URL}/task/delete_task"
+ payload = {
+ "task_guid": task_guid,
+ }
+ res = self._send_request(url, method="DELETE", payload=payload)
+ return res
+
+ def add_members(self, task_guid: str, member_phone_or_email: str, member_role: str) -> dict:
+ # 添加任务成员
+ url = f"{self.API_BASE_URL}/task/add_members"
+ payload = {
+ "task_guid": task_guid,
+ "member_phone_or_email": member_phone_or_email,
+ "member_role": member_role,
+ }
+ res = self._send_request(url, payload=payload)
+ return res
+
+ def get_wiki_nodes(self, space_id: str, parent_node_token: str, page_token: str, page_size: int = 20) -> dict:
+ # 获取知识库全部子节点列表
+ url = f"{self.API_BASE_URL}/wiki/get_wiki_nodes"
+ payload = {
+ "space_id": space_id,
+ "parent_node_token": parent_node_token,
+ "page_token": page_token,
+ "page_size": page_size,
+ }
+ res = self._send_request(url, payload=payload)
+ return res.get("data")
+
+ def get_primary_calendar(self, user_id_type: str = "open_id") -> dict:
+ url = f"{self.API_BASE_URL}/calendar/get_primary_calendar"
+ params = {
+ "user_id_type": user_id_type,
+ }
+ res = self._send_request(url, method="GET", params=params)
+ return res.get("data")
+
+ def create_event(
+ self,
+ summary: str,
+ description: str,
+ start_time: str,
+ end_time: str,
+ attendee_ability: str,
+ need_notification: bool = True,
+ auto_record: bool = False,
+ ) -> dict:
+ url = f"{self.API_BASE_URL}/calendar/create_event"
+ payload = {
+ "summary": summary,
+ "description": description,
+ "need_notification": need_notification,
+ "start_time": start_time,
+ "end_time": end_time,
+ "auto_record": auto_record,
+ "attendee_ability": attendee_ability,
+ }
+ res = self._send_request(url, payload=payload)
+ return res.get("data")
+
+ def update_event(
+ self,
+ event_id: str,
+ summary: str,
+ description: str,
+ need_notification: bool,
+ start_time: str,
+ end_time: str,
+ auto_record: bool,
+ ) -> dict:
+ url = f"{self.API_BASE_URL}/calendar/update_event/{event_id}"
+ payload = {}
+ if summary:
+ payload["summary"] = summary
+ if description:
+ payload["description"] = description
+ if start_time:
+ payload["start_time"] = start_time
+ if end_time:
+ payload["end_time"] = end_time
+ if need_notification:
+ payload["need_notification"] = need_notification
+ if auto_record:
+ payload["auto_record"] = auto_record
+ res = self._send_request(url, method="PATCH", payload=payload)
+ return res
+
+ def delete_event(self, event_id: str, need_notification: bool = True) -> dict:
+ url = f"{self.API_BASE_URL}/calendar/delete_event/{event_id}"
+ params = {
+ "need_notification": need_notification,
+ }
+ res = self._send_request(url, method="DELETE", params=params)
+ return res
+
+ def list_events(self, start_time: str, end_time: str, page_token: str, page_size: int = 50) -> dict:
+ url = f"{self.API_BASE_URL}/calendar/list_events"
+ params = {
+ "start_time": start_time,
+ "end_time": end_time,
+ "page_token": page_token,
+ "page_size": page_size,
+ }
+ res = self._send_request(url, method="GET", params=params)
+ return res.get("data")
+
+ def search_events(
+ self,
+ query: str,
+ start_time: str,
+ end_time: str,
+ page_token: str,
+ user_id_type: str = "open_id",
+ page_size: int = 20,
+ ) -> dict:
+ url = f"{self.API_BASE_URL}/calendar/search_events"
+ payload = {
+ "query": query,
+ "start_time": start_time,
+ "end_time": end_time,
+ "page_token": page_token,
+ "user_id_type": user_id_type,
+ "page_size": page_size,
+ }
+ res = self._send_request(url, payload=payload)
+ return res.get("data")
+
+ def add_event_attendees(self, event_id: str, attendee_phone_or_email: str, need_notification: bool = True) -> dict:
+ # 添加日程参会人
+ url = f"{self.API_BASE_URL}/calendar/add_event_attendees"
+ payload = {
+ "event_id": event_id,
+ "attendee_phone_or_email": attendee_phone_or_email,
+ "need_notification": need_notification,
+ }
+ res = self._send_request(url, payload=payload)
+ return res.get("data")
+
+ def create_spreadsheet(
+ self,
+ title: str,
+ folder_token: str,
+ ) -> dict:
+ # 创建电子表格
+ url = f"{self.API_BASE_URL}/spreadsheet/create_spreadsheet"
+ payload = {
+ "title": title,
+ "folder_token": folder_token,
+ }
+ res = self._send_request(url, payload=payload)
+ return res.get("data")
+
+ def get_spreadsheet(
+ self,
+ spreadsheet_token: str,
+ user_id_type: str = "open_id",
+ ) -> dict:
+ # 获取电子表格信息
+ url = f"{self.API_BASE_URL}/spreadsheet/get_spreadsheet"
+ params = {
+ "spreadsheet_token": spreadsheet_token,
+ "user_id_type": user_id_type,
+ }
+ res = self._send_request(url, method="GET", params=params)
+ return res.get("data")
+
+ def list_spreadsheet_sheets(
+ self,
+ spreadsheet_token: str,
+ ) -> dict:
+ # 列出电子表格的所有工作表
+ url = f"{self.API_BASE_URL}/spreadsheet/list_spreadsheet_sheets"
+ params = {
+ "spreadsheet_token": spreadsheet_token,
+ }
+ res = self._send_request(url, method="GET", params=params)
+ return res.get("data")
+
+ def add_rows(
+ self,
+ spreadsheet_token: str,
+ sheet_id: str,
+ sheet_name: str,
+ length: int,
+ values: str,
+ ) -> dict:
+ # 增加行,在工作表最后添加
+ url = f"{self.API_BASE_URL}/spreadsheet/add_rows"
+ payload = {
+ "spreadsheet_token": spreadsheet_token,
+ "sheet_id": sheet_id,
+ "sheet_name": sheet_name,
+ "length": length,
+ "values": values,
+ }
+ res = self._send_request(url, payload=payload)
+ return res.get("data")
+
+ def add_cols(
+ self,
+ spreadsheet_token: str,
+ sheet_id: str,
+ sheet_name: str,
+ length: int,
+ values: str,
+ ) -> dict:
+ # 增加列,在工作表最后添加
+ url = f"{self.API_BASE_URL}/spreadsheet/add_cols"
+ payload = {
+ "spreadsheet_token": spreadsheet_token,
+ "sheet_id": sheet_id,
+ "sheet_name": sheet_name,
+ "length": length,
+ "values": values,
+ }
+ res = self._send_request(url, payload=payload)
+ return res.get("data")
+
+ def read_rows(
+ self,
+ spreadsheet_token: str,
+ sheet_id: str,
+ sheet_name: str,
+ start_row: int,
+ num_rows: int,
+ user_id_type: str = "open_id",
+ ) -> dict:
+ # 读取工作表行数据
+ url = f"{self.API_BASE_URL}/spreadsheet/read_rows"
+ params = {
+ "spreadsheet_token": spreadsheet_token,
+ "sheet_id": sheet_id,
+ "sheet_name": sheet_name,
+ "start_row": start_row,
+ "num_rows": num_rows,
+ "user_id_type": user_id_type,
+ }
+ res = self._send_request(url, method="GET", params=params)
+ return res.get("data")
+
+ def read_cols(
+ self,
+ spreadsheet_token: str,
+ sheet_id: str,
+ sheet_name: str,
+ start_col: int,
+ num_cols: int,
+ user_id_type: str = "open_id",
+ ) -> dict:
+ # 读取工作表列数据
+ url = f"{self.API_BASE_URL}/spreadsheet/read_cols"
+ params = {
+ "spreadsheet_token": spreadsheet_token,
+ "sheet_id": sheet_id,
+ "sheet_name": sheet_name,
+ "start_col": start_col,
+ "num_cols": num_cols,
+ "user_id_type": user_id_type,
+ }
+ res = self._send_request(url, method="GET", params=params)
+ return res.get("data")
+
+ def read_table(
+ self,
+ spreadsheet_token: str,
+ sheet_id: str,
+ sheet_name: str,
+ num_range: str,
+ query: str,
+ user_id_type: str = "open_id",
+ ) -> dict:
+ # 自定义读取行列数据
+ url = f"{self.API_BASE_URL}/spreadsheet/read_table"
+ params = {
+ "spreadsheet_token": spreadsheet_token,
+ "sheet_id": sheet_id,
+ "sheet_name": sheet_name,
+ "range": num_range,
+ "query": query,
+ "user_id_type": user_id_type,
+ }
+ res = self._send_request(url, method="GET", params=params)
+ return res.get("data")
diff --git a/api/poetry.lock b/api/poetry.lock
index d32dcebc19..8a526adf4e 100644
--- a/api/poetry.lock
+++ b/api/poetry.lock
@@ -2333,13 +2333,13 @@ develop = ["aiohttp", "furo", "httpx", "opentelemetry-api", "opentelemetry-sdk",
[[package]]
name = "elasticsearch"
-version = "8.14.0"
+version = "8.15.1"
description = "Python client for Elasticsearch"
optional = false
-python-versions = ">=3.7"
+python-versions = ">=3.8"
files = [
- {file = "elasticsearch-8.14.0-py3-none-any.whl", hash = "sha256:cef8ef70a81af027f3da74a4f7d9296b390c636903088439087b8262a468c130"},
- {file = "elasticsearch-8.14.0.tar.gz", hash = "sha256:aa2490029dd96f4015b333c1827aa21fd6c0a4d223b00dfb0fe933b8d09a511b"},
+ {file = "elasticsearch-8.15.1-py3-none-any.whl", hash = "sha256:02a0476e98768a30d7926335fc0d305c04fdb928eea1354c6e6040d8c2814569"},
+ {file = "elasticsearch-8.15.1.tar.gz", hash = "sha256:40c0d312f8adf8bdc81795bc16a0b546ddf544cb1f90e829a244e4780c4dbfd8"},
]
[package.dependencies]
@@ -2347,7 +2347,10 @@ elastic-transport = ">=8.13,<9"
[package.extras]
async = ["aiohttp (>=3,<4)"]
+dev = ["aiohttp", "black", "build", "coverage", "isort", "jinja2", "mapbox-vector-tile", "nox", "numpy", "orjson", "pandas", "pyarrow", "pytest", "pytest-asyncio", "pytest-cov", "python-dateutil", "pyyaml (>=5.4)", "requests (>=2,<3)", "simsimd", "twine", "unasync"]
+docs = ["sphinx", "sphinx-autodoc-typehints", "sphinx-rtd-theme (>=2.0)"]
orjson = ["orjson (>=3)"]
+pyarrow = ["pyarrow (>=1)"]
requests = ["requests (>=2.4.0,!=2.32.2,<3.0.0)"]
vectorstore-mmr = ["numpy (>=1)", "simsimd (>=3)"]
diff --git a/api/pyproject.toml b/api/pyproject.toml
index c3b4ca300f..af7661fe23 100644
--- a/api/pyproject.toml
+++ b/api/pyproject.toml
@@ -254,7 +254,7 @@ alibabacloud_gpdb20160503 = "~3.8.0"
alibabacloud_tea_openapi = "~0.3.9"
chromadb = "0.5.1"
clickhouse-connect = "~0.7.16"
-elasticsearch = "8.14.0"
+elasticsearch = "~8.15.1"
oracledb = "~2.2.1"
pgvecto-rs = { version = "~0.2.1", extras = ['sqlalchemy'] }
pgvector = "0.2.5"
diff --git a/api/tests/integration_tests/model_runtime/fireworks/test_text_embedding.py b/api/tests/integration_tests/model_runtime/fireworks/test_text_embedding.py
new file mode 100644
index 0000000000..7bf723b3a9
--- /dev/null
+++ b/api/tests/integration_tests/model_runtime/fireworks/test_text_embedding.py
@@ -0,0 +1,54 @@
+import os
+
+import pytest
+
+from core.model_runtime.entities.text_embedding_entities import TextEmbeddingResult
+from core.model_runtime.errors.validate import CredentialsValidateFailedError
+from core.model_runtime.model_providers.fireworks.text_embedding.text_embedding import FireworksTextEmbeddingModel
+from tests.integration_tests.model_runtime.__mock.openai import setup_openai_mock
+
+
+@pytest.mark.parametrize("setup_openai_mock", [["text_embedding"]], indirect=True)
+def test_validate_credentials(setup_openai_mock):
+ model = FireworksTextEmbeddingModel()
+
+ with pytest.raises(CredentialsValidateFailedError):
+ model.validate_credentials(
+ model="nomic-ai/nomic-embed-text-v1.5", credentials={"fireworks_api_key": "invalid_key"}
+ )
+
+ model.validate_credentials(
+ model="nomic-ai/nomic-embed-text-v1.5", credentials={"fireworks_api_key": os.environ.get("FIREWORKS_API_KEY")}
+ )
+
+
+@pytest.mark.parametrize("setup_openai_mock", [["text_embedding"]], indirect=True)
+def test_invoke_model(setup_openai_mock):
+ model = FireworksTextEmbeddingModel()
+
+ result = model.invoke(
+ model="nomic-ai/nomic-embed-text-v1.5",
+ credentials={
+ "fireworks_api_key": os.environ.get("FIREWORKS_API_KEY"),
+ },
+ texts=["hello", "world", " ".join(["long_text"] * 100), " ".join(["another_long_text"] * 100)],
+ user="foo",
+ )
+
+ assert isinstance(result, TextEmbeddingResult)
+ assert len(result.embeddings) == 4
+ assert result.usage.total_tokens == 2
+
+
+def test_get_num_tokens():
+ model = FireworksTextEmbeddingModel()
+
+ num_tokens = model.get_num_tokens(
+ model="nomic-ai/nomic-embed-text-v1.5",
+ credentials={
+ "fireworks_api_key": os.environ.get("FIREWORKS_API_KEY"),
+ },
+ texts=["hello", "world"],
+ )
+
+ assert num_tokens == 2
diff --git a/docker/.env.example b/docker/.env.example
index 7eaaceb928..d43c3edc7e 100644
--- a/docker/.env.example
+++ b/docker/.env.example
@@ -346,7 +346,7 @@ VOLCENGINE_TOS_REGION=your-region
# ------------------------------
# The type of vector store to use.
-# Supported values are `weaviate`, `qdrant`, `milvus`, `myscale`, `relyt`, `pgvector`, `chroma`, `opensearch`, `tidb_vector`, `oracle`, `tencent`, `elasticsearch`.
+# Supported values are `weaviate`, `qdrant`, `milvus`, `myscale`, `relyt`, `pgvector`, `pgvecto-rs`, `chroma`, `opensearch`, `tidb_vector`, `oracle`, `tencent`, `elasticsearch`, `analyticdb`.
VECTOR_STORE=weaviate
# The Weaviate endpoint URL. Only available when VECTOR_STORE is `weaviate`.
@@ -385,13 +385,30 @@ MYSCALE_PASSWORD=
MYSCALE_DATABASE=dify
MYSCALE_FTS_PARAMS=
-# pgvector configurations, only available when VECTOR_STORE is `pgvecto-rs or pgvector`
+# pgvector configurations, only available when VECTOR_STORE is `pgvector`
PGVECTOR_HOST=pgvector
PGVECTOR_PORT=5432
PGVECTOR_USER=postgres
PGVECTOR_PASSWORD=difyai123456
PGVECTOR_DATABASE=dify
+# pgvecto-rs configurations, only available when VECTOR_STORE is `pgvecto-rs`
+PGVECTO_RS_HOST=pgvecto-rs
+PGVECTO_RS_PORT=5432
+PGVECTO_RS_USER=postgres
+PGVECTO_RS_PASSWORD=difyai123456
+PGVECTO_RS_DATABASE=dify
+
+# analyticdb configurations, only available when VECTOR_STORE is `analyticdb`
+ANALYTICDB_KEY_ID=your-ak
+ANALYTICDB_KEY_SECRET=your-sk
+ANALYTICDB_REGION_ID=cn-hangzhou
+ANALYTICDB_INSTANCE_ID=gp-ab123456
+ANALYTICDB_ACCOUNT=testaccount
+ANALYTICDB_PASSWORD=testpassword
+ANALYTICDB_NAMESPACE=dify
+ANALYTICDB_NAMESPACE_PASSWORD=difypassword
+
# TiDB vector configurations, only available when VECTOR_STORE is `tidb`
TIDB_VECTOR_HOST=tidb
TIDB_VECTOR_PORT=4000
@@ -568,6 +585,10 @@ WORKFLOW_MAX_EXECUTION_STEPS=500
WORKFLOW_MAX_EXECUTION_TIME=1200
WORKFLOW_CALL_MAX_DEPTH=5
+# HTTP request node in workflow configuration
+HTTP_REQUEST_NODE_MAX_BINARY_SIZE=10485760
+HTTP_REQUEST_NODE_MAX_TEXT_SIZE=1048576
+
# SSRF Proxy server HTTP URL
SSRF_PROXY_HTTP_URL=http://ssrf_proxy:3128
# SSRF Proxy server HTTPS URL
diff --git a/docker/docker-compose.yaml b/docker/docker-compose.yaml
index 16bef279bc..95e271a0e9 100644
--- a/docker/docker-compose.yaml
+++ b/docker/docker-compose.yaml
@@ -207,6 +207,8 @@ x-shared-env: &shared-api-worker-env
WORKFLOW_CALL_MAX_DEPTH: ${WORKFLOW_MAX_EXECUTION_TIME:-5}
SSRF_PROXY_HTTP_URL: ${SSRF_PROXY_HTTP_URL:-http://ssrf_proxy:3128}
SSRF_PROXY_HTTPS_URL: ${SSRF_PROXY_HTTPS_URL:-http://ssrf_proxy:3128}
+ HTTP_REQUEST_NODE_MAX_BINARY_SIZE: ${HTTP_REQUEST_NODE_MAX_BINARY_SIZE:-10485760}
+ HTTP_REQUEST_NODE_MAX_TEXT_SIZE: ${HTTP_REQUEST_NODE_MAX_TEXT_SIZE:-1048576}
services:
# API service
@@ -628,7 +630,7 @@ services:
# https://www.elastic.co/guide/en/elasticsearch/reference/current/settings.html
# https://www.elastic.co/guide/en/elasticsearch/reference/current/docker.html#docker-prod-prerequisites
elasticsearch:
- image: docker.elastic.co/elasticsearch/elasticsearch:8.14.3
+ image: docker.elastic.co/elasticsearch/elasticsearch:8.15.1
container_name: elasticsearch
profiles:
- elasticsearch
@@ -655,7 +657,7 @@ services:
# https://www.elastic.co/guide/en/kibana/current/docker.html
# https://www.elastic.co/guide/en/kibana/current/settings.html
kibana:
- image: docker.elastic.co/kibana/kibana:8.14.3
+ image: docker.elastic.co/kibana/kibana:8.15.1
container_name: kibana
profiles:
- elasticsearch
diff --git a/sdks/python-client/dify_client/client.py b/sdks/python-client/dify_client/client.py
index 2be079bdf3..5e42507a42 100644
--- a/sdks/python-client/dify_client/client.py
+++ b/sdks/python-client/dify_client/client.py
@@ -1,103 +1,80 @@
import json
+
import requests
class DifyClient:
- def __init__(self, api_key, base_url: str = 'https://api.dify.ai/v1'):
+ def __init__(self, api_key, base_url: str = "https://api.dify.ai/v1"):
self.api_key = api_key
self.base_url = base_url
def _send_request(self, method, endpoint, json=None, params=None, stream=False):
- headers = {
- "Authorization": f"Bearer {self.api_key}",
- "Content-Type": "application/json"
- }
+ headers = {"Authorization": f"Bearer {self.api_key}", "Content-Type": "application/json"}
url = f"{self.base_url}{endpoint}"
response = requests.request(method, url, json=json, params=params, headers=headers, stream=stream)
return response
-
def _send_request_with_files(self, method, endpoint, data, files):
- headers = {
- "Authorization": f"Bearer {self.api_key}"
- }
+ headers = {"Authorization": f"Bearer {self.api_key}"}
url = f"{self.base_url}{endpoint}"
response = requests.request(method, url, data=data, headers=headers, files=files)
return response
-
+
def message_feedback(self, message_id, rating, user):
- data = {
- "rating": rating,
- "user": user
- }
+ data = {"rating": rating, "user": user}
return self._send_request("POST", f"/messages/{message_id}/feedbacks", data)
-
+
def get_application_parameters(self, user):
params = {"user": user}
return self._send_request("GET", "/parameters", params=params)
-
+
def file_upload(self, user, files):
- data = {
- "user": user
- }
+ data = {"user": user}
return self._send_request_with_files("POST", "/files/upload", data=data, files=files)
- def text_to_audio(self, text:str, user:str, streaming:bool=False):
- data = {
- "text": text,
- "user": user,
- "streaming": streaming
- }
+ def text_to_audio(self, text: str, user: str, streaming: bool = False):
+ data = {"text": text, "user": user, "streaming": streaming}
return self._send_request("POST", "/text-to-audio", data=data)
-
- def get_meta(self,user):
- params = { "user": user}
- return self._send_request("GET", f"/meta", params=params)
+
+ def get_meta(self, user):
+ params = {"user": user}
+ return self._send_request("GET", "/meta", params=params)
class CompletionClient(DifyClient):
def create_completion_message(self, inputs, response_mode, user, files=None):
- data = {
- "inputs": inputs,
- "response_mode": response_mode,
- "user": user,
- "files": files
- }
- return self._send_request("POST", "/completion-messages", data,
- stream=True if response_mode == "streaming" else False)
+ data = {"inputs": inputs, "response_mode": response_mode, "user": user, "files": files}
+ return self._send_request(
+ "POST", "/completion-messages", data, stream=True if response_mode == "streaming" else False
+ )
class ChatClient(DifyClient):
def create_chat_message(self, inputs, query, user, response_mode="blocking", conversation_id=None, files=None):
- data = {
- "inputs": inputs,
- "query": query,
- "user": user,
- "response_mode": response_mode,
- "files": files
- }
+ data = {"inputs": inputs, "query": query, "user": user, "response_mode": response_mode, "files": files}
if conversation_id:
data["conversation_id"] = conversation_id
- return self._send_request("POST", "/chat-messages", data,
- stream=True if response_mode == "streaming" else False)
-
- def get_suggested(self, message_id, user:str):
+ return self._send_request(
+ "POST", "/chat-messages", data, stream=True if response_mode == "streaming" else False
+ )
+
+ def get_suggested(self, message_id, user: str):
params = {"user": user}
return self._send_request("GET", f"/messages/{message_id}/suggested", params=params)
-
+
def stop_message(self, task_id, user):
data = {"user": user}
- return self._send_request("POST", f"/chat-messages/{task_id}/stop", data)
+ return self._send_request("POST", f"/chat-messages/{task_id}/stop", data)
def get_conversations(self, user, last_id=None, limit=None, pinned=None):
params = {"user": user, "last_id": last_id, "limit": limit, "pinned": pinned}
return self._send_request("GET", "/conversations", params=params)
-
+
def get_conversation_messages(self, user, conversation_id=None, first_id=None, limit=None):
params = {"user": user}
@@ -109,15 +86,15 @@ class ChatClient(DifyClient):
params["limit"] = limit
return self._send_request("GET", "/messages", params=params)
-
- def rename_conversation(self, conversation_id, name,auto_generate:bool, user:str):
- data = {"name": name, "auto_generate": auto_generate,"user": user}
+
+ def rename_conversation(self, conversation_id, name, auto_generate: bool, user: str):
+ data = {"name": name, "auto_generate": auto_generate, "user": user}
return self._send_request("POST", f"/conversations/{conversation_id}/name", data)
def delete_conversation(self, conversation_id, user):
data = {"user": user}
return self._send_request("DELETE", f"/conversations/{conversation_id}", data)
-
+
def audio_to_text(self, audio_file, user):
data = {"user": user}
files = {"audio_file": audio_file}
@@ -125,10 +102,10 @@ class ChatClient(DifyClient):
class WorkflowClient(DifyClient):
- def run(self, inputs:dict, response_mode:str="streaming", user:str="abc-123"):
+ def run(self, inputs: dict, response_mode: str = "streaming", user: str = "abc-123"):
data = {"inputs": inputs, "response_mode": response_mode, "user": user}
return self._send_request("POST", "/workflows/run", data)
-
+
def stop(self, task_id, user):
data = {"user": user}
return self._send_request("POST", f"/workflows/tasks/{task_id}/stop", data)
@@ -137,10 +114,8 @@ class WorkflowClient(DifyClient):
return self._send_request("GET", f"/workflows/run/{workflow_run_id}")
-
class KnowledgeBaseClient(DifyClient):
-
- def __init__(self, api_key, base_url: str = 'https://api.dify.ai/v1', dataset_id: str = None):
+ def __init__(self, api_key, base_url: str = "https://api.dify.ai/v1", dataset_id: str = None):
"""
Construct a KnowledgeBaseClient object.
@@ -150,10 +125,7 @@ class KnowledgeBaseClient(DifyClient):
dataset_id (str, optional): ID of the dataset. Defaults to None. You don't need this if you just want to
create a new dataset. or list datasets. otherwise you need to set this.
"""
- super().__init__(
- api_key=api_key,
- base_url=base_url
- )
+ super().__init__(api_key=api_key, base_url=base_url)
self.dataset_id = dataset_id
def _get_dataset_id(self):
@@ -162,10 +134,10 @@ class KnowledgeBaseClient(DifyClient):
return self.dataset_id
def create_dataset(self, name: str, **kwargs):
- return self._send_request('POST', '/datasets', {'name': name}, **kwargs)
+ return self._send_request("POST", "/datasets", {"name": name}, **kwargs)
def list_datasets(self, page: int = 1, page_size: int = 20, **kwargs):
- return self._send_request('GET', f'/datasets?page={page}&limit={page_size}', **kwargs)
+ return self._send_request("GET", f"/datasets?page={page}&limit={page_size}", **kwargs)
def create_document_by_text(self, name, text, extra_params: dict = None, **kwargs):
"""
@@ -193,14 +165,7 @@ class KnowledgeBaseClient(DifyClient):
}
:return: Response from the API
"""
- data = {
- 'indexing_technique': 'high_quality',
- 'process_rule': {
- 'mode': 'automatic'
- },
- 'name': name,
- 'text': text
- }
+ data = {"indexing_technique": "high_quality", "process_rule": {"mode": "automatic"}, "name": name, "text": text}
if extra_params is not None and isinstance(extra_params, dict):
data.update(extra_params)
url = f"/datasets/{self._get_dataset_id()}/document/create_by_text"
@@ -233,10 +198,7 @@ class KnowledgeBaseClient(DifyClient):
}
:return: Response from the API
"""
- data = {
- 'name': name,
- 'text': text
- }
+ data = {"name": name, "text": text}
if extra_params is not None and isinstance(extra_params, dict):
data.update(extra_params)
url = f"/datasets/{self._get_dataset_id()}/documents/{document_id}/update_by_text"
@@ -269,16 +231,11 @@ class KnowledgeBaseClient(DifyClient):
:return: Response from the API
"""
files = {"file": open(file_path, "rb")}
- data = {
- 'process_rule': {
- 'mode': 'automatic'
- },
- 'indexing_technique': 'high_quality'
- }
+ data = {"process_rule": {"mode": "automatic"}, "indexing_technique": "high_quality"}
if extra_params is not None and isinstance(extra_params, dict):
data.update(extra_params)
if original_document_id is not None:
- data['original_document_id'] = original_document_id
+ data["original_document_id"] = original_document_id
url = f"/datasets/{self._get_dataset_id()}/document/create_by_file"
return self._send_request_with_files("POST", url, {"data": json.dumps(data)}, files)
@@ -352,11 +309,11 @@ class KnowledgeBaseClient(DifyClient):
"""
params = {}
if page is not None:
- params['page'] = page
+ params["page"] = page
if page_size is not None:
- params['limit'] = page_size
+ params["limit"] = page_size
if keyword is not None:
- params['keyword'] = keyword
+ params["keyword"] = keyword
url = f"/datasets/{self._get_dataset_id()}/documents"
return self._send_request("GET", url, params=params, **kwargs)
@@ -383,9 +340,9 @@ class KnowledgeBaseClient(DifyClient):
url = f"/datasets/{self._get_dataset_id()}/documents/{document_id}/segments"
params = {}
if keyword is not None:
- params['keyword'] = keyword
+ params["keyword"] = keyword
if status is not None:
- params['status'] = status
+ params["status"] = status
if "params" in kwargs:
params.update(kwargs["params"])
return self._send_request("GET", url, params=params, **kwargs)
diff --git a/web/app/activate/page.tsx b/web/app/activate/page.tsx
index 90874f50ce..0f18544335 100644
--- a/web/app/activate/page.tsx
+++ b/web/app/activate/page.tsx
@@ -22,7 +22,7 @@ const Activate = () => {
{children})
- }, [chartData, children, className, inline, isSVG, language, languageShowName, match, props])
+ }
+ else if (language === 'svg' && isSVG) {
+ return (
+ {children}
+
+ return (
+