mirror of https://github.com/langgenius/dify.git
Merge branch 'main' into feat/compliance-report-download
Commit 2ff2b08739
@@ -52,12 +52,14 @@ RUN apt-get update \
    && apt-get install -y --no-install-recommends curl nodejs libgmp-dev libmpfr-dev libmpc-dev \
    # if you located in China, you can use aliyun mirror to speed up
    # && echo "deb http://mirrors.aliyun.com/debian testing main" > /etc/apt/sources.list \
    && echo "deb http://deb.debian.org/debian testing main" > /etc/apt/sources.list \
    && echo "deb http://deb.debian.org/debian bookworm main" > /etc/apt/sources.list \
    && apt-get update \
    # For Security
    && apt-get install -y --no-install-recommends expat=2.6.4-1 libldap-2.5-0=2.5.19+dfsg-1 perl=5.40.0-8 libsqlite3-0=3.46.1-1 zlib1g=1:1.3.dfsg+really1.3.1-1+b1 \
    && apt-get install -y --no-install-recommends expat libldap-2.5-0 perl libsqlite3-0 zlib1g \
    # install a chinese font to support the use of tools like matplotlib
    && apt-get install -y fonts-noto-cjk \
    # install libmagic to support the use of python-magic guess MIMETYPE
    && apt-get install -y libmagic1 \
    && apt-get autoremove -y \
    && rm -rf /var/lib/apt/lists/*

@@ -1,12 +1,32 @@
import mimetypes
import os
import platform
import re
import urllib.parse
import warnings
from collections.abc import Mapping
from typing import Any
from uuid import uuid4

import httpx

try:
    import magic
except ImportError:
    if platform.system() == "Windows":
        warnings.warn(
            "To use python-magic guess MIMETYPE, you need to run `pip install python-magic-bin`", stacklevel=2
        )
    elif platform.system() == "Darwin":
        warnings.warn("To use python-magic guess MIMETYPE, you need to run `brew install libmagic`", stacklevel=2)
    elif platform.system() == "Linux":
        warnings.warn(
            "To use python-magic guess MIMETYPE, you need to run `sudo apt-get install libmagic1`", stacklevel=2
        )
    else:
        warnings.warn("To use python-magic guess MIMETYPE, you need to install `libmagic`", stacklevel=2)
    magic = None  # type: ignore

from pydantic import BaseModel

from configs import dify_config

@@ -47,6 +67,13 @@ def guess_file_info_from_response(response: httpx.Response):
        # If guessing fails, use Content-Type from response headers
        mimetype = response.headers.get("Content-Type", "application/octet-stream")

    # Use python-magic to guess MIME type if still unknown or generic
    if mimetype == "application/octet-stream" and magic is not None:
        try:
            mimetype = magic.from_buffer(response.content[:1024], mime=True)
        except magic.MagicException:
            pass

    extension = os.path.splitext(filename)[1]

    # Ensure filename has an extension

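The guarded import above means callers can rely on `magic` being either a working module or `None`. A minimal sketch of the fallback behavior the hunk adds (the URL and response are illustrative only, not part of the diff):

import httpx

try:
    import magic  # python-magic; needs the libmagic system library installed
except ImportError:
    magic = None

# Fetch a file served with a generic Content-Type and let libmagic refine it.
response = httpx.get("https://example.com/report.pdf")
mimetype = response.headers.get("Content-Type", "application/octet-stream")
if mimetype == "application/octet-stream" and magic is not None:
    # Sniff only the first 1 KiB of the body, as the diff does.
    mimetype = magic.from_buffer(response.content[:1024], mime=True)
print(mimetype)
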
@@ -620,7 +620,6 @@ class DatasetRetrievalSettingApi(Resource):
        match vector_type:
            case (
                VectorType.RELYT
                | VectorType.PGVECTOR
                | VectorType.TIDB_VECTOR
                | VectorType.CHROMA
                | VectorType.TENCENT

@@ -50,7 +50,7 @@ class MessageListApi(InstalledAppResource):

        try:
            return MessageService.pagination_by_first_id(
                app_model, current_user, args["conversation_id"], args["first_id"], args["limit"], "desc"
                app_model, current_user, args["conversation_id"], args["first_id"], args["limit"]
            )
        except services.errors.conversation.ConversationNotExistsError:
            raise NotFound("Conversation Not Exists.")

@@ -1,3 +1,5 @@
import json

from flask_restful import Resource, reqparse  # type: ignore

from controllers.console.wraps import setup_required

@@ -29,4 +31,34 @@ class EnterpriseWorkspace(Resource):
        return {"message": "enterprise workspace created."}


class EnterpriseWorkspaceNoOwnerEmail(Resource):
    @setup_required
    @inner_api_only
    def post(self):
        parser = reqparse.RequestParser()
        parser.add_argument("name", type=str, required=True, location="json")
        args = parser.parse_args()

        tenant = TenantService.create_tenant(args["name"], is_from_dashboard=True)

        tenant_was_created.send(tenant)

        resp = {
            "id": tenant.id,
            "name": tenant.name,
            "encrypt_public_key": tenant.encrypt_public_key,
            "plan": tenant.plan,
            "status": tenant.status,
            "custom_config": json.loads(tenant.custom_config) if tenant.custom_config else {},
            "created_at": tenant.created_at.isoformat() if tenant.created_at else None,
            "updated_at": tenant.updated_at.isoformat() if tenant.updated_at else None,
        }

        return {
            "message": "enterprise workspace created.",
            "tenant": resp,
        }


api.add_resource(EnterpriseWorkspace, "/enterprise/workspace")
api.add_resource(EnterpriseWorkspaceNoOwnerEmail, "/enterprise/workspace/ownerless")

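For reference, the new ownerless-workspace route can be exercised like any other inner-API endpoint. A minimal sketch, assuming a local deployment; the base URL and the inner-API authentication header are assumptions, not part of the diff:

import httpx

# Illustrative call to the route registered above.
resp = httpx.post(
    "http://localhost:5001/inner/api/enterprise/workspace/ownerless",  # assumed mount point
    json={"name": "acme-workspace"},
    headers={"X-Inner-Api-Key": "<inner-api-key>"},  # assumed auth header
)
print(resp.json()["tenant"]["id"])
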
@@ -18,6 +18,7 @@ from controllers.service_api.app.error import (
from controllers.service_api.dataset.error import (
    ArchivedDocumentImmutableError,
    DocumentIndexingError,
    InvalidMetadataError,
)
from controllers.service_api.wraps import DatasetApiResource, cloud_edition_billing_resource_check
from core.errors.error import ProviderTokenNotInitError

@@ -50,6 +51,9 @@ class DocumentAddByTextApi(DatasetApiResource):
            "indexing_technique", type=str, choices=Dataset.INDEXING_TECHNIQUE_LIST, nullable=False, location="json"
        )
        parser.add_argument("retrieval_model", type=dict, required=False, nullable=False, location="json")
        parser.add_argument("doc_type", type=str, required=False, nullable=True, location="json")
        parser.add_argument("doc_metadata", type=dict, required=False, nullable=True, location="json")

        args = parser.parse_args()
        dataset_id = str(dataset_id)
        tenant_id = str(tenant_id)

@@ -61,6 +65,28 @@ class DocumentAddByTextApi(DatasetApiResource):
        if not dataset.indexing_technique and not args["indexing_technique"]:
            raise ValueError("indexing_technique is required.")

        # Validate metadata if provided
        if args.get("doc_type") or args.get("doc_metadata"):
            if not args.get("doc_type") or not args.get("doc_metadata"):
                raise InvalidMetadataError("Both doc_type and doc_metadata must be provided when adding metadata")

            if args["doc_type"] not in DocumentService.DOCUMENT_METADATA_SCHEMA:
                raise InvalidMetadataError(
                    "Invalid doc_type. Must be one of: " + ", ".join(DocumentService.DOCUMENT_METADATA_SCHEMA.keys())
                )

            if not isinstance(args["doc_metadata"], dict):
                raise InvalidMetadataError("doc_metadata must be a dictionary")

            # Validate metadata schema based on doc_type
            if args["doc_type"] != "others":
                metadata_schema = DocumentService.DOCUMENT_METADATA_SCHEMA[args["doc_type"]]
                for key, value in args["doc_metadata"].items():
                    if key in metadata_schema and not isinstance(value, metadata_schema[key]):
                        raise InvalidMetadataError(f"Invalid type for metadata field {key}")
            # set to MetaDataConfig
            args["metadata"] = {"doc_type": args["doc_type"], "doc_metadata": args["doc_metadata"]}

        text = args.get("text")
        name = args.get("name")
        if text is None or name is None:

@@ -107,6 +133,8 @@ class DocumentUpdateByTextApi(DatasetApiResource):
            "doc_language", type=str, default="English", required=False, nullable=False, location="json"
        )
        parser.add_argument("retrieval_model", type=dict, required=False, nullable=False, location="json")
        parser.add_argument("doc_type", type=str, required=False, nullable=True, location="json")
        parser.add_argument("doc_metadata", type=dict, required=False, nullable=True, location="json")
        args = parser.parse_args()
        dataset_id = str(dataset_id)
        tenant_id = str(tenant_id)

@@ -115,6 +143,32 @@ class DocumentUpdateByTextApi(DatasetApiResource):
        if not dataset:
            raise ValueError("Dataset is not exist.")

        # indexing_technique is already set in dataset since this is an update
        args["indexing_technique"] = dataset.indexing_technique

        # Validate metadata if provided
        if args.get("doc_type") or args.get("doc_metadata"):
            if not args.get("doc_type") or not args.get("doc_metadata"):
                raise InvalidMetadataError("Both doc_type and doc_metadata must be provided when adding metadata")

            if args["doc_type"] not in DocumentService.DOCUMENT_METADATA_SCHEMA:
                raise InvalidMetadataError(
                    "Invalid doc_type. Must be one of: " + ", ".join(DocumentService.DOCUMENT_METADATA_SCHEMA.keys())
                )

            if not isinstance(args["doc_metadata"], dict):
                raise InvalidMetadataError("doc_metadata must be a dictionary")

            # Validate metadata schema based on doc_type
            if args["doc_type"] != "others":
                metadata_schema = DocumentService.DOCUMENT_METADATA_SCHEMA[args["doc_type"]]
                for key, value in args["doc_metadata"].items():
                    if key in metadata_schema and not isinstance(value, metadata_schema[key]):
                        raise InvalidMetadataError(f"Invalid type for metadata field {key}")

            # set to MetaDataConfig
            args["metadata"] = {"doc_type": args["doc_type"], "doc_metadata": args["doc_metadata"]}

        if args["text"]:
            text = args.get("text")
            name = args.get("name")

@@ -161,6 +215,30 @@ class DocumentAddByFileApi(DatasetApiResource):
            args["doc_form"] = "text_model"
        if "doc_language" not in args:
            args["doc_language"] = "English"

        # Validate metadata if provided
        if args.get("doc_type") or args.get("doc_metadata"):
            if not args.get("doc_type") or not args.get("doc_metadata"):
                raise InvalidMetadataError("Both doc_type and doc_metadata must be provided when adding metadata")

            if args["doc_type"] not in DocumentService.DOCUMENT_METADATA_SCHEMA:
                raise InvalidMetadataError(
                    "Invalid doc_type. Must be one of: " + ", ".join(DocumentService.DOCUMENT_METADATA_SCHEMA.keys())
                )

            if not isinstance(args["doc_metadata"], dict):
                raise InvalidMetadataError("doc_metadata must be a dictionary")

            # Validate metadata schema based on doc_type
            if args["doc_type"] != "others":
                metadata_schema = DocumentService.DOCUMENT_METADATA_SCHEMA[args["doc_type"]]
                for key, value in args["doc_metadata"].items():
                    if key in metadata_schema and not isinstance(value, metadata_schema[key]):
                        raise InvalidMetadataError(f"Invalid type for metadata field {key}")

            # set to MetaDataConfig
            args["metadata"] = {"doc_type": args["doc_type"], "doc_metadata": args["doc_metadata"]}

        # get dataset info
        dataset_id = str(dataset_id)
        tenant_id = str(tenant_id)

@@ -228,6 +306,29 @@ class DocumentUpdateByFileApi(DatasetApiResource):
        if "doc_language" not in args:
            args["doc_language"] = "English"

        # Validate metadata if provided
        if args.get("doc_type") or args.get("doc_metadata"):
            if not args.get("doc_type") or not args.get("doc_metadata"):
                raise InvalidMetadataError("Both doc_type and doc_metadata must be provided when adding metadata")

            if args["doc_type"] not in DocumentService.DOCUMENT_METADATA_SCHEMA:
                raise InvalidMetadataError(
                    "Invalid doc_type. Must be one of: " + ", ".join(DocumentService.DOCUMENT_METADATA_SCHEMA.keys())
                )

            if not isinstance(args["doc_metadata"], dict):
                raise InvalidMetadataError("doc_metadata must be a dictionary")

            # Validate metadata schema based on doc_type
            if args["doc_type"] != "others":
                metadata_schema = DocumentService.DOCUMENT_METADATA_SCHEMA[args["doc_type"]]
                for key, value in args["doc_metadata"].items():
                    if key in metadata_schema and not isinstance(value, metadata_schema[key]):
                        raise InvalidMetadataError(f"Invalid type for metadata field {key}")

            # set to MetaDataConfig
            args["metadata"] = {"doc_type": args["doc_type"], "doc_metadata": args["doc_metadata"]}

        # get dataset info
        dataset_id = str(dataset_id)
        tenant_id = str(tenant_id)

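The same validation block is repeated verbatim in all four endpoints above. A hedged sketch of how it could be factored into a shared helper; the function name is illustrative and not part of the diff, and it assumes the same imports as the file (InvalidMetadataError, DocumentService):

def validate_doc_metadata_args(args: dict) -> None:
    """Illustrative helper mirroring the validation block repeated above."""
    if not (args.get("doc_type") or args.get("doc_metadata")):
        return
    if not args.get("doc_type") or not args.get("doc_metadata"):
        raise InvalidMetadataError("Both doc_type and doc_metadata must be provided when adding metadata")
    if args["doc_type"] not in DocumentService.DOCUMENT_METADATA_SCHEMA:
        raise InvalidMetadataError(
            "Invalid doc_type. Must be one of: " + ", ".join(DocumentService.DOCUMENT_METADATA_SCHEMA.keys())
        )
    if not isinstance(args["doc_metadata"], dict):
        raise InvalidMetadataError("doc_metadata must be a dictionary")
    if args["doc_type"] != "others":
        metadata_schema = DocumentService.DOCUMENT_METADATA_SCHEMA[args["doc_type"]]
        for key, value in args["doc_metadata"].items():
            if key in metadata_schema and not isinstance(value, metadata_schema[key]):
                raise InvalidMetadataError(f"Invalid type for metadata field {key}")
    args["metadata"] = {"doc_type": args["doc_type"], "doc_metadata": args["doc_metadata"]}
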
@@ -91,7 +91,7 @@ class MessageListApi(WebApiResource):

        try:
            return MessageService.pagination_by_first_id(
                app_model, end_user, args["conversation_id"], args["first_id"], args["limit"], "desc"
                app_model, end_user, args["conversation_id"], args["first_id"], args["limit"]
            )
        except services.errors.conversation.ConversationNotExistsError:
            raise NotFound("Conversation Not Exists.")

@@ -202,7 +202,7 @@ class AgentChatAppRunner(AppRunner):
        # change function call strategy based on LLM model
        llm_model = cast(LargeLanguageModel, model_instance.model_type_instance)
        model_schema = llm_model.get_model_schema(model_instance.model, model_instance.credentials)
        if not model_schema or not model_schema.features:
        if not model_schema:
            raise ValueError("Model schema not found")

        if {ModelFeature.MULTI_TOOL_CALL, ModelFeature.TOOL_CALL}.intersection(model_schema.features or []):

@@ -221,13 +221,12 @@ class AIModel(ABC):
        :param credentials: model credentials
        :return: model schema
        """
        # get predefined models (predefined_models)
        models = self.predefined_models()

        model_map = {model.model: model for model in models}
        if model in model_map:
            return model_map[model]
        # Try to get model schema from predefined models
        for predefined_model in self.predefined_models():
            if model == predefined_model.model:
                return predefined_model

        # Try to get model schema from credentials
        if credentials:
            model_schema = self.get_customizable_model_schema_from_credentials(model, credentials)
            if model_schema:

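For the handful of predefined models a provider declares, the linear scan added here is equivalent to the removed dict lookup while avoiding a throwaway dict per call. A minimal sketch of the two shapes; `predefined` and `model` are stand-ins, not the real entities:

from types import SimpleNamespace

predefined = [SimpleNamespace(model="gpt-4o"), SimpleNamespace(model="gpt-4o-mini")]
model = "gpt-4o-mini"

# Removed shape: build a dict per call, then look up.
schemas = {s.model: s for s in predefined}
found = schemas.get(model)

# Added shape: linear scan with early exit; equivalent for short lists.
found = next((s for s in predefined if s.model == model), None)
print(found.model)
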
@@ -53,6 +53,9 @@ model_credential_schema:
      type: select
      required: true
      options:
        - label:
            en_US: 2024-12-01-preview
          value: 2024-12-01-preview
        - label:
            en_US: 2024-10-01-preview
          value: 2024-10-01-preview

@@ -677,16 +677,17 @@ class CohereLargeLanguageModel(LargeLanguageModel):

        :return: model schema
        """
        # get model schema
        models = self.predefined_models()
        model_map = {model.model: model for model in models}

        mode = credentials.get("mode")
        base_model_schema = None
        for predefined_model in self.predefined_models():
            if (
                mode == "chat" and predefined_model.model == "command-light-chat"
            ) or predefined_model.model == "command-light":
                base_model_schema = predefined_model
                break

        if mode == "chat":
            base_model_schema = model_map["command-light-chat"]
        else:
            base_model_schema = model_map["command-light"]
        if not base_model_schema:
            raise ValueError("Model not found")

        base_model_schema = cast(AIModelEntity, base_model_schema)

@@ -1,7 +1,7 @@
model: Sao10K/L3-8B-Stheno-v3.2
label:
  zh_Hans: Sao10K/L3-8B-Stheno-v3.2
  en_US: Sao10K/L3-8B-Stheno-v3.2
  zh_Hans: L3 8B Stheno V3.2
  en_US: L3 8B Stheno V3.2
model_type: llm
features:
  - agent-thought

@@ -1,4 +1,5 @@
# Deepseek Models
- deepseek/deepseek-r1
- deepseek/deepseek_v3

# LLaMA Models

@@ -1,7 +1,7 @@
model: jondurbin/airoboros-l2-70b
label:
  zh_Hans: jondurbin/airoboros-l2-70b
  en_US: jondurbin/airoboros-l2-70b
  zh_Hans: Airoboros L2 70B
  en_US: Airoboros L2 70B
model_type: llm
features:
  - agent-thought

@@ -0,0 +1,41 @@
model: deepseek/deepseek-r1
label:
  zh_Hans: DeepSeek R1
  en_US: DeepSeek R1
model_type: llm
features:
  - agent-thought
model_properties:
  mode: chat
  context_size: 64000
parameter_rules:
  - name: temperature
    use_template: temperature
    min: 0
    max: 2
    default: 1
  - name: top_p
    use_template: top_p
    min: 0
    max: 1
    default: 1
  - name: max_tokens
    use_template: max_tokens
    min: 1
    max: 2048
    default: 512
  - name: frequency_penalty
    use_template: frequency_penalty
    min: -2
    max: 2
    default: 0
  - name: presence_penalty
    use_template: presence_penalty
    min: -2
    max: 2
    default: 0
pricing:
  input: '0.04'
  output: '0.04'
  unit: '0.0001'
  currency: USD

@@ -1,7 +1,7 @@
model: deepseek/deepseek_v3
label:
  zh_Hans: deepseek/deepseek_v3
  en_US: deepseek/deepseek_v3
  zh_Hans: DeepSeek V3
  en_US: DeepSeek V3
model_type: llm
features:
  - agent-thought

@@ -1,7 +1,7 @@
model: cognitivecomputations/dolphin-mixtral-8x22b
label:
  zh_Hans: cognitivecomputations/dolphin-mixtral-8x22b
  en_US: cognitivecomputations/dolphin-mixtral-8x22b
  zh_Hans: Dolphin Mixtral 8x22B
  en_US: Dolphin Mixtral 8x22B
model_type: llm
features:
  - agent-thought

@@ -1,7 +1,7 @@
model: google/gemma-2-9b-it
label:
  zh_Hans: google/gemma-2-9b-it
  en_US: google/gemma-2-9b-it
  zh_Hans: Gemma 2 9B
  en_US: Gemma 2 9B
model_type: llm
features:
  - agent-thought

@@ -1,7 +1,7 @@
model: nousresearch/hermes-2-pro-llama-3-8b
label:
  zh_Hans: nousresearch/hermes-2-pro-llama-3-8b
  en_US: nousresearch/hermes-2-pro-llama-3-8b
  zh_Hans: Hermes 2 Pro Llama 3 8B
  en_US: Hermes 2 Pro Llama 3 8B
model_type: llm
features:
  - agent-thought

@@ -1,7 +1,7 @@
model: sao10k/l3-70b-euryale-v2.1
label:
  zh_Hans: sao10k/l3-70b-euryale-v2.1
  en_US: sao10k/l3-70b-euryale-v2.1
  zh_Hans: "L3 70B Euryale V2.1\t"
  en_US: "L3 70B Euryale V2.1\t"
model_type: llm
features:
  - agent-thought

@@ -1,7 +1,7 @@
model: sao10k/l3-8b-lunaris
label:
  zh_Hans: sao10k/l3-8b-lunaris
  en_US: sao10k/l3-8b-lunaris
  zh_Hans: "Sao10k L3 8B Lunaris"
  en_US: "Sao10k L3 8B Lunaris"
model_type: llm
features:
  - agent-thought

@@ -1,7 +1,7 @@
model: sao10k/l31-70b-euryale-v2.2
label:
  zh_Hans: sao10k/l31-70b-euryale-v2.2
  en_US: sao10k/l31-70b-euryale-v2.2
  zh_Hans: L31 70B Euryale V2.2
  en_US: L31 70B Euryale V2.2
model_type: llm
features:
  - agent-thought

@@ -1,7 +1,7 @@
model: meta-llama/llama-3-70b-instruct
label:
  zh_Hans: meta-llama/llama-3-70b-instruct
  en_US: meta-llama/llama-3-70b-instruct
  zh_Hans: Llama3 70b Instruct
  en_US: Llama3 70b Instruct
model_type: llm
features:
  - agent-thought

@@ -1,7 +1,7 @@
model: meta-llama/llama-3-8b-instruct
label:
  zh_Hans: meta-llama/llama-3-8b-instruct
  en_US: meta-llama/llama-3-8b-instruct
  zh_Hans: Llama 3 8B Instruct
  en_US: Llama 3 8B Instruct
model_type: llm
features:
  - agent-thought

@@ -1,7 +1,7 @@
model: meta-llama/llama-3.1-70b-instruct
label:
  zh_Hans: meta-llama/llama-3.1-70b-instruct
  en_US: meta-llama/llama-3.1-70b-instruct
  zh_Hans: Llama 3.1 70B Instruct
  en_US: Llama 3.1 70B Instruct
model_type: llm
features:
  - agent-thought

@@ -1,7 +1,7 @@
model: meta-llama/llama-3.1-8b-instruct-bf16
label:
  zh_Hans: meta-llama/llama-3.1-8b-instruct-bf16
  en_US: meta-llama/llama-3.1-8b-instruct-bf16
  zh_Hans: Llama 3.1 8B Instruct BF16
  en_US: Llama 3.1 8B Instruct BF16
model_type: llm
features:
  - agent-thought

@@ -1,7 +1,7 @@
model: meta-llama/llama-3.1-8b-instruct-max
label:
  zh_Hans: meta-llama/llama-3.1-8b-instruct-max
  en_US: meta-llama/llama-3.1-8b-instruct-max
  zh_Hans: "Llama3.1 8B Instruct Max\t"
  en_US: "Llama3.1 8B Instruct Max\t"
model_type: llm
features:
  - agent-thought

@@ -1,7 +1,7 @@
model: meta-llama/llama-3.1-8b-instruct
label:
  zh_Hans: meta-llama/llama-3.1-8b-instruct
  en_US: meta-llama/llama-3.1-8b-instruct
  zh_Hans: Llama 3.1 8B Instruct
  en_US: Llama 3.1 8B Instruct
model_type: llm
features:
  - agent-thought

@@ -1,7 +1,7 @@
model: meta-llama/llama-3.2-11b-vision-instruct
label:
  zh_Hans: meta-llama/llama-3.2-11b-vision-instruct
  en_US: meta-llama/llama-3.2-11b-vision-instruct
  zh_Hans: "Llama 3.2 11B Vision Instruct\t"
  en_US: "Llama 3.2 11B Vision Instruct\t"
model_type: llm
features:
  - agent-thought

@@ -1,7 +1,7 @@
model: meta-llama/llama-3.2-1b-instruct
label:
  zh_Hans: meta-llama/llama-3.2-1b-instruct
  en_US: meta-llama/llama-3.2-1b-instruct
  zh_Hans: "Llama 3.2 1B Instruct\t"
  en_US: "Llama 3.2 1B Instruct\t"
model_type: llm
features:
  - agent-thought

@@ -1,7 +1,7 @@
model: meta-llama/llama-3.2-3b-instruct
label:
  zh_Hans: meta-llama/llama-3.2-3b-instruct
  en_US: meta-llama/llama-3.2-3b-instruct
  zh_Hans: Llama 3.2 3B Instruct
  en_US: Llama 3.2 3B Instruct
model_type: llm
features:
  - agent-thought

@@ -1,7 +1,7 @@
model: meta-llama/llama-3.3-70b-instruct
label:
  zh_Hans: meta-llama/llama-3.3-70b-instruct
  en_US: meta-llama/llama-3.3-70b-instruct
  zh_Hans: Llama 3.3 70B Instruct
  en_US: Llama 3.3 70B Instruct
model_type: llm
features:
  - agent-thought

@@ -1,7 +1,7 @@
model: sophosympatheia/midnight-rose-70b
label:
  zh_Hans: sophosympatheia/midnight-rose-70b
  en_US: sophosympatheia/midnight-rose-70b
  zh_Hans: Midnight Rose 70B
  en_US: Midnight Rose 70B
model_type: llm
features:
  - agent-thought

@@ -1,7 +1,7 @@
model: mistralai/mistral-7b-instruct
label:
  zh_Hans: mistralai/mistral-7b-instruct
  en_US: mistralai/mistral-7b-instruct
  zh_Hans: Mistral 7B Instruct
  en_US: Mistral 7B Instruct
model_type: llm
features:
  - agent-thought

@@ -1,7 +1,7 @@
model: mistralai/mistral-nemo
label:
  zh_Hans: mistralai/mistral-nemo
  en_US: mistralai/mistral-nemo
  zh_Hans: Mistral Nemo
  en_US: Mistral Nemo
model_type: llm
features:
  - agent-thought

@@ -1,7 +1,7 @@
model: gryphe/mythomax-l2-13b
label:
  zh_Hans: gryphe/mythomax-l2-13b
  en_US: gryphe/mythomax-l2-13b
  zh_Hans: Mythomax L2 13B
  en_US: Mythomax L2 13B
model_type: llm
features:
  - agent-thought

@@ -1,7 +1,7 @@
model: nousresearch/nous-hermes-llama2-13b
label:
  zh_Hans: nousresearch/nous-hermes-llama2-13b
  en_US: nousresearch/nous-hermes-llama2-13b
  zh_Hans: Nous Hermes Llama2 13B
  en_US: Nous Hermes Llama2 13B
model_type: llm
features:
  - agent-thought

@@ -1,7 +1,7 @@
model: openchat/openchat-7b
label:
  zh_Hans: openchat/openchat-7b
  en_US: openchat/openchat-7b
  zh_Hans: OpenChat 7B
  en_US: OpenChat 7B
model_type: llm
features:
  - agent-thought

@@ -1,7 +1,7 @@
model: teknium/openhermes-2.5-mistral-7b
label:
  zh_Hans: teknium/openhermes-2.5-mistral-7b
  en_US: teknium/openhermes-2.5-mistral-7b
  zh_Hans: Openhermes2.5 Mistral 7B
  en_US: Openhermes2.5 Mistral 7B
model_type: llm
features:
  - agent-thought

@@ -1,7 +1,7 @@
model: qwen/qwen-2-72b-instruct
label:
  zh_Hans: qwen/qwen-2-72b-instruct
  en_US: qwen/qwen-2-72b-instruct
  zh_Hans: Qwen2 72B Instruct
  en_US: Qwen2 72B Instruct
model_type: llm
features:
  - agent-thought

@@ -1,7 +1,7 @@
model: qwen/qwen-2-7b-instruct
label:
  zh_Hans: qwen/qwen-2-7b-instruct
  en_US: qwen/qwen-2-7b-instruct
  zh_Hans: Qwen 2 7B Instruct
  en_US: Qwen 2 7B Instruct
model_type: llm
features:
  - agent-thought

@@ -1,7 +1,7 @@
model: qwen/qwen-2-vl-72b-instruct
label:
  zh_Hans: qwen/qwen-2-vl-72b-instruct
  en_US: qwen/qwen-2-vl-72b-instruct
  zh_Hans: Qwen 2 VL 72B Instruct
  en_US: Qwen 2 VL 72B Instruct
model_type: llm
features:
  - agent-thought

@@ -1,7 +1,7 @@
model: qwen/qwen-2.5-72b-instruct
label:
  zh_Hans: qwen/qwen-2.5-72b-instruct
  en_US: qwen/qwen-2.5-72b-instruct
  zh_Hans: Qwen 2.5 72B Instruct
  en_US: Qwen 2.5 72B Instruct
model_type: llm
features:
  - agent-thought

@@ -1,7 +1,7 @@
model: microsoft/wizardlm-2-8x22b
label:
  zh_Hans: microsoft/wizardlm-2-8x22b
  en_US: microsoft/wizardlm-2-8x22b
  zh_Hans: Wizardlm 2 8x22B
  en_US: Wizardlm 2 8x22B
model_type: llm
features:
  - agent-thought

@@ -8,7 +8,7 @@ icon_small:
  en_US: icon_s_en.svg
icon_large:
  en_US: icon_l_en.svg
background: "#eadeff"
background: "#c7fce2"
help:
  title:
    en_US: Get your API key from Novita AI

@@ -341,9 +341,6 @@ class OpenAILargeLanguageModel(_CommonOpenAI, LargeLanguageModel):
        :param credentials: provider credentials
        :return:
        """
        # get predefined models
        predefined_models = self.predefined_models()
        predefined_models_map = {model.model: model for model in predefined_models}

        # transform credentials to kwargs for model instance
        credentials_kwargs = self._to_credential_kwargs(credentials)

@@ -359,9 +356,10 @@ class OpenAILargeLanguageModel(_CommonOpenAI, LargeLanguageModel):
            base_model = model.id.split(":")[1]

            base_model_schema = None
            for predefined_model_name, predefined_model in predefined_models_map.items():
                if predefined_model_name in base_model:
            for predefined_model in self.predefined_models():
                if predefined_model.model in base_model:
                    base_model_schema = predefined_model
                    break

            if not base_model_schema:
                continue

@@ -1186,12 +1184,14 @@ class OpenAILargeLanguageModel(_CommonOpenAI, LargeLanguageModel):
        base_model = model.split(":")[1]

        # get model schema
        models = self.predefined_models()
        model_map = {model.model: model for model in models}
        if base_model not in model_map:
            raise ValueError(f"Base model {base_model} not found")
        base_model_schema = None
        for predefined_model in self.predefined_models():
            if base_model == predefined_model.model:
                base_model_schema = predefined_model
                break

        base_model_schema = model_map[base_model]
        if not base_model_schema:
            raise ValueError(f"Base model {base_model} not found")

        base_model_schema_features = base_model_schema.features or []
        base_model_schema_model_properties = base_model_schema.model_properties

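Both loops above match against the base model embedded in a fine-tuned model id. OpenAI fine-tune ids are colon-delimited (e.g. `ft:gpt-3.5-turbo-0125:org::id`), so index 1 of the split is the base model. A quick illustration; the id itself is a made-up example:

model_id = "ft:gpt-3.5-turbo-0125:acme-org::abc123"
base_model = model_id.split(":")[1]
print(base_model)  # gpt-3.5-turbo-0125
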
@@ -1,29 +1,13 @@
import json
import time
from decimal import Decimal
from typing import Optional
from urllib.parse import urljoin

import numpy as np
import requests

from core.entities.embedding_type import EmbeddingInputType
from core.model_runtime.entities.common_entities import I18nObject
from core.model_runtime.entities.model_entities import (
    AIModelEntity,
    FetchFrom,
    ModelPropertyKey,
    ModelType,
    PriceConfig,
    PriceType,
from core.model_runtime.entities.text_embedding_entities import TextEmbeddingResult
from core.model_runtime.model_providers.openai_api_compatible.text_embedding.text_embedding import (
    OAICompatEmbeddingModel,
)
from core.model_runtime.entities.text_embedding_entities import EmbeddingUsage, TextEmbeddingResult
from core.model_runtime.errors.validate import CredentialsValidateFailedError
from core.model_runtime.model_providers.__base.text_embedding_model import TextEmbeddingModel
from core.model_runtime.model_providers.openai_api_compatible._common import _CommonOaiApiCompat


class OAICompatEmbeddingModel(_CommonOaiApiCompat, TextEmbeddingModel):
class PerfXCloudEmbeddingModel(OAICompatEmbeddingModel):
    """
    Model class for an OpenAI API-compatible text embedding model.
    """

@@ -47,86 +31,10 @@ class OAICompatEmbeddingModel(_CommonOaiApiCompat, TextEmbeddingModel):
        :return: embeddings result
        """

        # Prepare headers and payload for the request
        headers = {"Content-Type": "application/json"}

        api_key = credentials.get("api_key")
        if api_key:
            headers["Authorization"] = f"Bearer {api_key}"
        endpoint_url: Optional[str]
        if "endpoint_url" not in credentials or credentials["endpoint_url"] == "":
            endpoint_url = "https://cloud.perfxlab.cn/v1/"
        else:
            endpoint_url = credentials.get("endpoint_url")
            assert endpoint_url is not None, "endpoint_url is required in credentials"
            if not endpoint_url.endswith("/"):
                endpoint_url += "/"
            credentials["endpoint_url"] = "https://cloud.perfxlab.cn/v1/"

        assert isinstance(endpoint_url, str)
        endpoint_url = urljoin(endpoint_url, "embeddings")

        extra_model_kwargs = {}
        if user:
            extra_model_kwargs["user"] = user

        extra_model_kwargs["encoding_format"] = "float"

        # get model properties
        context_size = self._get_context_size(model, credentials)
        max_chunks = self._get_max_chunks(model, credentials)

        inputs = []
        indices = []
        used_tokens = 0

        for i, text in enumerate(texts):
            # Here token count is only an approximation based on the GPT2 tokenizer
            # TODO: Optimize for better token estimation and chunking
            num_tokens = self._get_num_tokens_by_gpt2(text)

            if num_tokens >= context_size:
                cutoff = int(np.floor(len(text) * (context_size / num_tokens)))
                # if num tokens is larger than context length, only use the start
                inputs.append(text[0:cutoff])
            else:
                inputs.append(text)
            indices += [i]

        batched_embeddings = []
        _iter = range(0, len(inputs), max_chunks)

        for i in _iter:
            # Prepare the payload for the request
            payload = {"input": inputs[i : i + max_chunks], "model": model, **extra_model_kwargs}

            # Make the request to the OpenAI API
            response = requests.post(endpoint_url, headers=headers, data=json.dumps(payload), timeout=(10, 300))

            response.raise_for_status()  # Raise an exception for HTTP errors
            response_data = response.json()

            # Extract embeddings and used tokens from the response
            embeddings_batch = [data["embedding"] for data in response_data["data"]]
            embedding_used_tokens = response_data["usage"]["total_tokens"]

            used_tokens += embedding_used_tokens
            batched_embeddings += embeddings_batch

        # calc usage
        usage = self._calc_response_usage(model=model, credentials=credentials, tokens=used_tokens)

        return TextEmbeddingResult(embeddings=batched_embeddings, usage=usage, model=model)

    def get_num_tokens(self, model: str, credentials: dict, texts: list[str]) -> int:
        """
        Approximate number of tokens for given messages using GPT2 tokenizer

        :param model: model name
        :param credentials: model credentials
        :param texts: texts to embed
        :return:
        """
        return sum(self._get_num_tokens_by_gpt2(text) for text in texts)
        return OAICompatEmbeddingModel._invoke(self, model, credentials, texts, user, input_type)

    def validate_credentials(self, model: str, credentials: dict) -> None:
        """

@@ -136,93 +44,7 @@ class OAICompatEmbeddingModel(_CommonOaiApiCompat, TextEmbeddingModel):
        :param credentials: model credentials
        :return:
        """
        try:
            headers = {"Content-Type": "application/json"}
        if "endpoint_url" not in credentials or credentials["endpoint_url"] == "":
            credentials["endpoint_url"] = "https://cloud.perfxlab.cn/v1/"

            api_key = credentials.get("api_key")

            if api_key:
                headers["Authorization"] = f"Bearer {api_key}"

            endpoint_url: Optional[str]
            if "endpoint_url" not in credentials or credentials["endpoint_url"] == "":
                endpoint_url = "https://cloud.perfxlab.cn/v1/"
            else:
                endpoint_url = credentials.get("endpoint_url")
                assert endpoint_url is not None, "endpoint_url is required in credentials"
                if not endpoint_url.endswith("/"):
                    endpoint_url += "/"

            assert isinstance(endpoint_url, str)
            endpoint_url = urljoin(endpoint_url, "embeddings")

            payload = {"input": "ping", "model": model}

            response = requests.post(url=endpoint_url, headers=headers, data=json.dumps(payload), timeout=(10, 300))

            if response.status_code != 200:
                raise CredentialsValidateFailedError(
                    f"Credentials validation failed with status code {response.status_code}"
                )

            try:
                json_result = response.json()
            except json.JSONDecodeError as e:
                raise CredentialsValidateFailedError("Credentials validation failed: JSON decode error")

            if "model" not in json_result:
                raise CredentialsValidateFailedError("Credentials validation failed: invalid response")
        except CredentialsValidateFailedError:
            raise
        except Exception as ex:
            raise CredentialsValidateFailedError(str(ex))

    def get_customizable_model_schema(self, model: str, credentials: dict) -> AIModelEntity:
        """
        generate custom model entities from credentials
        """
        entity = AIModelEntity(
            model=model,
            label=I18nObject(en_US=model),
            model_type=ModelType.TEXT_EMBEDDING,
            fetch_from=FetchFrom.CUSTOMIZABLE_MODEL,
            model_properties={
                ModelPropertyKey.CONTEXT_SIZE: int(credentials.get("context_size", 512)),
                ModelPropertyKey.MAX_CHUNKS: 1,
            },
            parameter_rules=[],
            pricing=PriceConfig(
                input=Decimal(credentials.get("input_price", 0)),
                unit=Decimal(credentials.get("unit", 0)),
                currency=credentials.get("currency", "USD"),
            ),
        )

        return entity

    def _calc_response_usage(self, model: str, credentials: dict, tokens: int) -> EmbeddingUsage:
        """
        Calculate response usage

        :param model: model name
        :param credentials: model credentials
        :param tokens: input tokens
        :return: usage
        """
        # get input price info
        input_price_info = self.get_price(
            model=model, credentials=credentials, price_type=PriceType.INPUT, tokens=tokens
        )

        # transform usage
        usage = EmbeddingUsage(
            tokens=tokens,
            total_tokens=tokens,
            unit_price=input_price_info.unit_price,
            price_unit=input_price_info.unit,
            total_price=input_price_info.total_amount,
            currency=input_price_info.currency,
            latency=time.perf_counter() - self.started_at,
        )

        return usage
        OAICompatEmbeddingModel.validate_credentials(self, model, credentials)

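The net effect of this rewrite is that the provider class keeps only the PerfX-specific behavior (pinning the default endpoint) and defers everything else to the OpenAI-compatible base class. A condensed sketch of the resulting shape, assuming the imports shown in the hunk above:

from core.model_runtime.model_providers.openai_api_compatible.text_embedding.text_embedding import (
    OAICompatEmbeddingModel,
)

class PerfXCloudEmbeddingModel(OAICompatEmbeddingModel):
    def _invoke(self, model, credentials, texts, user=None, input_type=None):
        # Pin the default endpoint, then delegate to the generic implementation.
        if not credentials.get("endpoint_url"):
            credentials["endpoint_url"] = "https://cloud.perfxlab.cn/v1/"
        return OAICompatEmbeddingModel._invoke(self, model, credentials, texts, user, input_type)
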
@@ -1,9 +1,16 @@
import json
from collections.abc import Generator
from typing import Optional, Union

import requests

from core.model_runtime.entities.common_entities import I18nObject
from core.model_runtime.entities.llm_entities import LLMMode, LLMResult
from core.model_runtime.entities.message_entities import PromptMessage, PromptMessageTool
from core.model_runtime.entities.llm_entities import LLMMode, LLMResult, LLMResultChunk, LLMResultChunkDelta
from core.model_runtime.entities.message_entities import (
    AssistantPromptMessage,
    PromptMessage,
    PromptMessageTool,
)
from core.model_runtime.entities.model_entities import (
    AIModelEntity,
    FetchFrom,

@@ -89,3 +96,208 @@ class SiliconflowLargeLanguageModel(OAIAPICompatLargeLanguageModel):
            ),
        ],
    )

    def _handle_generate_stream_response(
        self, model: str, credentials: dict, response: requests.Response, prompt_messages: list[PromptMessage]
    ) -> Generator:
        """
        Handle llm stream response

        :param model: model name
        :param credentials: model credentials
        :param response: streamed response
        :param prompt_messages: prompt messages
        :return: llm response chunk generator
        """
        full_assistant_content = ""
        chunk_index = 0
        is_reasoning_started = False  # Add flag to track reasoning state

        def create_final_llm_result_chunk(
            id: Optional[str], index: int, message: AssistantPromptMessage, finish_reason: str, usage: dict
        ) -> LLMResultChunk:
            # calculate num tokens
            prompt_tokens = usage and usage.get("prompt_tokens")
            if prompt_tokens is None:
                prompt_tokens = self._num_tokens_from_string(model, prompt_messages[0].content)
            completion_tokens = usage and usage.get("completion_tokens")
            if completion_tokens is None:
                completion_tokens = self._num_tokens_from_string(model, full_assistant_content)

            # transform usage
            usage = self._calc_response_usage(model, credentials, prompt_tokens, completion_tokens)

            return LLMResultChunk(
                id=id,
                model=model,
                prompt_messages=prompt_messages,
                delta=LLMResultChunkDelta(index=index, message=message, finish_reason=finish_reason, usage=usage),
            )

        # delimiter for stream response, need unicode_escape
        import codecs

        delimiter = credentials.get("stream_mode_delimiter", "\n\n")
        delimiter = codecs.decode(delimiter, "unicode_escape")

        tools_calls: list[AssistantPromptMessage.ToolCall] = []

        def increase_tool_call(new_tool_calls: list[AssistantPromptMessage.ToolCall]):
            def get_tool_call(tool_call_id: str):
                if not tool_call_id:
                    return tools_calls[-1]

                tool_call = next((tool_call for tool_call in tools_calls if tool_call.id == tool_call_id), None)
                if tool_call is None:
                    tool_call = AssistantPromptMessage.ToolCall(
                        id=tool_call_id,
                        type="function",
                        function=AssistantPromptMessage.ToolCall.ToolCallFunction(name="", arguments=""),
                    )
                    tools_calls.append(tool_call)

                return tool_call

            for new_tool_call in new_tool_calls:
                # get tool call
                tool_call = get_tool_call(new_tool_call.function.name)
                # update tool call
                if new_tool_call.id:
                    tool_call.id = new_tool_call.id
                if new_tool_call.type:
                    tool_call.type = new_tool_call.type
                if new_tool_call.function.name:
                    tool_call.function.name = new_tool_call.function.name
                if new_tool_call.function.arguments:
                    tool_call.function.arguments += new_tool_call.function.arguments

        finish_reason = None  # The default value of finish_reason is None
        message_id, usage = None, None
        for chunk in response.iter_lines(decode_unicode=True, delimiter=delimiter):
            chunk = chunk.strip()
            if chunk:
                # ignore sse comments
                if chunk.startswith(":"):
                    continue
                decoded_chunk = chunk.strip().removeprefix("data:").lstrip()
                if decoded_chunk == "[DONE]":  # Some provider returns "data: [DONE]"
                    continue

                try:
                    chunk_json: dict = json.loads(decoded_chunk)
                # stream ended
                except json.JSONDecodeError as e:
                    yield create_final_llm_result_chunk(
                        id=message_id,
                        index=chunk_index + 1,
                        message=AssistantPromptMessage(content=""),
                        finish_reason="Non-JSON encountered.",
                        usage=usage,
                    )
                    break
                # handle the error here. for issue #11629
                if chunk_json.get("error") and chunk_json.get("choices") is None:
                    raise ValueError(chunk_json.get("error"))

                if chunk_json:
                    if u := chunk_json.get("usage"):
                        usage = u
                if not chunk_json or len(chunk_json["choices"]) == 0:
                    continue

                choice = chunk_json["choices"][0]
                finish_reason = chunk_json["choices"][0].get("finish_reason")
                message_id = chunk_json.get("id")
                chunk_index += 1

                if "delta" in choice:
                    delta = choice["delta"]
                    delta_content = delta.get("content")

                    assistant_message_tool_calls = None

                    if "tool_calls" in delta and credentials.get("function_calling_type", "no_call") == "tool_call":
                        assistant_message_tool_calls = delta.get("tool_calls", None)
                    elif (
                        "function_call" in delta
                        and credentials.get("function_calling_type", "no_call") == "function_call"
                    ):
                        assistant_message_tool_calls = [
                            {"id": "tool_call_id", "type": "function", "function": delta.get("function_call", {})}
                        ]

                    # assistant_message_function_call = delta.delta.function_call

                    # extract tool calls from response
                    if assistant_message_tool_calls:
                        tool_calls = self._extract_response_tool_calls(assistant_message_tool_calls)
                        increase_tool_call(tool_calls)

                    if delta_content is None or delta_content == "":
                        continue

                    # Check for think tags
                    if "<think>" in delta_content:
                        is_reasoning_started = True
                        # Remove <think> tag and add markdown quote
                        delta_content = "> 💭 " + delta_content.replace("<think>", "")
                    elif "</think>" in delta_content:
                        # Remove </think> tag and add newlines to end quote block
                        delta_content = delta_content.replace("</think>", "") + "\n\n"
                        is_reasoning_started = False
                    elif is_reasoning_started:
                        # Add quote markers for content within thinking block
                        if "\n\n" in delta_content:
                            delta_content = delta_content.replace("\n\n", "\n> ")
                        elif "\n" in delta_content:
                            delta_content = delta_content.replace("\n", "\n> ")

                    # transform assistant message to prompt message
                    assistant_prompt_message = AssistantPromptMessage(
                        content=delta_content,
                    )

                    # reset tool calls
                    tool_calls = []
                    full_assistant_content += delta_content
                elif "text" in choice:
                    choice_text = choice.get("text", "")
                    if choice_text == "":
                        continue

                    # transform assistant message to prompt message
                    assistant_prompt_message = AssistantPromptMessage(content=choice_text)
                    full_assistant_content += choice_text
                else:
                    continue

                yield LLMResultChunk(
                    id=message_id,
                    model=model,
                    prompt_messages=prompt_messages,
                    delta=LLMResultChunkDelta(
                        index=chunk_index,
                        message=assistant_prompt_message,
                    ),
                )

                chunk_index += 1

        if tools_calls:
            yield LLMResultChunk(
                id=message_id,
                model=model,
                prompt_messages=prompt_messages,
                delta=LLMResultChunkDelta(
                    index=chunk_index,
                    message=AssistantPromptMessage(tool_calls=tools_calls, content=""),
                ),
            )

        yield create_final_llm_result_chunk(
            id=message_id,
            index=chunk_index,
            message=AssistantPromptMessage(content=""),
            finish_reason=finish_reason,
            usage=usage,
        )

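The `<think>`-tag handling above rewrites reasoning tokens into a Markdown block quote as they stream. A tiny standalone sketch of the same transformation over example chunks; the sample strings are illustrative:

chunks = ["<think>Let me", " check.\nDone.", "</think>", "The answer is 4."]
is_reasoning = False
out = []
for c in chunks:
    if "<think>" in c:
        is_reasoning = True
        c = "> 💭 " + c.replace("<think>", "")
    elif "</think>" in c:
        c = c.replace("</think>", "") + "\n\n"
        is_reasoning = False
    elif is_reasoning:
        # Keep quote markers across newlines, as the diff does.
        if "\n\n" in c:
            c = c.replace("\n\n", "\n> ")
        elif "\n" in c:
            c = c.replace("\n", "\n> ")
    out.append(c)
print("".join(out))  # "> 💭 Let me check.\n> Done.\n\nThe answer is 4."
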
@@ -33,6 +33,8 @@
- qwen2.5-3b-instruct
- qwen2.5-1.5b-instruct
- qwen2.5-0.5b-instruct
- qwen2.5-14b-instruct-1m
- qwen2.5-7b-instruct-1m
- qwen2.5-coder-7b-instruct
- qwen2-math-72b-instruct
- qwen2-math-7b-instruct

@@ -219,8 +219,12 @@ class TongyiLargeLanguageModel(LargeLanguageModel):
        if response.status_code not in {200, HTTPStatus.OK}:
            raise ServiceUnavailableError(response.message)
        # transform assistant message to prompt message
        resp_content = response.output.choices[0].message.content
        # special for qwen-vl
        if isinstance(resp_content, list):
            resp_content = resp_content[0]["text"]
        assistant_prompt_message = AssistantPromptMessage(
            content=response.output.choices[0].message.content,
            content=resp_content,
        )

        # transform usage

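Qwen-VL returns the message content as a list of typed parts rather than a plain string, which is why the hunk flattens it before building the prompt message. A minimal illustration; the sample payload is illustrative:

# Text-only models return a plain string; qwen-vl returns a list of parts.
resp_content = [{"text": "A cat sitting on a mat."}]
if isinstance(resp_content, list):
    resp_content = resp_content[0]["text"]
print(resp_content)  # "A cat sitting on a mat."
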
@@ -0,0 +1,75 @@
# for more details, please refer to https://help.aliyun.com/zh/model-studio/getting-started/models
model: qwen2.5-14b-instruct-1m
label:
  en_US: qwen2.5-14b-instruct-1m
model_type: llm
features:
  - agent-thought
model_properties:
  mode: chat
  context_size: 1000000
parameter_rules:
  - name: temperature
    use_template: temperature
    type: float
    default: 0.3
    min: 0.0
    max: 2.0
    help:
      zh_Hans: 用于控制随机性和多样性的程度。具体来说,temperature值控制了生成文本时对每个候选词的概率分布进行平滑的程度。较高的temperature值会降低概率分布的峰值,使得更多的低概率词被选择,生成结果更加多样化;而较低的temperature值则会增强概率分布的峰值,使得高概率词更容易被选择,生成结果更加确定。
      en_US: Used to control the degree of randomness and diversity. Specifically, the temperature value controls the degree to which the probability distribution of each candidate word is smoothed when generating text. A higher temperature value will reduce the peak value of the probability distribution, allowing more low-probability words to be selected, and the generated results will be more diverse; while a lower temperature value will enhance the peak value of the probability distribution, making it easier for high-probability words to be selected. , the generated results are more certain.
  - name: max_tokens
    use_template: max_tokens
    type: int
    default: 8192
    min: 1
    max: 8192
    help:
      zh_Hans: 用于指定模型在生成内容时token的最大数量,它定义了生成的上限,但不保证每次都会生成到这个数量。
      en_US: It is used to specify the maximum number of tokens when the model generates content. It defines the upper limit of generation, but does not guarantee that this number will be generated every time.
  - name: top_p
    use_template: top_p
    type: float
    default: 0.8
    min: 0.1
    max: 0.9
    help:
      zh_Hans: 生成过程中核采样方法概率阈值,例如,取值为0.8时,仅保留概率加起来大于等于0.8的最可能token的最小集合作为候选集。取值范围为(0,1.0),取值越大,生成的随机性越高;取值越低,生成的确定性越高。
      en_US: The probability threshold of the kernel sampling method during the generation process. For example, when the value is 0.8, only the smallest set of the most likely tokens with a sum of probabilities greater than or equal to 0.8 is retained as the candidate set. The value range is (0,1.0). The larger the value, the higher the randomness generated; the lower the value, the higher the certainty generated.
  - name: top_k
    type: int
    min: 0
    max: 99
    label:
      zh_Hans: 取样数量
      en_US: Top k
    help:
      zh_Hans: 生成时,采样候选集的大小。例如,取值为50时,仅将单次生成中得分最高的50个token组成随机采样的候选集。取值越大,生成的随机性越高;取值越小,生成的确定性越高。
      en_US: The size of the sample candidate set when generated. For example, when the value is 50, only the 50 highest-scoring tokens in a single generation form a randomly sampled candidate set. The larger the value, the higher the randomness generated; the smaller the value, the higher the certainty generated.
  - name: seed
    required: false
    type: int
    default: 1234
    label:
      zh_Hans: 随机种子
      en_US: Random seed
    help:
      zh_Hans: 生成时使用的随机数种子,用户控制模型生成内容的随机性。支持无符号64位整数,默认值为 1234。在使用seed时,模型将尽可能生成相同或相似的结果,但目前不保证每次生成的结果完全相同。
      en_US: The random number seed used when generating, the user controls the randomness of the content generated by the model. Supports unsigned 64-bit integers, default value is 1234. When using seed, the model will try its best to generate the same or similar results, but there is currently no guarantee that the results will be exactly the same every time.
  - name: repetition_penalty
    required: false
    type: float
    default: 1.1
    label:
      zh_Hans: 重复惩罚
      en_US: Repetition penalty
    help:
      zh_Hans: 用于控制模型生成时的重复度。提高repetition_penalty时可以降低模型生成的重复度。1.0表示不做惩罚。
      en_US: Used to control the repeatability when generating models. Increasing repetition_penalty can reduce the duplication of model generation. 1.0 means no punishment.
  - name: response_format
    use_template: response_format
pricing:
  input: '0.001'
  output: '0.003'
  unit: '0.001'
  currency: RMB

@@ -0,0 +1,75 @@
# for more details, please refer to https://help.aliyun.com/zh/model-studio/getting-started/models
model: qwen2.5-7b-instruct-1m
label:
  en_US: qwen2.5-7b-instruct-1m
model_type: llm
features:
  - agent-thought
model_properties:
  mode: chat
  context_size: 1000000
parameter_rules:
  - name: temperature
    use_template: temperature
    type: float
    default: 0.3
    min: 0.0
    max: 2.0
    help:
      zh_Hans: 用于控制随机性和多样性的程度。具体来说,temperature值控制了生成文本时对每个候选词的概率分布进行平滑的程度。较高的temperature值会降低概率分布的峰值,使得更多的低概率词被选择,生成结果更加多样化;而较低的temperature值则会增强概率分布的峰值,使得高概率词更容易被选择,生成结果更加确定。
      en_US: Used to control the degree of randomness and diversity. Specifically, the temperature value controls the degree to which the probability distribution of each candidate word is smoothed when generating text. A higher temperature value will reduce the peak value of the probability distribution, allowing more low-probability words to be selected, and the generated results will be more diverse; while a lower temperature value will enhance the peak value of the probability distribution, making it easier for high-probability words to be selected. , the generated results are more certain.
  - name: max_tokens
    use_template: max_tokens
    type: int
    default: 8192
    min: 1
    max: 8192
    help:
      zh_Hans: 用于指定模型在生成内容时token的最大数量,它定义了生成的上限,但不保证每次都会生成到这个数量。
      en_US: It is used to specify the maximum number of tokens when the model generates content. It defines the upper limit of generation, but does not guarantee that this number will be generated every time.
  - name: top_p
    use_template: top_p
    type: float
    default: 0.8
    min: 0.1
    max: 0.9
    help:
      zh_Hans: 生成过程中核采样方法概率阈值,例如,取值为0.8时,仅保留概率加起来大于等于0.8的最可能token的最小集合作为候选集。取值范围为(0,1.0),取值越大,生成的随机性越高;取值越低,生成的确定性越高。
      en_US: The probability threshold of the kernel sampling method during the generation process. For example, when the value is 0.8, only the smallest set of the most likely tokens with a sum of probabilities greater than or equal to 0.8 is retained as the candidate set. The value range is (0,1.0). The larger the value, the higher the randomness generated; the lower the value, the higher the certainty generated.
  - name: top_k
    type: int
    min: 0
    max: 99
    label:
      zh_Hans: 取样数量
      en_US: Top k
    help:
      zh_Hans: 生成时,采样候选集的大小。例如,取值为50时,仅将单次生成中得分最高的50个token组成随机采样的候选集。取值越大,生成的随机性越高;取值越小,生成的确定性越高。
      en_US: The size of the sample candidate set when generated. For example, when the value is 50, only the 50 highest-scoring tokens in a single generation form a randomly sampled candidate set. The larger the value, the higher the randomness generated; the smaller the value, the higher the certainty generated.
  - name: seed
    required: false
    type: int
    default: 1234
    label:
      zh_Hans: 随机种子
      en_US: Random seed
    help:
      zh_Hans: 生成时使用的随机数种子,用户控制模型生成内容的随机性。支持无符号64位整数,默认值为 1234。在使用seed时,模型将尽可能生成相同或相似的结果,但目前不保证每次生成的结果完全相同。
      en_US: The random number seed used when generating, the user controls the randomness of the content generated by the model. Supports unsigned 64-bit integers, default value is 1234. When using seed, the model will try its best to generate the same or similar results, but there is currently no guarantee that the results will be exactly the same every time.
  - name: repetition_penalty
    required: false
    type: float
    default: 1.1
    label:
      zh_Hans: 重复惩罚
      en_US: Repetition penalty
    help:
      zh_Hans: 用于控制模型生成时的重复度。提高repetition_penalty时可以降低模型生成的重复度。1.0表示不做惩罚。
      en_US: Used to control the repeatability when generating models. Increasing repetition_penalty can reduce the duplication of model generation. 1.0 means no punishment.
  - name: response_format
    use_template: response_format
pricing:
  input: '0.0005'
  output: '0.001'
  unit: '0.001'
  currency: RMB

@@ -13,9 +13,10 @@ class FirecrawlWebExtractor(BaseExtractor):
        api_key: The API key for Firecrawl.
        base_url: The base URL for the Firecrawl API. Defaults to 'https://api.firecrawl.dev'.
        mode: The mode of operation. Defaults to 'scrape'. Options are 'crawl', 'scrape' and 'crawl_return_urls'.
        only_main_content: Only return the main content of the page excluding headers, navs, footers, etc.
    """

    def __init__(self, url: str, job_id: str, tenant_id: str, mode: str = "crawl", only_main_content: bool = False):
    def __init__(self, url: str, job_id: str, tenant_id: str, mode: str = "crawl", only_main_content: bool = True):
        """Initialize with url, api_key, base_url and mode."""
        self._url = url
        self.job_id = job_id

@ -185,6 +185,8 @@ class LLMNode(BaseNode[LLMNodeData]):
                    result_text = event.text
                    usage = event.usage
                    finish_reason = event.finish_reason
                    # deduct quota
                    self.deduct_llm_quota(tenant_id=self.tenant_id, model_instance=model_instance, usage=usage)
                    break
        except LLMNodeError as e:
            yield RunCompletedEvent(

@ -240,17 +242,7 @@ class LLMNode(BaseNode[LLMNodeData]):
            user=self.user_id,
        )

        # handle invoke result
        generator = self._handle_invoke_result(invoke_result=invoke_result)

        usage = LLMUsage.empty_usage()
        for event in generator:
            yield event
            if isinstance(event, ModelInvokeCompletedEvent):
                usage = event.usage

        # deduct quota
        self.deduct_llm_quota(tenant_id=self.tenant_id, model_instance=model_instance, usage=usage)
        return self._handle_invoke_result(invoke_result=invoke_result)

    def _handle_invoke_result(self, invoke_result: LLMResult | Generator) -> Generator[NodeEvent, None, None]:
        if isinstance(invoke_result, LLMResult):
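The refactor moves quota deduction out of a wrapper loop and into the point where the completion event delivers final usage. A self-contained sketch of that pattern, with illustrative names rather than Dify's actual classes:

```python
from collections.abc import Iterator
from dataclasses import dataclass

@dataclass
class ModelInvokeCompleted:
    text: str
    total_tokens: int

def run(events: Iterator[object], deduct) -> str:
    # Charge quota exactly once, at the completion event that carries
    # the final usage, then stop consuming the stream.
    for event in events:
        if isinstance(event, ModelInvokeCompleted):
            deduct(event.total_tokens)
            return event.text
    return ""

print(run(iter([ModelInvokeCompleted("hello", 42)]), print))  # prints 42, then hello
```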
@ -20,11 +20,11 @@ if [[ "${MODE}" == "worker" ]]; then
    CONCURRENCY_OPTION="-c ${CELERY_WORKER_AMOUNT:-1}"
  fi

  exec celery -A app.celery worker -P ${CELERY_WORKER_CLASS:-gevent} $CONCURRENCY_OPTION --loglevel ${LOG_LEVEL} \
  exec celery -A app.celery worker -P ${CELERY_WORKER_CLASS:-gevent} $CONCURRENCY_OPTION --loglevel ${LOG_LEVEL:-INFO} \
    -Q ${CELERY_QUEUES:-dataset,mail,ops_trace,app_deletion}

elif [[ "${MODE}" == "beat" ]]; then
  exec celery -A app.celery beat --loglevel ${LOG_LEVEL}
  exec celery -A app.celery beat --loglevel ${LOG_LEVEL:-INFO}
else
  if [[ "${DEBUG}" == "true" ]]; then
    exec flask run --host=${DIFY_BIND_ADDRESS:-0.0.0.0} --port=${DIFY_PORT:-5001} --debug
@ -27,12 +27,11 @@ def init_app(app: DifyApp):
    # Always add StreamHandler to log to console
    sh = logging.StreamHandler(sys.stdout)
    sh.addFilter(RequestIdFilter())
    log_formatter = logging.Formatter(fmt=dify_config.LOG_FORMAT)
    sh.setFormatter(log_formatter)
    log_handlers.append(sh)

    logging.basicConfig(
        level=dify_config.LOG_LEVEL,
        format=dify_config.LOG_FORMAT,
        datefmt=dify_config.LOG_DATEFORMAT,
        handlers=log_handlers,
        force=True,
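RequestIdFilter is attached to the handler so every console record carries the current request id. A self-contained sketch of how such a filter can work, assuming the id lives in a context variable (Dify's actual implementation may source it from the Flask request instead):

```python
import contextvars
import logging
import sys

request_id_var: contextvars.ContextVar[str] = contextvars.ContextVar("request_id", default="-")

class RequestIdFilter(logging.Filter):
    def filter(self, record: logging.LogRecord) -> bool:
        record.req_id = request_id_var.get()  # annotate records, never drop them
        return True

sh = logging.StreamHandler(sys.stdout)
sh.addFilter(RequestIdFilter())
sh.setFormatter(logging.Formatter("[%(req_id)s] %(levelname)s %(message)s"))
logging.basicConfig(level="INFO", handlers=[sh], force=True)

request_id_var.set("req-42")
logging.getLogger(__name__).info("hello")  # -> [req-42] INFO hello
```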
@ -1066,8 +1066,10 @@ class Message(db.Model):  # type: ignore[name-defined]
            "id": self.id,
            "app_id": self.app_id,
            "conversation_id": self.conversation_id,
            "model_id": self.model_id,
            "inputs": self.inputs,
            "query": self.query,
            "total_price": self.total_price,
            "message": self.message,
            "answer": self.answer,
            "status": self.status,

@ -1088,7 +1090,9 @@ class Message(db.Model):  # type: ignore[name-defined]
            id=data["id"],
            app_id=data["app_id"],
            conversation_id=data["conversation_id"],
            model_id=data["model_id"],
            inputs=data["inputs"],
            total_price=data["total_price"],
            query=data["query"],
            message=data["message"],
            answer=data["answer"],
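The two hunks keep to_dict and from_dict symmetric, so a message serialized for a queue can be rebuilt losslessly on the consumer side. A generic, self-contained illustration of the pattern (names are illustrative, not Dify's model):

```python
from dataclasses import asdict, dataclass

@dataclass
class Msg:
    id: str
    model_id: str
    total_price: float
    answer: str

    def to_dict(self) -> dict:
        return asdict(self)

    @classmethod
    def from_dict(cls, data: dict) -> "Msg":
        return cls(**data)

m = Msg("m1", "gpt-4o", 0.002, "hi")
assert Msg.from_dict(m.to_dict()) == m  # lossless round trip
```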
@ -922,7 +922,7 @@ version = "1.9.0"
description = "Fast, simple object-to-object and broadcast signaling"
optional = false
python-versions = ">=3.9"
groups = ["main", "tools"]
groups = ["main", "dev", "tools"]
markers = "python_version == \"3.11\" or python_version >= \"3.12\""
files = [
    {file = "blinker-1.9.0-py3-none-any.whl", hash = "sha256:ba0efaa9080b619ff2f3459d1d500c57bddea4a6b424b60a91141db6fd2f08bc"},

@ -1043,10 +1043,6 @@ files = [
    {file = "Brotli-1.1.0-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:a37b8f0391212d29b3a91a799c8e4a2855e0576911cdfb2515487e30e322253d"},
    {file = "Brotli-1.1.0-cp310-cp310-musllinux_1_1_ppc64le.whl", hash = "sha256:e84799f09591700a4154154cab9787452925578841a94321d5ee8fb9a9a328f0"},
    {file = "Brotli-1.1.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:f66b5337fa213f1da0d9000bc8dc0cb5b896b726eefd9c6046f699b169c41b9e"},
    {file = "Brotli-1.1.0-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:5dab0844f2cf82be357a0eb11a9087f70c5430b2c241493fc122bb6f2bb0917c"},
    {file = "Brotli-1.1.0-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:e4fe605b917c70283db7dfe5ada75e04561479075761a0b3866c081d035b01c1"},
    {file = "Brotli-1.1.0-cp310-cp310-musllinux_1_2_ppc64le.whl", hash = "sha256:1e9a65b5736232e7a7f91ff3d02277f11d339bf34099a56cdab6a8b3410a02b2"},
    {file = "Brotli-1.1.0-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:58d4b711689366d4a03ac7957ab8c28890415e267f9b6589969e74b6e42225ec"},
    {file = "Brotli-1.1.0-cp310-cp310-win32.whl", hash = "sha256:be36e3d172dc816333f33520154d708a2657ea63762ec16b62ece02ab5e4daf2"},
    {file = "Brotli-1.1.0-cp310-cp310-win_amd64.whl", hash = "sha256:0c6244521dda65ea562d5a69b9a26120769b7a9fb3db2fe9545935ed6735b128"},
    {file = "Brotli-1.1.0-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:a3daabb76a78f829cafc365531c972016e4aa8d5b4bf60660ad8ecee19df7ccc"},

@ -1059,14 +1055,8 @@ files = [
    {file = "Brotli-1.1.0-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:19c116e796420b0cee3da1ccec3b764ed2952ccfcc298b55a10e5610ad7885f9"},
    {file = "Brotli-1.1.0-cp311-cp311-musllinux_1_1_ppc64le.whl", hash = "sha256:510b5b1bfbe20e1a7b3baf5fed9e9451873559a976c1a78eebaa3b86c57b4265"},
    {file = "Brotli-1.1.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:a1fd8a29719ccce974d523580987b7f8229aeace506952fa9ce1d53a033873c8"},
    {file = "Brotli-1.1.0-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:c247dd99d39e0338a604f8c2b3bc7061d5c2e9e2ac7ba9cc1be5a69cb6cd832f"},
    {file = "Brotli-1.1.0-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:1b2c248cd517c222d89e74669a4adfa5577e06ab68771a529060cf5a156e9757"},
    {file = "Brotli-1.1.0-cp311-cp311-musllinux_1_2_ppc64le.whl", hash = "sha256:2a24c50840d89ded6c9a8fdc7b6ed3692ed4e86f1c4a4a938e1e92def92933e0"},
    {file = "Brotli-1.1.0-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:f31859074d57b4639318523d6ffdca586ace54271a73ad23ad021acd807eb14b"},
    {file = "Brotli-1.1.0-cp311-cp311-win32.whl", hash = "sha256:39da8adedf6942d76dc3e46653e52df937a3c4d6d18fdc94a7c29d263b1f5b50"},
    {file = "Brotli-1.1.0-cp311-cp311-win_amd64.whl", hash = "sha256:aac0411d20e345dc0920bdec5548e438e999ff68d77564d5e9463a7ca9d3e7b1"},
    {file = "Brotli-1.1.0-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:32d95b80260d79926f5fab3c41701dbb818fde1c9da590e77e571eefd14abe28"},
    {file = "Brotli-1.1.0-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:b760c65308ff1e462f65d69c12e4ae085cff3b332d894637f6273a12a482d09f"},
    {file = "Brotli-1.1.0-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:316cc9b17edf613ac76b1f1f305d2a748f1b976b033b049a6ecdfd5612c70409"},
    {file = "Brotli-1.1.0-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:caf9ee9a5775f3111642d33b86237b05808dafcd6268faa492250e9b78046eb2"},
    {file = "Brotli-1.1.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:70051525001750221daa10907c77830bc889cb6d865cc0b813d9db7fefc21451"},

@ -1077,24 +1067,8 @@ files = [
    {file = "Brotli-1.1.0-cp312-cp312-musllinux_1_1_i686.whl", hash = "sha256:4093c631e96fdd49e0377a9c167bfd75b6d0bad2ace734c6eb20b348bc3ea180"},
    {file = "Brotli-1.1.0-cp312-cp312-musllinux_1_1_ppc64le.whl", hash = "sha256:7e4c4629ddad63006efa0ef968c8e4751c5868ff0b1c5c40f76524e894c50248"},
    {file = "Brotli-1.1.0-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:861bf317735688269936f755fa136a99d1ed526883859f86e41a5d43c61d8966"},
    {file = "Brotli-1.1.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:87a3044c3a35055527ac75e419dfa9f4f3667a1e887ee80360589eb8c90aabb9"},
    {file = "Brotli-1.1.0-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:c5529b34c1c9d937168297f2c1fde7ebe9ebdd5e121297ff9c043bdb2ae3d6fb"},
    {file = "Brotli-1.1.0-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:ca63e1890ede90b2e4454f9a65135a4d387a4585ff8282bb72964fab893f2111"},
    {file = "Brotli-1.1.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:e79e6520141d792237c70bcd7a3b122d00f2613769ae0cb61c52e89fd3443839"},
    {file = "Brotli-1.1.0-cp312-cp312-win32.whl", hash = "sha256:5f4d5ea15c9382135076d2fb28dde923352fe02951e66935a9efaac8f10e81b0"},
    {file = "Brotli-1.1.0-cp312-cp312-win_amd64.whl", hash = "sha256:906bc3a79de8c4ae5b86d3d75a8b77e44404b0f4261714306e3ad248d8ab0951"},
    {file = "Brotli-1.1.0-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:8bf32b98b75c13ec7cf774164172683d6e7891088f6316e54425fde1efc276d5"},
    {file = "Brotli-1.1.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:7bc37c4d6b87fb1017ea28c9508b36bbcb0c3d18b4260fcdf08b200c74a6aee8"},
    {file = "Brotli-1.1.0-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3c0ef38c7a7014ffac184db9e04debe495d317cc9c6fb10071f7fefd93100a4f"},
    {file = "Brotli-1.1.0-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:91d7cc2a76b5567591d12c01f019dd7afce6ba8cba6571187e21e2fc418ae648"},
    {file = "Brotli-1.1.0-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a93dde851926f4f2678e704fadeb39e16c35d8baebd5252c9fd94ce8ce68c4a0"},
    {file = "Brotli-1.1.0-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:f0db75f47be8b8abc8d9e31bc7aad0547ca26f24a54e6fd10231d623f183d089"},
    {file = "Brotli-1.1.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:6967ced6730aed543b8673008b5a391c3b1076d834ca438bbd70635c73775368"},
    {file = "Brotli-1.1.0-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:7eedaa5d036d9336c95915035fb57422054014ebdeb6f3b42eac809928e40d0c"},
    {file = "Brotli-1.1.0-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:d487f5432bf35b60ed625d7e1b448e2dc855422e87469e3f450aa5552b0eb284"},
    {file = "Brotli-1.1.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:832436e59afb93e1836081a20f324cb185836c617659b07b129141a8426973c7"},
    {file = "Brotli-1.1.0-cp313-cp313-win32.whl", hash = "sha256:43395e90523f9c23a3d5bdf004733246fba087f2948f87ab28015f12359ca6a0"},
    {file = "Brotli-1.1.0-cp313-cp313-win_amd64.whl", hash = "sha256:9011560a466d2eb3f5a6e4929cf4a09be405c64154e12df0dd72713f6500e32b"},
    {file = "Brotli-1.1.0-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:a090ca607cbb6a34b0391776f0cb48062081f5f60ddcce5d11838e67a01928d1"},
    {file = "Brotli-1.1.0-cp36-cp36m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2de9d02f5bda03d27ede52e8cfe7b865b066fa49258cbab568720aa5be80a47d"},
    {file = "Brotli-1.1.0-cp36-cp36m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:2333e30a5e00fe0fe55903c8832e08ee9c3b1382aacf4db26664a16528d51b4b"},

@ -1104,10 +1078,6 @@ files = [
    {file = "Brotli-1.1.0-cp36-cp36m-musllinux_1_1_i686.whl", hash = "sha256:fd5f17ff8f14003595ab414e45fce13d073e0762394f957182e69035c9f3d7c2"},
    {file = "Brotli-1.1.0-cp36-cp36m-musllinux_1_1_ppc64le.whl", hash = "sha256:069a121ac97412d1fe506da790b3e69f52254b9df4eb665cd42460c837193354"},
    {file = "Brotli-1.1.0-cp36-cp36m-musllinux_1_1_x86_64.whl", hash = "sha256:e93dfc1a1165e385cc8239fab7c036fb2cd8093728cbd85097b284d7b99249a2"},
    {file = "Brotli-1.1.0-cp36-cp36m-musllinux_1_2_aarch64.whl", hash = "sha256:aea440a510e14e818e67bfc4027880e2fb500c2ccb20ab21c7a7c8b5b4703d75"},
    {file = "Brotli-1.1.0-cp36-cp36m-musllinux_1_2_i686.whl", hash = "sha256:6974f52a02321b36847cd19d1b8e381bf39939c21efd6ee2fc13a28b0d99348c"},
    {file = "Brotli-1.1.0-cp36-cp36m-musllinux_1_2_ppc64le.whl", hash = "sha256:a7e53012d2853a07a4a79c00643832161a910674a893d296c9f1259859a289d2"},
    {file = "Brotli-1.1.0-cp36-cp36m-musllinux_1_2_x86_64.whl", hash = "sha256:d7702622a8b40c49bffb46e1e3ba2e81268d5c04a34f460978c6b5517a34dd52"},
    {file = "Brotli-1.1.0-cp36-cp36m-win32.whl", hash = "sha256:a599669fd7c47233438a56936988a2478685e74854088ef5293802123b5b2460"},
    {file = "Brotli-1.1.0-cp36-cp36m-win_amd64.whl", hash = "sha256:d143fd47fad1db3d7c27a1b1d66162e855b5d50a89666af46e1679c496e8e579"},
    {file = "Brotli-1.1.0-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:11d00ed0a83fa22d29bc6b64ef636c4552ebafcef57154b4ddd132f5638fbd1c"},

@ -1119,10 +1089,6 @@ files = [
    {file = "Brotli-1.1.0-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:919e32f147ae93a09fe064d77d5ebf4e35502a8df75c29fb05788528e330fe74"},
    {file = "Brotli-1.1.0-cp37-cp37m-musllinux_1_1_ppc64le.whl", hash = "sha256:23032ae55523cc7bccb4f6a0bf368cd25ad9bcdcc1990b64a647e7bbcce9cb5b"},
    {file = "Brotli-1.1.0-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:224e57f6eac61cc449f498cc5f0e1725ba2071a3d4f48d5d9dffba42db196438"},
    {file = "Brotli-1.1.0-cp37-cp37m-musllinux_1_2_aarch64.whl", hash = "sha256:cb1dac1770878ade83f2ccdf7d25e494f05c9165f5246b46a621cc849341dc01"},
    {file = "Brotli-1.1.0-cp37-cp37m-musllinux_1_2_i686.whl", hash = "sha256:3ee8a80d67a4334482d9712b8e83ca6b1d9bc7e351931252ebef5d8f7335a547"},
    {file = "Brotli-1.1.0-cp37-cp37m-musllinux_1_2_ppc64le.whl", hash = "sha256:5e55da2c8724191e5b557f8e18943b1b4839b8efc3ef60d65985bcf6f587dd38"},
    {file = "Brotli-1.1.0-cp37-cp37m-musllinux_1_2_x86_64.whl", hash = "sha256:d342778ef319e1026af243ed0a07c97acf3bad33b9f29e7ae6a1f68fd083e90c"},
    {file = "Brotli-1.1.0-cp37-cp37m-win32.whl", hash = "sha256:587ca6d3cef6e4e868102672d3bd9dc9698c309ba56d41c2b9c85bbb903cdb95"},
    {file = "Brotli-1.1.0-cp37-cp37m-win_amd64.whl", hash = "sha256:2954c1c23f81c2eaf0b0717d9380bd348578a94161a65b3a2afc62c86467dd68"},
    {file = "Brotli-1.1.0-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:efa8b278894b14d6da122a72fefcebc28445f2d3f880ac59d46c90f4c13be9a3"},

@ -1135,10 +1101,6 @@ files = [
    {file = "Brotli-1.1.0-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:1ab4fbee0b2d9098c74f3057b2bc055a8bd92ccf02f65944a241b4349229185a"},
    {file = "Brotli-1.1.0-cp38-cp38-musllinux_1_1_ppc64le.whl", hash = "sha256:141bd4d93984070e097521ed07e2575b46f817d08f9fa42b16b9b5f27b5ac088"},
    {file = "Brotli-1.1.0-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:fce1473f3ccc4187f75b4690cfc922628aed4d3dd013d047f95a9b3919a86596"},
    {file = "Brotli-1.1.0-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:d2b35ca2c7f81d173d2fadc2f4f31e88cc5f7a39ae5b6db5513cf3383b0e0ec7"},
    {file = "Brotli-1.1.0-cp38-cp38-musllinux_1_2_i686.whl", hash = "sha256:af6fa6817889314555aede9a919612b23739395ce767fe7fcbea9a80bf140fe5"},
    {file = "Brotli-1.1.0-cp38-cp38-musllinux_1_2_ppc64le.whl", hash = "sha256:2feb1d960f760a575dbc5ab3b1c00504b24caaf6986e2dc2b01c09c87866a943"},
    {file = "Brotli-1.1.0-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:4410f84b33374409552ac9b6903507cdb31cd30d2501fc5ca13d18f73548444a"},
    {file = "Brotli-1.1.0-cp38-cp38-win32.whl", hash = "sha256:db85ecf4e609a48f4b29055f1e144231b90edc90af7481aa731ba2d059226b1b"},
    {file = "Brotli-1.1.0-cp38-cp38-win_amd64.whl", hash = "sha256:3d7954194c36e304e1523f55d7042c59dc53ec20dd4e9ea9d151f1b62b4415c0"},
    {file = "Brotli-1.1.0-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:5fb2ce4b8045c78ebbc7b8f3c15062e435d47e7393cc57c25115cfd49883747a"},

@ -1151,10 +1113,6 @@ files = [
    {file = "Brotli-1.1.0-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:949f3b7c29912693cee0afcf09acd6ebc04c57af949d9bf77d6101ebb61e388c"},
    {file = "Brotli-1.1.0-cp39-cp39-musllinux_1_1_ppc64le.whl", hash = "sha256:89f4988c7203739d48c6f806f1e87a1d96e0806d44f0fba61dba81392c9e474d"},
    {file = "Brotli-1.1.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:de6551e370ef19f8de1807d0a9aa2cdfdce2e85ce88b122fe9f6b2b076837e59"},
    {file = "Brotli-1.1.0-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:0737ddb3068957cf1b054899b0883830bb1fec522ec76b1098f9b6e0f02d9419"},
    {file = "Brotli-1.1.0-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:4f3607b129417e111e30637af1b56f24f7a49e64763253bbc275c75fa887d4b2"},
    {file = "Brotli-1.1.0-cp39-cp39-musllinux_1_2_ppc64le.whl", hash = "sha256:6c6e0c425f22c1c719c42670d561ad682f7bfeeef918edea971a79ac5252437f"},
    {file = "Brotli-1.1.0-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:494994f807ba0b92092a163a0a283961369a65f6cbe01e8891132b7a320e61eb"},
    {file = "Brotli-1.1.0-cp39-cp39-win32.whl", hash = "sha256:f0d8a7a6b5983c2496e364b969f0e526647a06b075d034f3297dc66f3b360c64"},
    {file = "Brotli-1.1.0-cp39-cp39-win_amd64.whl", hash = "sha256:cdad5b9014d83ca68c25d2e9444e28e967ef16e80f6b436918c700c117a85467"},
    {file = "Brotli-1.1.0.tar.gz", hash = "sha256:81de08ac11bcb85841e440c13611c00b67d3bf82698314928d0b676362546724"},
@ -1628,7 +1586,7 @@ version = "8.1.8"
description = "Composable command line interface toolkit"
optional = false
python-versions = ">=3.7"
groups = ["main", "lint", "tools", "vdb"]
groups = ["main", "dev", "lint", "tools", "vdb"]
markers = "python_version == \"3.11\" or python_version >= \"3.12\""
files = [
    {file = "click-8.1.8-py3-none-any.whl", hash = "sha256:63c132bbbed01578a06712a2d1f497bb62d9c1c0d329b7903a866228027263b2"},

@ -1868,7 +1826,7 @@ files = [
    {file = "colorama-0.4.6-py2.py3-none-any.whl", hash = "sha256:4f1d9991f5acc0ca119f9d443620b77f9d6b33703e51011c16baf57afb285fc6"},
    {file = "colorama-0.4.6.tar.gz", hash = "sha256:08695f5cb7ed6e0531a20572697297273c47b8cae5a63ffc6d6ed5c201be6e44"},
]
markers = {main = "python_version == \"3.11\" or python_version >= \"3.12\"", dev = "(python_version == \"3.11\" or python_version >= \"3.12\") and sys_platform == \"win32\"", lint = "(python_version == \"3.11\" or python_version >= \"3.12\") and platform_system == \"Windows\"", tools = "(python_version == \"3.11\" or python_version >= \"3.12\") and platform_system == \"Windows\"", vdb = "(python_version == \"3.11\" or python_version >= \"3.12\") and (platform_system == \"Windows\" or os_name == \"nt\" or sys_platform == \"win32\")"}
markers = {main = "python_version == \"3.11\" or python_version >= \"3.12\"", dev = "(python_version == \"3.11\" or python_version >= \"3.12\") and (platform_system == \"Windows\" or sys_platform == \"win32\")", lint = "(python_version == \"3.11\" or python_version >= \"3.12\") and platform_system == \"Windows\"", tools = "(python_version == \"3.11\" or python_version >= \"3.12\") and platform_system == \"Windows\"", vdb = "(python_version == \"3.11\" or python_version >= \"3.12\") and (platform_system == \"Windows\" or os_name == \"nt\" or sys_platform == \"win32\")"}

[[package]]
name = "coloredlogs"

@ -2126,6 +2084,7 @@ files = [
    {file = "cryptography-44.0.0-cp37-abi3-manylinux_2_28_aarch64.whl", hash = "sha256:761817a3377ef15ac23cd7834715081791d4ec77f9297ee694ca1ee9c2c7e5eb"},
    {file = "cryptography-44.0.0-cp37-abi3-manylinux_2_28_x86_64.whl", hash = "sha256:3c672a53c0fb4725a29c303be906d3c1fa99c32f58abe008a82705f9ee96f40b"},
    {file = "cryptography-44.0.0-cp37-abi3-manylinux_2_34_aarch64.whl", hash = "sha256:4ac4c9f37eba52cb6fbeaf5b59c152ea976726b865bd4cf87883a7e7006cc543"},
    {file = "cryptography-44.0.0-cp37-abi3-manylinux_2_34_x86_64.whl", hash = "sha256:60eb32934076fa07e4316b7b2742fa52cbb190b42c2df2863dbc4230a0a9b385"},
    {file = "cryptography-44.0.0-cp37-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:ed3534eb1090483c96178fcb0f8893719d96d5274dfde98aa6add34614e97c8e"},
    {file = "cryptography-44.0.0-cp37-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:f3f6fdfa89ee2d9d496e2c087cebef9d4fcbb0ad63c40e821b39f74bf48d9c5e"},
    {file = "cryptography-44.0.0-cp37-abi3-win32.whl", hash = "sha256:eb33480f1bad5b78233b0ad3e1b0be21e8ef1da745d8d2aecbb20671658b9053"},

@ -2136,6 +2095,7 @@ files = [
    {file = "cryptography-44.0.0-cp39-abi3-manylinux_2_28_aarch64.whl", hash = "sha256:c5eb858beed7835e5ad1faba59e865109f3e52b3783b9ac21e7e47dc5554e289"},
    {file = "cryptography-44.0.0-cp39-abi3-manylinux_2_28_x86_64.whl", hash = "sha256:f53c2c87e0fb4b0c00fa9571082a057e37690a8f12233306161c8f4b819960b7"},
    {file = "cryptography-44.0.0-cp39-abi3-manylinux_2_34_aarch64.whl", hash = "sha256:9e6fc8a08e116fb7c7dd1f040074c9d7b51d74a8ea40d4df2fc7aa08b76b9e6c"},
    {file = "cryptography-44.0.0-cp39-abi3-manylinux_2_34_x86_64.whl", hash = "sha256:9abcc2e083cbe8dde89124a47e5e53ec38751f0d7dfd36801008f316a127d7ba"},
    {file = "cryptography-44.0.0-cp39-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:d2436114e46b36d00f8b72ff57e598978b37399d2786fd39793c36c6d5cb1c64"},
    {file = "cryptography-44.0.0-cp39-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:a01956ddfa0a6790d594f5b34fc1bfa6098aca434696a03cfdbe469b8ed79285"},
    {file = "cryptography-44.0.0-cp39-abi3-win32.whl", hash = "sha256:eca27345e1214d1b9f9490d200f9db5a874479be914199194e746c893788d417"},

@ -2832,7 +2792,7 @@ version = "3.1.0"
description = "A simple framework for building complex web applications."
optional = false
python-versions = ">=3.9"
groups = ["main", "tools"]
groups = ["main", "dev", "tools"]
markers = "python_version == \"3.11\" or python_version >= \"3.12\""
files = [
    {file = "flask-3.1.0-py3-none-any.whl", hash = "sha256:d667207822eb83f1c4b50949b1623c8fc8d51f2341d65f72e1a1815397551136"},

@ -2971,7 +2931,7 @@ version = "3.1.1"
description = "Add SQLAlchemy support to your Flask application."
optional = false
python-versions = ">=3.8"
groups = ["main"]
groups = ["main", "dev"]
markers = "python_version == \"3.11\" or python_version >= \"3.12\""
files = [
    {file = "flask_sqlalchemy-3.1.1-py3-none-any.whl", hash = "sha256:4ba4be7f419dc72f4efd8802d69974803c37259dd42f3913b0dcf75c9447e0a0"},

@ -3834,7 +3794,7 @@ version = "3.1.1"
description = "Lightweight in-process concurrent programming"
optional = false
python-versions = ">=3.7"
groups = ["main", "tools", "vdb"]
groups = ["main", "dev", "tools", "vdb"]
files = [
    {file = "greenlet-3.1.1-cp310-cp310-macosx_11_0_universal2.whl", hash = "sha256:0bbae94a29c9e5c7e4a2b7f0aae5c17e8e90acbfd3bf6270eeba60c39fce3563"},
    {file = "greenlet-3.1.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0fde093fb93f35ca72a556cf72c92ea3ebfda3d79fc35bb19fbe685853869a83"},

@ -3910,7 +3870,7 @@ files = [
    {file = "greenlet-3.1.1-cp39-cp39-win_amd64.whl", hash = "sha256:3319aa75e0e0639bc15ff54ca327e8dc7a6fe404003496e3c6925cd3142e0e22"},
    {file = "greenlet-3.1.1.tar.gz", hash = "sha256:4ce3ac6cdb6adf7946475d7ef31777c26d94bccc377e070a7986bd2d5c515467"},
]
markers = {main = "(python_version == \"3.11\" or python_version >= \"3.12\") and (platform_machine == \"aarch64\" or platform_machine == \"ppc64le\" or platform_machine == \"x86_64\" or platform_machine == \"amd64\" or platform_machine == \"AMD64\" or platform_machine == \"win32\" or platform_machine == \"WIN32\" or platform_python_implementation == \"CPython\")", tools = "(python_version == \"3.11\" or python_version >= \"3.12\") and (platform_machine == \"aarch64\" or platform_machine == \"ppc64le\" or platform_machine == \"x86_64\" or platform_machine == \"amd64\" or platform_machine == \"AMD64\" or platform_machine == \"win32\" or platform_machine == \"WIN32\")", vdb = "(python_version == \"3.11\" or python_version >= \"3.12\") and (platform_machine == \"aarch64\" or platform_machine == \"ppc64le\" or platform_machine == \"x86_64\" or platform_machine == \"amd64\" or platform_machine == \"AMD64\" or platform_machine == \"win32\" or platform_machine == \"WIN32\")"}
markers = {main = "(python_version == \"3.11\" or python_version >= \"3.12\") and (platform_machine == \"aarch64\" or platform_machine == \"ppc64le\" or platform_machine == \"x86_64\" or platform_machine == \"amd64\" or platform_machine == \"AMD64\" or platform_machine == \"win32\" or platform_machine == \"WIN32\" or platform_python_implementation == \"CPython\")", dev = "(python_version == \"3.11\" or python_version >= \"3.12\") and (platform_machine == \"aarch64\" or platform_machine == \"ppc64le\" or platform_machine == \"x86_64\" or platform_machine == \"amd64\" or platform_machine == \"AMD64\" or platform_machine == \"win32\" or platform_machine == \"WIN32\")", tools = "(python_version == \"3.11\" or python_version >= \"3.12\") and (platform_machine == \"aarch64\" or platform_machine == \"ppc64le\" or platform_machine == \"x86_64\" or platform_machine == \"amd64\" or platform_machine == \"AMD64\" or platform_machine == \"win32\" or platform_machine == \"WIN32\")", vdb = "(python_version == \"3.11\" or python_version >= \"3.12\") and (platform_machine == \"aarch64\" or platform_machine == \"ppc64le\" or platform_machine == \"x86_64\" or platform_machine == \"amd64\" or platform_machine == \"AMD64\" or platform_machine == \"win32\" or platform_machine == \"WIN32\")"}

[package.extras]
docs = ["Sphinx", "furo"]

@ -4585,7 +4545,7 @@ version = "2.2.0"
description = "Safely pass data to untrusted environments and back."
optional = false
python-versions = ">=3.8"
groups = ["main", "tools"]
groups = ["main", "dev", "tools"]
markers = "python_version == \"3.11\" or python_version >= \"3.12\""
files = [
    {file = "itsdangerous-2.2.0-py3-none-any.whl", hash = "sha256:c6242fc49e35958c8b15141343aa660db5fc54d4f13a1db01a3f5891b98700ef"},

@ -4622,7 +4582,7 @@ version = "3.1.5"
description = "A very fast and expressive template engine."
optional = false
python-versions = ">=3.7"
groups = ["main", "tools"]
groups = ["main", "dev", "tools"]
markers = "python_version == \"3.11\" or python_version >= \"3.12\""
files = [
    {file = "jinja2-3.1.5-py3-none-any.whl", hash = "sha256:aba0f4dc9ed8013c424088f68a5c226f7d6097ed89b246d7749c2ec4175c6adb"},

@ -5554,7 +5514,7 @@ version = "3.0.2"
description = "Safely add untrusted strings to HTML/XML markup."
optional = false
python-versions = ">=3.9"
groups = ["main", "tools"]
groups = ["main", "dev", "tools"]
markers = "python_version == \"3.11\" or python_version >= \"3.12\""
files = [
    {file = "MarkupSafe-3.0.2-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:7e94c425039cde14257288fd61dcfb01963e658efbc0ff54f5306b06054700f8"},

@ -7801,7 +7761,6 @@ files = [
    {file = "psycopg2_binary-2.9.10-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:bb89f0a835bcfc1d42ccd5f41f04870c1b936d8507c6df12b7737febc40f0909"},
    {file = "psycopg2_binary-2.9.10-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:f0c2d907a1e102526dd2986df638343388b94c33860ff3bbe1384130828714b1"},
    {file = "psycopg2_binary-2.9.10-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:f8157bed2f51db683f31306aa497311b560f2265998122abe1dce6428bd86567"},
    {file = "psycopg2_binary-2.9.10-cp313-cp313-win_amd64.whl", hash = "sha256:27422aa5f11fbcd9b18da48373eb67081243662f9b46e6fd07c3eb46e4535142"},
    {file = "psycopg2_binary-2.9.10-cp38-cp38-macosx_12_0_x86_64.whl", hash = "sha256:eb09aa7f9cecb45027683bb55aebaaf45a0df8bf6de68801a6afdc7947bb09d4"},
    {file = "psycopg2_binary-2.9.10-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b73d6d7f0ccdad7bc43e6d34273f70d587ef62f824d7261c4ae9b8b1b6af90e8"},
    {file = "psycopg2_binary-2.9.10-cp38-cp38-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ce5ab4bf46a211a8e924d307c1b1fcda82368586a19d0a24f8ae166f5c784864"},

@ -10158,7 +10117,7 @@ version = "2.0.35"
description = "Database Abstraction Library"
optional = false
python-versions = ">=3.7"
groups = ["main", "tools", "vdb"]
groups = ["main", "dev", "tools", "vdb"]
markers = "python_version == \"3.11\" or python_version >= \"3.12\""
files = [
    {file = "SQLAlchemy-2.0.35-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:67219632be22f14750f0d1c70e62f204ba69d28f62fd6432ba05ab295853de9b"},
@ -10887,26 +10846,179 @@ rich = ">=10.11.0"
shellingham = ">=1.3.0"
typing-extensions = ">=3.7.4.3"

[[package]]
name = "types-beautifulsoup4"
version = "4.12.0.20241020"
description = "Typing stubs for beautifulsoup4"
optional = false
python-versions = ">=3.8"
groups = ["dev"]
markers = "python_version == \"3.11\" or python_version >= \"3.12\""
files = [
    {file = "types-beautifulsoup4-4.12.0.20241020.tar.gz", hash = "sha256:158370d08d0cd448bd11b132a50ff5279237a5d4b5837beba074de152a513059"},
    {file = "types_beautifulsoup4-4.12.0.20241020-py3-none-any.whl", hash = "sha256:c95e66ce15a4f5f0835f7fbc5cd886321ae8294f977c495424eaf4225307fd30"},
]

[package.dependencies]
types-html5lib = "*"

[[package]]
name = "types-flask-cors"
version = "5.0.0.20240902"
description = "Typing stubs for Flask-Cors"
optional = false
python-versions = ">=3.8"
groups = ["dev"]
markers = "python_version == \"3.11\" or python_version >= \"3.12\""
files = [
    {file = "types-Flask-Cors-5.0.0.20240902.tar.gz", hash = "sha256:8921b273bf7cd9636df136b66408efcfa6338a935e5c8f53f5eff1cee03f3394"},
    {file = "types_Flask_Cors-5.0.0.20240902-py3-none-any.whl", hash = "sha256:595e5f36056cd128ab905832e055f2e5d116fbdc685356eea4490bc77df82137"},
]

[package.dependencies]
Flask = ">=2.0.0"

[[package]]
name = "types-flask-migrate"
version = "4.1.0.20250112"
description = "Typing stubs for Flask-Migrate"
optional = false
python-versions = ">=3.9"
groups = ["dev"]
markers = "python_version == \"3.11\" or python_version >= \"3.12\""
files = [
    {file = "types_Flask_Migrate-4.1.0.20250112-py3-none-any.whl", hash = "sha256:1814fffc609c2ead784affd011de92f0beecd48044963a8c898dd107dc1b5969"},
    {file = "types_flask_migrate-4.1.0.20250112.tar.gz", hash = "sha256:f2d2c966378ae7bb0660ec810e9af0a56ca03108235364c2a7b5e90418b0ff67"},
]

[package.dependencies]
Flask = ">=2.0.0"
Flask-SQLAlchemy = ">=3.0.1"

[[package]]
name = "types-html5lib"
version = "1.1.11.20241018"
description = "Typing stubs for html5lib"
optional = false
python-versions = ">=3.8"
groups = ["dev"]
markers = "python_version == \"3.11\" or python_version >= \"3.12\""
files = [
    {file = "types-html5lib-1.1.11.20241018.tar.gz", hash = "sha256:98042555ff78d9e3a51c77c918b1041acbb7eb6c405408d8a9e150ff5beccafa"},
    {file = "types_html5lib-1.1.11.20241018-py3-none-any.whl", hash = "sha256:3f1e064d9ed2c289001ae6392c84c93833abb0816165c6ff0abfc304a779f403"},
]

[[package]]
name = "types-openpyxl"
version = "3.1.5.20241225"
description = "Typing stubs for openpyxl"
optional = false
python-versions = ">=3.8"
groups = ["dev"]
markers = "python_version == \"3.11\" or python_version >= \"3.12\""
files = [
    {file = "types_openpyxl-3.1.5.20241225-py3-none-any.whl", hash = "sha256:903d92f58f42135b0614d609868c619aee12e1c7b65ccf8472dfd2706bcc6f47"},
    {file = "types_openpyxl-3.1.5.20241225.tar.gz", hash = "sha256:3c076f4c6f114e1859b6857ffd486e96c938c0434451c60dc54c2bcb62750d78"},
]

[[package]]
name = "types-protobuf"
version = "5.29.1.20241207"
description = "Typing stubs for protobuf"
optional = false
python-versions = ">=3.8"
groups = ["dev"]
markers = "python_version == \"3.11\" or python_version >= \"3.12\""
files = [
    {file = "types_protobuf-5.29.1.20241207-py3-none-any.whl", hash = "sha256:92893c42083e9b718c678badc0af7a9a1307b92afe1599e5cba5f3d35b668b2f"},
    {file = "types_protobuf-5.29.1.20241207.tar.gz", hash = "sha256:2ebcadb8ab3ef2e3e2f067e0882906d64ba0dc65fc5b0fd7a8b692315b4a0be9"},
]

[[package]]
name = "types-psutil"
version = "6.1.0.20241221"
description = "Typing stubs for psutil"
optional = false
python-versions = ">=3.8"
groups = ["dev"]
markers = "python_version == \"3.11\" or python_version >= \"3.12\""
files = [
    {file = "types_psutil-6.1.0.20241221-py3-none-any.whl", hash = "sha256:8498dbe13285a9ba7d4b2fa934c569cc380efc74e3dacdb34ae16d2cdf389ec3"},
    {file = "types_psutil-6.1.0.20241221.tar.gz", hash = "sha256:600f5a36bd5e0eb8887f0e3f3ff2cf154d90690ad8123c8a707bba4ab94d3185"},
]

[[package]]
name = "types-psycopg2"
version = "2.9.21.20250121"
description = "Typing stubs for psycopg2"
optional = false
python-versions = ">=3.9"
groups = ["dev"]
markers = "python_version == \"3.11\" or python_version >= \"3.12\""
files = [
    {file = "types_psycopg2-2.9.21.20250121-py3-none-any.whl", hash = "sha256:b890dc6f5a08b6433f0ff73a4ec9a834deedad3e914f2a4a6fd43df021f745f1"},
    {file = "types_psycopg2-2.9.21.20250121.tar.gz", hash = "sha256:2b0e2cd0f3747af1ae25a7027898716d80209604770ef3cbf350fe055b9c349b"},
]

[[package]]
name = "types-python-dateutil"
version = "2.9.0.20241206"
description = "Typing stubs for python-dateutil"
optional = false
python-versions = ">=3.8"
groups = ["dev"]
markers = "python_version == \"3.11\" or python_version >= \"3.12\""
files = [
    {file = "types_python_dateutil-2.9.0.20241206-py3-none-any.whl", hash = "sha256:e248a4bc70a486d3e3ec84d0dc30eec3a5f979d6e7ee4123ae043eedbb987f53"},
    {file = "types_python_dateutil-2.9.0.20241206.tar.gz", hash = "sha256:18f493414c26ffba692a72369fea7a154c502646301ebfe3d56a04b3767284cb"},
]

[[package]]
name = "types-pytz"
version = "2024.2.0.20241221"
description = "Typing stubs for pytz"
optional = false
python-versions = ">=3.8"
groups = ["main"]
groups = ["main", "dev"]
markers = "python_version == \"3.11\" or python_version >= \"3.12\""
files = [
    {file = "types_pytz-2024.2.0.20241221-py3-none-any.whl", hash = "sha256:8fc03195329c43637ed4f593663df721fef919b60a969066e22606edf0b53ad5"},
    {file = "types_pytz-2024.2.0.20241221.tar.gz", hash = "sha256:06d7cde9613e9f7504766a0554a270c369434b50e00975b3a4a0f6eed0f2c1a9"},
]

[[package]]
name = "types-pyyaml"
version = "6.0.12.20241230"
description = "Typing stubs for PyYAML"
optional = false
python-versions = ">=3.8"
groups = ["dev"]
markers = "python_version == \"3.11\" or python_version >= \"3.12\""
files = [
    {file = "types_PyYAML-6.0.12.20241230-py3-none-any.whl", hash = "sha256:fa4d32565219b68e6dee5f67534c722e53c00d1cfc09c435ef04d7353e1e96e6"},
    {file = "types_pyyaml-6.0.12.20241230.tar.gz", hash = "sha256:7f07622dbd34bb9c8b264fe860a17e0efcad00d50b5f27e93984909d9363498c"},
]

[[package]]
name = "types-regex"
version = "2024.11.6.20241221"
description = "Typing stubs for regex"
optional = false
python-versions = ">=3.8"
groups = ["dev"]
markers = "python_version == \"3.11\" or python_version >= \"3.12\""
files = [
    {file = "types_regex-2024.11.6.20241221-py3-none-any.whl", hash = "sha256:9d29ab639df22a86e15e2cc037e92ad100a4e8f4ecd2ad261d6f0c6d8d87f54e"},
    {file = "types_regex-2024.11.6.20241221.tar.gz", hash = "sha256:903c7b557d935363ba01f07a75981c78ada7df66623e415f32bda2afecfa5cca"},
]

[[package]]
name = "types-requests"
version = "2.32.0.20241016"
description = "Typing stubs for requests"
optional = false
python-versions = ">=3.8"
groups = ["main"]
groups = ["main", "dev"]
markers = "python_version == \"3.11\" or python_version >= \"3.12\""
files = [
    {file = "types-requests-2.32.0.20241016.tar.gz", hash = "sha256:0d9cad2f27515d0e3e3da7134a1b6f28fb97129d86b867f24d9c726452634d95"},

@ -10916,6 +11028,35 @@ files = [
[package.dependencies]
urllib3 = ">=2"

[[package]]
name = "types-six"
version = "1.17.0.20241205"
description = "Typing stubs for six"
optional = false
python-versions = ">=3.8"
groups = ["dev"]
markers = "python_version == \"3.11\" or python_version >= \"3.12\""
files = [
    {file = "types_six-1.17.0.20241205-py3-none-any.whl", hash = "sha256:a4947c2bdcd9ab69d44466a533a15839ff48ddc27223615cb8145d73ab805bc2"},
    {file = "types_six-1.17.0.20241205.tar.gz", hash = "sha256:1f662347a8f3b2bf30517d629d82f591420df29811794b0bf3804e14d716f6e0"},
]

[[package]]
name = "types-tqdm"
version = "4.67.0.20241221"
description = "Typing stubs for tqdm"
optional = false
python-versions = ">=3.8"
groups = ["dev"]
markers = "python_version == \"3.11\" or python_version >= \"3.12\""
files = [
    {file = "types_tqdm-4.67.0.20241221-py3-none-any.whl", hash = "sha256:a1f1c9cda5c2d8482d2c73957a5398bfdedda10f6bc7b3b4e812d5c910486d29"},
    {file = "types_tqdm-4.67.0.20241221.tar.gz", hash = "sha256:e56046631056922385abe89aeb18af5611f471eadd7918a0ad7f34d84cd4c8cc"},
]

[package.dependencies]
types-requests = "*"

[[package]]
name = "typing-extensions"
version = "4.12.2"
@ -11171,7 +11312,7 @@ version = "2.3.0"
description = "HTTP library with thread-safe connection pooling, file post, and more."
optional = false
python-versions = ">=3.9"
groups = ["main", "storage", "tools", "vdb"]
groups = ["main", "dev", "storage", "tools", "vdb"]
markers = "python_version == \"3.11\" or python_version >= \"3.12\""
files = [
    {file = "urllib3-2.3.0-py3-none-any.whl", hash = "sha256:1cee9ad369867bfdbbb48b7dd50374c0967a0bb7710050facf0dd6911440e3df"},

@ -11666,7 +11807,7 @@ version = "3.1.3"
description = "The comprehensive WSGI web application library."
optional = false
python-versions = ">=3.9"
groups = ["main", "tools"]
groups = ["main", "dev", "tools"]
markers = "python_version == \"3.11\" or python_version >= \"3.12\""
files = [
    {file = "werkzeug-3.1.3-py3-none-any.whl", hash = "sha256:54b78bf3716d19a65be4fceccc0d1d7b89e608834989dfae50ea87564639213e"},

@ -12247,4 +12388,4 @@ cffi = ["cffi (>=1.11)"]

[metadata]
lock-version = "2.1"
python-versions = ">=3.11,<3.13"
content-hash = "a8fff72e974a1bd5c28f4ae326d120410a5628ad0bc65d87adca4e943130ec8f"
content-hash = "6243573a26b9aa03558eb2c176d2477a08b1033a17065e870e4be83af0af644d"
@ -88,7 +88,6 @@ tencentcloud-sdk-python-hunyuan = "~3.0.1294"
tiktoken = "~0.8.0"
tokenizers = "~0.15.0"
transformers = "~4.35.0"
types-pytz = "~2024.2.0.20241003"
unstructured = { version = "~0.16.1", extras = ["docx", "epub", "md", "msg", "ppt", "pptx"] }
validators = "0.21.0"
volcengine-python-sdk = {extras = ["ark"], version = "~1.0.98"}

@ -183,6 +182,21 @@ pytest = "~8.3.2"
pytest-benchmark = "~4.0.0"
pytest-env = "~1.1.3"
pytest-mock = "~3.14.0"
types-beautifulsoup4 = "~4.12.0.20241020"
types-flask-cors = "~5.0.0.20240902"
types-flask-migrate = "~4.1.0.20250112"
types-html5lib = "~1.1.11.20241018"
types-openpyxl = "~3.1.5.20241225"
types-protobuf = "~5.29.1.20241207"
types-psutil = "~6.1.0.20241221"
types-psycopg2 = "~2.9.21.20250121"
types-python-dateutil = "~2.9.0.20241206"
types-pytz = "~2024.2.0.20241221"
types-pyyaml = "~6.0.12.20241230"
types-regex = "~2024.11.6.20241221"
types-requests = "~2.32.0.20241016"
types-six = "~1.17.0.20241205"
types-tqdm = "~4.67.0.20241221"

############################################################
# [ Lint ] dependency group
@ -21,8 +21,8 @@ class FirecrawlAuth(ApiKeyAuthBase):
        headers = self._prepare_headers()
        options = {
            "url": "https://example.com",
            "excludes": [],
            "includes": [],
            "includePaths": [],
            "excludePaths": [],
            "limit": 1,
            "scrapeOptions": {"onlyMainContent": True},
        }
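The hunk swaps the legacy includes/excludes filters for Firecrawl's v1 includePaths/excludePaths names. A small hedged helper showing that mapping (not part of the diff; names follow the lines above):

```python
def migrate_crawl_options(old: dict) -> dict:
    """Map legacy v0 crawl filter names onto the v1 names used above."""
    new = dict(old)
    new["includePaths"] = new.pop("includes", [])
    new["excludePaths"] = new.pop("excludes", [])
    return new

print(migrate_crawl_options({"includes": ["/blog/*"], "excludes": [], "limit": 1}))
```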
@ -42,6 +42,7 @@ from models.source import DataSourceOauthBinding
from services.entities.knowledge_entities.knowledge_entities import (
    ChildChunkUpdateArgs,
    KnowledgeConfig,
    MetaDataConfig,
    RerankingModel,
    RetrievalModel,
    SegmentUpdateArgs,

@ -894,6 +895,9 @@ class DocumentService:
                    document.data_source_info = json.dumps(data_source_info)
                    document.batch = batch
                    document.indexing_status = "waiting"
                    if knowledge_config.metadata:
                        document.doc_type = knowledge_config.metadata.doc_type
                        document.doc_metadata = knowledge_config.metadata.doc_metadata
                    db.session.add(document)
                    documents.append(document)
                    duplicate_document_ids.append(document.id)

@ -910,6 +914,7 @@ class DocumentService:
                        account,
                        file_name,
                        batch,
                        knowledge_config.metadata,
                    )
                    db.session.add(document)
                    db.session.flush()

@ -965,6 +970,7 @@ class DocumentService:
                            account,
                            page.page_name,
                            batch,
                            knowledge_config.metadata,
                        )
                        db.session.add(document)
                        db.session.flush()

@ -1005,6 +1011,7 @@ class DocumentService:
                        account,
                        document_name,
                        batch,
                        knowledge_config.metadata,
                    )
                    db.session.add(document)
                    db.session.flush()

@ -1042,6 +1049,7 @@ class DocumentService:
        account: Account,
        name: str,
        batch: str,
        metadata: Optional[MetaDataConfig] = None,
    ):
        document = Document(
            tenant_id=dataset.tenant_id,

@ -1057,6 +1065,9 @@ class DocumentService:
            doc_form=document_form,
            doc_language=document_language,
        )
        if metadata is not None:
            document.doc_metadata = metadata.doc_metadata
            document.doc_type = metadata.doc_type
        return document

    @staticmethod

@ -1169,6 +1180,10 @@ class DocumentService:
        # update document name
        if document_data.name:
            document.name = document_data.name
        # update doc_type and doc_metadata if provided
        if document_data.metadata is not None:
            document.doc_metadata = document_data.metadata.doc_metadata
            document.doc_type = document_data.metadata.doc_type
        # update document to be waiting
        document.indexing_status = "waiting"
        document.completed_at = None
@ -93,6 +93,11 @@ class RetrievalModel(BaseModel):
    score_threshold: Optional[float] = None


class MetaDataConfig(BaseModel):
    doc_type: str
    doc_metadata: dict


class KnowledgeConfig(BaseModel):
    original_document_id: Optional[str] = None
    duplicate: bool = True

@ -105,6 +110,7 @@ class KnowledgeConfig(BaseModel):
    embedding_model: Optional[str] = None
    embedding_model_provider: Optional[str] = None
    name: Optional[str] = None
    metadata: Optional[MetaDataConfig] = None


class SegmentUpdateArgs(BaseModel):
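A minimal, self-contained mirror of the two entities above (pydantic v2), showing how a request payload carrying the new metadata field validates; the real classes live in services/entities/knowledge_entities and carry more fields:

```python
from typing import Optional
from pydantic import BaseModel

class MetaDataConfig(BaseModel):
    doc_type: str
    doc_metadata: dict

class KnowledgeConfig(BaseModel):
    name: Optional[str] = None
    metadata: Optional[MetaDataConfig] = None

cfg = KnowledgeConfig.model_validate(
    {
        "name": "handbook",
        "metadata": {"doc_type": "book", "doc_metadata": {"title": "Handbook", "author": "A. Author"}},
    }
)
print(cfg.metadata.doc_type)  # -> book
```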
@ -38,9 +38,8 @@ class WebsiteService:
        only_main_content = options.get("only_main_content", False)
        if not crawl_sub_pages:
            params = {
                "includes": [],
                "excludes": [],
                "generateImgAltText": True,
                "includePaths": [],
                "excludePaths": [],
                "limit": 1,
                "scrapeOptions": {"onlyMainContent": only_main_content},
            }

@ -48,9 +47,8 @@ class WebsiteService:
            includes = options.get("includes").split(",") if options.get("includes") else []
            excludes = options.get("excludes").split(",") if options.get("excludes") else []
            params = {
                "includes": includes,
                "excludes": excludes,
                "generateImgAltText": True,
                "includePaths": includes,
                "excludePaths": excludes,
                "limit": options.get("limit", 1),
                "scrapeOptions": {"onlyMainContent": only_main_content},
            }
@ -10,6 +10,7 @@ from core.model_runtime.model_providers.huggingface_hub.llm.llm import HuggingfaceHubLargeLanguageModel
from tests.integration_tests.model_runtime.__mock.huggingface import setup_huggingface_mock


@pytest.mark.skip
@pytest.mark.parametrize("setup_huggingface_mock", [["none"]], indirect=True)
def test_hosted_inference_api_validate_credentials(setup_huggingface_mock):
    model = HuggingfaceHubLargeLanguageModel()
@ -10,9 +10,8 @@ def test_firecrawl_web_extractor_crawl_mode(mocker):
    base_url = "https://api.firecrawl.dev"
    firecrawl_app = FirecrawlApp(api_key=api_key, base_url=base_url)
    params = {
        "includes": [],
        "excludes": [],
        "generateImgAltText": True,
        "includePaths": [],
        "excludePaths": [],
        "maxDepth": 1,
        "limit": 1,
    }
@ -47,6 +47,44 @@ import { Row, Col, Properties, Property, Heading, SubProperty, PropertyInstructi
    <Property name='text' type='string' key='text'>
      Document content
    </Property>
    <Property name='doc_type' type='string' key='doc_type'>
      Type of document (optional):
      - <code>book</code> Book
      - <code>web_page</code> Web page
      - <code>paper</code> Academic paper/article
      - <code>social_media_post</code> Social media post
      - <code>wikipedia_entry</code> Wikipedia entry
      - <code>personal_document</code> Personal document
      - <code>business_document</code> Business document
      - <code>im_chat_log</code> Chat log
      - <code>synced_from_notion</code> Notion document
      - <code>synced_from_github</code> GitHub document
      - <code>others</code> Other document types
    </Property>
    <Property name='doc_metadata' type='object' key='doc_metadata'>
      Document metadata (required if doc_type is provided). Fields vary by doc_type:
      For <code>book</code>:
      - <code>title</code> Book title
      - <code>language</code> Book language
      - <code>author</code> Book author
      - <code>publisher</code> Publisher name
      - <code>publication_date</code> Publication date
      - <code>isbn</code> ISBN number
      - <code>category</code> Book category

      For <code>web_page</code>:
      - <code>title</code> Page title
      - <code>url</code> Page URL
      - <code>language</code> Page language
      - <code>publish_date</code> Publish date
      - <code>author/publisher</code> Author or publisher
      - <code>topic/keywords</code> Topic or keywords
      - <code>description</code> Page description

      Please check [api/services/dataset_service.py](https://github.com/langgenius/dify/blob/main/api/services/dataset_service.py#L475) for more details on the fields required for each doc_type.

      For doc_type "others", any valid JSON object is accepted
    </Property>
    <Property name='indexing_technique' type='string' key='indexing_technique'>
      Index mode
      - <code>high_quality</code> High quality: embedding using embedding model, built as vector database index
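A hedged example of the payload shape these properties describe; the endpoint path and auth header are assumptions following the dataset API conventions in this file, so adjust them to the documented route:

```python
import httpx  # any HTTP client works; httpx is illustrative

payload = {
    "name": "text",
    "text": "Some document content",
    "doc_type": "web_page",
    "doc_metadata": {
        "title": "Example page",
        "url": "https://example.com",
        "language": "en",
    },
    "indexing_technique": "high_quality",
}
# httpx.post(f"{api_base}/datasets/{dataset_id}/document/create-by-text",
#            headers={"Authorization": f"Bearer {api_key}"}, json=payload)
```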
@ -195,6 +233,68 @@ import { Row, Col, Properties, Property, Heading, SubProperty, PropertyInstructi
      - <code>hierarchical_model</code> Parent-child mode
      - <code>qa_model</code> Q&A Mode: Generates Q&A pairs for segmented documents and then embeds the questions

    - <code>doc_type</code> Type of document (optional)
      - <code>book</code> Book
        Document records a book or publication
      - <code>web_page</code> Web page
        Document records web page content
      - <code>paper</code> Academic paper/article
        Document records academic paper or research article
      - <code>social_media_post</code> Social media post
        Content from social media posts
      - <code>wikipedia_entry</code> Wikipedia entry
        Content from Wikipedia entries
      - <code>personal_document</code> Personal document
        Documents related to personal content
      - <code>business_document</code> Business document
        Documents related to business content
      - <code>im_chat_log</code> Chat log
        Records of instant messaging chats
      - <code>synced_from_notion</code> Notion document
        Documents synchronized from Notion
      - <code>synced_from_github</code> GitHub document
        Documents synchronized from GitHub
      - <code>others</code> Other document types
        Other document types not listed above

    - <code>doc_metadata</code> Document metadata (required if doc_type is provided)
      Fields vary by doc_type:

      For <code>book</code>:
      - <code>title</code> Book title
        Title of the book
      - <code>language</code> Book language
        Language of the book
      - <code>author</code> Book author
        Author of the book
      - <code>publisher</code> Publisher name
        Name of the publishing house
      - <code>publication_date</code> Publication date
        Date when the book was published
      - <code>isbn</code> ISBN number
        International Standard Book Number
      - <code>category</code> Book category
        Category or genre of the book

      For <code>web_page</code>:
      - <code>title</code> Page title
        Title of the web page
      - <code>url</code> Page URL
        URL address of the web page
      - <code>language</code> Page language
        Language of the web page
      - <code>publish_date</code> Publish date
        Date when the web page was published
      - <code>author/publisher</code> Author or publisher
        Author or publisher of the web page
      - <code>topic/keywords</code> Topic or keywords
        Topics or keywords of the web page
      - <code>description</code> Page description
        Description of the web page content

      Please check [api/services/dataset_service.py](https://github.com/langgenius/dify/blob/main/api/services/dataset_service.py#L475) for more details on the fields required for each doc_type.
      For doc_type "others", any valid JSON object is accepted

    - <code>doc_language</code> In Q&A mode, specify the language of the document, for example: <code>English</code>, <code>Chinese</code>

    - <code>process_rule</code> Processing rules
@ -307,6 +407,44 @@ import { Row, Col, Properties, Property, Heading, SubProperty, PropertyInstructi
    <Property name='description' type='string' key='description'>
      Knowledge description (optional)
    </Property>
    <Property name='doc_type' type='string' key='doc_type'>
      Type of document (optional):
      - <code>book</code> Book
      - <code>web_page</code> Web page
      - <code>paper</code> Academic paper/article
      - <code>social_media_post</code> Social media post
      - <code>wikipedia_entry</code> Wikipedia entry
      - <code>personal_document</code> Personal document
      - <code>business_document</code> Business document
      - <code>im_chat_log</code> Chat log
      - <code>synced_from_notion</code> Notion document
      - <code>synced_from_github</code> GitHub document
      - <code>others</code> Other document types
    </Property>
    <Property name='doc_metadata' type='object' key='doc_metadata'>
      Document metadata (required if doc_type is provided). Fields vary by doc_type:
      For <code>book</code>:
      - <code>title</code> Book title
      - <code>language</code> Book language
      - <code>author</code> Book author
      - <code>publisher</code> Publisher name
      - <code>publication_date</code> Publication date
      - <code>isbn</code> ISBN number
      - <code>category</code> Book category

      For <code>web_page</code>:
      - <code>title</code> Page title
      - <code>url</code> Page URL
      - <code>language</code> Page language
      - <code>publish_date</code> Publish date
      - <code>author/publisher</code> Author or publisher
      - <code>topic/keywords</code> Topic or keywords
      - <code>description</code> Page description

      Please check [api/services/dataset_service.py](https://github.com/langgenius/dify/blob/main/api/services/dataset_service.py#L475) for more details on the fields required for each doc_type.

      For doc_type "others", any valid JSON object is accepted
    </Property>
    <Property name='indexing_technique' type='string' key='indexing_technique'>
      Index technique (optional)
      - <code>high_quality</code> High quality

@ -624,6 +762,67 @@ import { Row, Col, Properties, Property, Heading, SubProperty, PropertyInstructi
      - <code>separator</code> Segmentation identifier. Currently, only one delimiter is allowed. The default is <code>***</code>
      - <code>max_tokens</code> The maximum length (tokens) must be validated to be shorter than the length of the parent chunk
      - <code>chunk_overlap</code> Define the overlap between adjacent chunks (optional)
    - <code>doc_type</code> Type of document (optional)
      - <code>book</code> Book
        Document records a book or publication
      - <code>web_page</code> Web page
        Document records web page content
      - <code>paper</code> Academic paper/article
        Document records academic paper or research article
      - <code>social_media_post</code> Social media post
        Content from social media posts
      - <code>wikipedia_entry</code> Wikipedia entry
        Content from Wikipedia entries
      - <code>personal_document</code> Personal document
        Documents related to personal content
      - <code>business_document</code> Business document
        Documents related to business content
      - <code>im_chat_log</code> Chat log
        Records of instant messaging chats
      - <code>synced_from_notion</code> Notion document
        Documents synchronized from Notion
      - <code>synced_from_github</code> GitHub document
        Documents synchronized from GitHub
      - <code>others</code> Other document types
        Other document types not listed above

    - <code>doc_metadata</code> Document metadata (required if doc_type is provided)
      Fields vary by doc_type:

      For <code>book</code>:
      - <code>title</code> Book title
        Title of the book
      - <code>language</code> Book language
        Language of the book
      - <code>author</code> Book author
        Author of the book
      - <code>publisher</code> Publisher name
        Name of the publishing house
      - <code>publication_date</code> Publication date
        Date when the book was published
      - <code>isbn</code> ISBN number
        International Standard Book Number
      - <code>category</code> Book category
        Category or genre of the book

      For <code>web_page</code>:
      - <code>title</code> Page title
        Title of the web page
      - <code>url</code> Page URL
        URL address of the web page
      - <code>language</code> Page language
        Language of the web page
      - <code>publish_date</code> Publish date
        Date when the web page was published
      - <code>author/publisher</code> Author or publisher
        Author or publisher of the web page
      - <code>topic/keywords</code> Topic or keywords
        Topics or keywords of the web page
      - <code>description</code> Page description
        Description of the web page content

      Please check [api/services/dataset_service.py](https://github.com/langgenius/dify/blob/main/api/services/dataset_service.py#L475) for more details on the fields required for each doc_type.
      For doc_type "others", any valid JSON object is accepted
    </Property>
  </Properties>
</Col>
@ -47,6 +47,46 @@ import { Row, Col, Properties, Property, Heading, SubProperty, PropertyInstructi
    <Property name='text' type='string' key='text'>
      文档内容
    </Property>
    <Property name='doc_type' type='string' key='doc_type'>
      文档类型(选填)
      - <code>book</code> 图书 Book
      - <code>web_page</code> 网页 Web page
      - <code>paper</code> 学术论文/文章 Academic paper/article
      - <code>social_media_post</code> 社交媒体帖子 Social media post
      - <code>wikipedia_entry</code> 维基百科条目 Wikipedia entry
      - <code>personal_document</code> 个人文档 Personal document
      - <code>business_document</code> 商业文档 Business document
      - <code>im_chat_log</code> 即时通讯记录 Chat log
      - <code>synced_from_notion</code> Notion同步文档 Notion document
      - <code>synced_from_github</code> GitHub同步文档 GitHub document
      - <code>others</code> 其他文档类型 Other document types
    </Property>
    <Property name='doc_metadata' type='object' key='doc_metadata'>

      文档元数据(如提供文档类型则必填)。字段因文档类型而异:

      针对图书 For <code>book</code>:
      - <code>title</code> 书名 Book title
      - <code>language</code> 图书语言 Book language
      - <code>author</code> 作者 Book author
      - <code>publisher</code> 出版社 Publisher name
      - <code>publication_date</code> 出版日期 Publication date
      - <code>isbn</code> ISBN号码 ISBN number
      - <code>category</code> 图书分类 Book category

      针对网页 For <code>web_page</code>:
      - <code>title</code> 页面标题 Page title
      - <code>url</code> 页面网址 Page URL
      - <code>language</code> 页面语言 Page language
      - <code>publish_date</code> 发布日期 Publish date
      - <code>author/publisher</code> 作者/发布者 Author or publisher
      - <code>topic/keywords</code> 主题/关键词 Topic or keywords
      - <code>description</code> 页面描述 Page description

      请查看 [api/services/dataset_service.py](https://github.com/langgenius/dify/blob/main/api/services/dataset_service.py#L475) 了解各文档类型所需字段的详细信息。

      针对"其他"类型文档,接受任何有效的JSON对象
    </Property>
    <Property name='indexing_technique' type='string' key='indexing_technique'>
      索引方式
      - <code>high_quality</code> 高质量:使用 embedding 模型进行嵌入,构建为向量数据库索引
@@ -194,6 +234,68 @@ import { Row, Col, Properties, Property, Heading, SubProperty, PropertyInstructi
- <code>text_model</code> Text documents are embedded directly; this is the default in economy mode
- <code>hierarchical_model</code> Parent-child mode
- <code>qa_model</code> Q&A mode: generates Q&A pairs for the chunked documents, then embeds the questions
- <code>doc_type</code> Type of document (optional)
  - <code>book</code> Book
    Document records a book or publication
  - <code>web_page</code> Web page
    Document records web page content
  - <code>paper</code> Academic paper/article
    Document records an academic paper or research article
  - <code>social_media_post</code> Social media post
    Content from social media posts
  - <code>wikipedia_entry</code> Wikipedia entry
    Content from Wikipedia entries
  - <code>personal_document</code> Personal document
    Documents related to personal content
  - <code>business_document</code> Business document
    Documents related to business content
  - <code>im_chat_log</code> Chat log
    Records of instant messaging chats
  - <code>synced_from_notion</code> Notion document
    Documents synchronized from Notion
  - <code>synced_from_github</code> GitHub document
    Documents synchronized from GitHub
  - <code>others</code> Other document types
    Other document types not listed above

- <code>doc_metadata</code> Document metadata (required if doc_type is provided)
  Fields vary by doc_type:

  For <code>book</code>:
  - <code>title</code> Book title
    Title of the book
  - <code>language</code> Book language
    Language of the book
  - <code>author</code> Book author
    Author of the book
  - <code>publisher</code> Publisher name
    Name of the publishing house
  - <code>publication_date</code> Publication date
    Date when the book was published
  - <code>isbn</code> ISBN number
    ISBN of the book
  - <code>category</code> Book category
    Category or genre of the book

  For <code>web_page</code>:
  - <code>title</code> Page title
    Title of the web page
  - <code>url</code> Page URL
    URL address of the web page
  - <code>language</code> Page language
    Language of the web page
  - <code>publish_date</code> Publish date
    Date when the web page was published
  - <code>author/publisher</code> Author or publisher
    Author or publisher of the web page
  - <code>topic/keywords</code> Topic or keywords
    Topics or keywords of the web page
  - <code>description</code> Page description
    Description of the web page content

  Please check [api/services/dataset_service.py](https://github.com/langgenius/dify/blob/main/api/services/dataset_service.py#L475) for more details on the fields required for each doc_type.

  For doc_type "others", any valid JSON object is accepted

- <code>doc_language</code> In Q&A mode, specify the language of the document, e.g. <code>English</code>, <code>Chinese</code>
@@ -504,6 +606,46 @@ import { Row, Col, Properties, Property, Heading, SubProperty, PropertyInstructi
<Property name='text' type='string' key='text'>
Document content (optional)
</Property>
<Property name='doc_type' type='string' key='doc_type'>
Type of document (optional)
- <code>book</code> Book
- <code>web_page</code> Web page
- <code>paper</code> Academic paper/article
- <code>social_media_post</code> Social media post
- <code>wikipedia_entry</code> Wikipedia entry
- <code>personal_document</code> Personal document
- <code>business_document</code> Business document
- <code>im_chat_log</code> Chat log
- <code>synced_from_notion</code> Notion document
- <code>synced_from_github</code> GitHub document
- <code>others</code> Other document types
</Property>
<Property name='doc_metadata' type='object' key='doc_metadata'>

Document metadata (required if doc_type is provided). Fields vary by doc_type:

For <code>book</code>:
- <code>title</code> Book title
- <code>language</code> Book language
- <code>author</code> Book author
- <code>publisher</code> Publisher name
- <code>publication_date</code> Publication date
- <code>isbn</code> ISBN number
- <code>category</code> Book category

For <code>web_page</code>:
- <code>title</code> Page title
- <code>url</code> Page URL
- <code>language</code> Page language
- <code>publish_date</code> Publish date
- <code>author/publisher</code> Author or publisher
- <code>topic/keywords</code> Topic or keywords
- <code>description</code> Page description

Please check [api/services/dataset_service.py](https://github.com/langgenius/dify/blob/main/api/services/dataset_service.py#L475) for more details on the fields required for each doc_type.

For doc_type "others", any valid JSON object is accepted
</Property>
<Property name='process_rule' type='object' key='process_rule'>
Processing rules (optional)
- <code>mode</code> (string) Cleaning and segmentation mode: <code>automatic</code> or <code>custom</code>
@@ -624,6 +766,68 @@ import { Row, Col, Properties, Property, Heading, SubProperty, PropertyInstructi
- <code>separator</code> Segmentation identifier. Currently, only one delimiter is allowed. The default is <code>***</code>
- <code>max_tokens</code> Maximum length in tokens; must be shorter than the parent chunk's length
- <code>chunk_overlap</code> Overlap between adjacent chunks when segmenting the data (optional)
- <code>doc_type</code> Type of document (optional)
  - <code>book</code> Book
    Document records a book or publication
  - <code>web_page</code> Web page
    Document records web page content
  - <code>paper</code> Academic paper/article
    Document records an academic paper or research article
  - <code>social_media_post</code> Social media post
    Content from social media posts
  - <code>wikipedia_entry</code> Wikipedia entry
    Content from Wikipedia entries
  - <code>personal_document</code> Personal document
    Documents related to personal content
  - <code>business_document</code> Business document
    Documents related to business content
  - <code>im_chat_log</code> Chat log
    Records of instant messaging chats
  - <code>synced_from_notion</code> Notion document
    Documents synchronized from Notion
  - <code>synced_from_github</code> GitHub document
    Documents synchronized from GitHub
  - <code>others</code> Other document types
    Other document types not listed above

- <code>doc_metadata</code> Document metadata (required if doc_type is provided)
  Fields vary by doc_type:

  For <code>book</code>:
  - <code>title</code> Book title
    Title of the book
  - <code>language</code> Book language
    Language of the book
  - <code>author</code> Book author
    Author of the book
  - <code>publisher</code> Publisher name
    Name of the publishing house
  - <code>publication_date</code> Publication date
    Date when the book was published
  - <code>isbn</code> ISBN number
    ISBN of the book
  - <code>category</code> Book category
    Category or genre of the book

  For <code>web_page</code>:
  - <code>title</code> Page title
    Title of the web page
  - <code>url</code> Page URL
    URL address of the web page
  - <code>language</code> Page language
    Language of the web page
  - <code>publish_date</code> Publish date
    Date when the web page was published
  - <code>author/publisher</code> Author or publisher
    Author or publisher of the web page
  - <code>topic/keywords</code> Topic or keywords
    Topics or keywords of the web page
  - <code>description</code> Page description
    Description of the web page content

  Please check [api/services/dataset_service.py](https://github.com/langgenius/dify/blob/main/api/services/dataset_service.py#L475) for more details on the fields required for each doc_type.

  For doc_type "others", any valid JSON object is accepted
</Property>
</Properties>
</Col>
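One way the custom rules above might be combined for parent-child chunking, shown as a rough sketch only: the exact nesting under <code>rules</code>/<code>segmentation</code>/<code>subchunk_segmentation</code> is an assumption here, so check the full API reference for the authoritative shape:

```ts
// Sketch of a custom process_rule for parent-child mode. Field names
// separator/max_tokens/chunk_overlap come from the list above; the nesting
// and values are assumed for illustration.
const processRule = {
  mode: 'hierarchical',
  rules: {
    // parent chunks
    segmentation: { separator: '***', max_tokens: 1024 },
    // child chunks, each shorter than its parent
    subchunk_segmentation: { separator: '\n', max_tokens: 256, chunk_overlap: 32 },
  },
}
```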
@@ -67,7 +67,6 @@ const ChatItem: FC<ChatItemProps> = ({
}, [modelConfig.configs.prompt_variables])
const {
chatList,
chatListRef,
isResponding,
handleSend,
suggestedQuestions,
@@ -102,7 +101,7 @@ const ChatItem: FC<ChatItemProps> = ({
query: message,
inputs,
model_config: configData,
parent_message_id: getLastAnswer(chatListRef.current)?.id || null,
parent_message_id: getLastAnswer(chatList)?.id || null,
}

if ((config.file_upload as any).enabled && files?.length && supportVision)
@@ -116,7 +115,7 @@ const ChatItem: FC<ChatItemProps> = ({
onGetSuggestedQuestions: (responseItemId, getAbortController) => fetchSuggestedQuestions(appId, responseItemId, getAbortController),
},
)
}, [appId, config, handleSend, inputs, modelAndParameter, textGenerationModelList, chatListRef])
}, [appId, chatList, config, handleSend, inputs, modelAndParameter.model, modelAndParameter.parameters, modelAndParameter.provider, textGenerationModelList])

const { eventEmitter } = useEventEmitterContextContext()
eventEmitter?.useSubscription((v: any) => {

@@ -12,7 +12,7 @@ import {
import Chat from '@/app/components/base/chat/chat'
import { useChat } from '@/app/components/base/chat/chat/hooks'
import { useDebugConfigurationContext } from '@/context/debug-configuration'
import type { ChatConfig, ChatItem, OnSend } from '@/app/components/base/chat/types'
import type { ChatConfig, ChatItem, ChatItemInTree, OnSend } from '@/app/components/base/chat/types'
import { useProviderContext } from '@/context/provider-context'
import {
fetchConversationMessages,
@@ -24,7 +24,7 @@ import { useAppContext } from '@/context/app-context'
import { ModelFeatureEnum } from '@/app/components/header/account-setting/model-provider-page/declarations'
import { useStore as useAppStore } from '@/app/components/app/store'
import { useFeatures } from '@/app/components/base/features/hooks'
import { getLastAnswer } from '@/app/components/base/chat/utils'
import { getLastAnswer, isValidGeneratedAnswer } from '@/app/components/base/chat/utils'
import type { InputForm } from '@/app/components/base/chat/chat/type'

type DebugWithSingleModelProps = {
@@ -68,12 +68,11 @@ const DebugWithSingleModel = forwardRef<DebugWithSingleModelRefType, DebugWithSi
}, [modelConfig.configs.prompt_variables])
const {
chatList,
chatListRef,
setTargetMessageId,
isResponding,
handleSend,
suggestedQuestions,
handleStop,
handleUpdateChatList,
handleRestart,
handleAnnotationAdded,
handleAnnotationEdited,
@@ -89,7 +88,7 @@ const DebugWithSingleModel = forwardRef<DebugWithSingleModelRefType, DebugWithSi
)
useFormattingChangedSubscription(chatList)

const doSend: OnSend = useCallback((message, files, last_answer) => {
const doSend: OnSend = useCallback((message, files, isRegenerate = false, parentAnswer: ChatItem | null = null) => {
if (checkCanSend && !checkCanSend())
return
const currentProvider = textGenerationModelList.find(item => item.provider === modelConfig.provider)
@@ -110,7 +109,7 @@ const DebugWithSingleModel = forwardRef<DebugWithSingleModelRefType, DebugWithSi
query: message,
inputs,
model_config: configData,
parent_message_id: last_answer?.id || getLastAnswer(chatListRef.current)?.id || null,
parent_message_id: (isRegenerate ? parentAnswer?.id : getLastAnswer(chatList)?.id) || null,
}

if ((config.file_upload as any)?.enabled && files?.length && supportVision)
@@ -124,23 +123,13 @@ const DebugWithSingleModel = forwardRef<DebugWithSingleModelRefType, DebugWithSi
onGetSuggestedQuestions: (responseItemId, getAbortController) => fetchSuggestedQuestions(appId, responseItemId, getAbortController),
},
)
}, [chatListRef, appId, checkCanSend, completionParams, config, handleSend, inputs, modelConfig, textGenerationModelList])
}, [appId, chatList, checkCanSend, completionParams, config, handleSend, inputs, modelConfig.mode, modelConfig.model_id, modelConfig.provider, textGenerationModelList])

const doRegenerate = useCallback((chatItem: ChatItem) => {
const index = chatList.findIndex(item => item.id === chatItem.id)
if (index === -1)
return

const prevMessages = chatList.slice(0, index)
const question = prevMessages.pop()
const lastAnswer = getLastAnswer(prevMessages)

if (!question)
return

handleUpdateChatList(prevMessages)
doSend(question.content, question.message_files, lastAnswer)
}, [chatList, handleUpdateChatList, doSend])
const doRegenerate = useCallback((chatItem: ChatItemInTree) => {
const question = chatList.find(item => item.id === chatItem.parentMessageId)!
const parentAnswer = chatList.find(item => item.id === question.parentMessageId)
doSend(question.content, question.message_files, true, isValidGeneratedAnswer(parentAnswer) ? parentAnswer : null)
}, [chatList, doSend])

const allToolIcons = useMemo(() => {
const icons: Record<string, any> = {}
@@ -173,6 +162,7 @@ const DebugWithSingleModel = forwardRef<DebugWithSingleModelRefType, DebugWithSi
inputs={inputs}
inputsForm={inputsForm}
onRegenerate={doRegenerate}
switchSibling={siblingMessageId => setTargetMessageId(siblingMessageId)}
onStopResponding={handleStop}
showPromptLog
questionIcon={<Avatar avatar={userProfile.avatar_url} name={userProfile.name} size={40} />}

@@ -3,10 +3,11 @@ import Chat from '../chat'
import type {
ChatConfig,
ChatItem,
ChatItemInTree,
OnSend,
} from '../types'
import { useChat } from '../chat/hooks'
import { getLastAnswer } from '../utils'
import { getLastAnswer, isValidGeneratedAnswer } from '../utils'
import { useChatWithHistoryContext } from './context'
import Header from './header'
import ConfigPanel from './config-panel'
@@ -20,7 +21,7 @@ import AnswerIcon from '@/app/components/base/answer-icon'
const ChatWrapper = () => {
const {
appParams,
appPrevChatList,
appPrevChatTree,
currentConversationId,
currentConversationItem,
inputsForms,
@@ -50,8 +51,7 @@ const ChatWrapper = () => {
}, [appParams, currentConversationItem?.introduction, currentConversationId])
const {
chatList,
chatListRef,
handleUpdateChatList,
setTargetMessageId,
handleSend,
handleStop,
isResponding,
@@ -62,7 +62,7 @@ const ChatWrapper = () => {
inputs: (currentConversationId ? currentConversationItem?.inputs : newConversationInputs) as any,
inputsForm: inputsForms,
},
appPrevChatList,
appPrevChatTree,
taskId => stopChatMessageResponding('', taskId, isInstalledApp, appId),
)

@@ -72,13 +72,13 @@ const ChatWrapper = () => {
// eslint-disable-next-line react-hooks/exhaustive-deps
}, [])

const doSend: OnSend = useCallback((message, files, last_answer) => {
const doSend: OnSend = useCallback((message, files, isRegenerate = false, parentAnswer: ChatItem | null = null) => {
const data: any = {
query: message,
files,
inputs: currentConversationId ? currentConversationItem?.inputs : newConversationInputs,
conversation_id: currentConversationId,
parent_message_id: last_answer?.id || getLastAnswer(chatListRef.current)?.id || null,
parent_message_id: (isRegenerate ? parentAnswer?.id : getLastAnswer(chatList)?.id) || null,
}

handleSend(
@@ -91,31 +91,21 @@ const ChatWrapper = () => {
},
)
}, [
chatListRef,
chatList,
handleNewConversationCompleted,
handleSend,
currentConversationId,
currentConversationItem,
handleSend,
newConversationInputs,
handleNewConversationCompleted,
isInstalledApp,
appId,
])

const doRegenerate = useCallback((chatItem: ChatItem) => {
const index = chatList.findIndex(item => item.id === chatItem.id)
if (index === -1)
return

const prevMessages = chatList.slice(0, index)
const question = prevMessages.pop()
const lastAnswer = getLastAnswer(prevMessages)

if (!question)
return

handleUpdateChatList(prevMessages)
doSend(question.content, question.message_files, lastAnswer)
}, [chatList, handleUpdateChatList, doSend])
const doRegenerate = useCallback((chatItem: ChatItemInTree) => {
const question = chatList.find(item => item.id === chatItem.parentMessageId)!
const parentAnswer = chatList.find(item => item.id === question.parentMessageId)
doSend(question.content, question.message_files, true, isValidGeneratedAnswer(parentAnswer) ? parentAnswer : null)
}, [chatList, doSend])

const chatNode = useMemo(() => {
if (inputsForms.length) {
@@ -187,6 +177,7 @@ const ChatWrapper = () => {
answerIcon={answerIcon}
hideProcessDetail
themeBuilder={themeBuilder}
switchSibling={siblingMessageId => setTargetMessageId(siblingMessageId)}
/>
</div>
)

@@ -5,7 +5,7 @@ import { createContext, useContext } from 'use-context-selector'
import type {
Callback,
ChatConfig,
ChatItem,
ChatItemInTree,
Feedback,
} from '../types'
import type { ThemeBuilder } from '../embedded-chatbot/theme/theme-context'
@@ -25,7 +25,7 @@ export type ChatWithHistoryContextValue = {
appChatListDataLoading?: boolean
currentConversationId: string
currentConversationItem?: ConversationItem
appPrevChatList: ChatItem[]
appPrevChatTree: ChatItemInTree[]
pinnedConversationList: AppConversationData['data']
conversationList: AppConversationData['data']
showConfigPanelBeforeChat: boolean
@@ -53,7 +53,7 @@ export type ChatWithHistoryContextValue = {

export const ChatWithHistoryContext = createContext<ChatWithHistoryContextValue>({
currentConversationId: '',
appPrevChatList: [],
appPrevChatTree: [],
pinnedConversationList: [],
conversationList: [],
showConfigPanelBeforeChat: false,

@@ -12,10 +12,13 @@ import produce from 'immer'
import type {
Callback,
ChatConfig,
ChatItem,
Feedback,
} from '../types'
import { CONVERSATION_ID_INFO } from '../constants'
import { getPrevChatList } from '../utils'
import { buildChatItemTree } from '../utils'
import { addFileInfos, sortAgentSorts } from '../../../tools/utils'
import { getProcessedFilesFromResponse } from '@/app/components/base/file-uploader/utils'
import {
delConversation,
fetchAppInfo,
@@ -40,6 +43,32 @@ import { useAppFavicon } from '@/hooks/use-app-favicon'
import { InputVarType } from '@/app/components/workflow/types'
import { TransferMethod } from '@/types/app'

function getFormattedChatList(messages: any[]) {
const newChatList: ChatItem[] = []
messages.forEach((item) => {
const questionFiles = item.message_files?.filter((file: any) => file.belongs_to === 'user') || []
newChatList.push({
id: `question-${item.id}`,
content: item.query,
isAnswer: false,
message_files: getProcessedFilesFromResponse(questionFiles.map((item: any) => ({ ...item, related_id: item.id }))),
parentMessageId: item.parent_message_id || undefined,
})
const answerFiles = item.message_files?.filter((file: any) => file.belongs_to === 'assistant') || []
newChatList.push({
id: item.id,
content: item.answer,
agent_thoughts: addFileInfos(item.agent_thoughts ? sortAgentSorts(item.agent_thoughts) : item.agent_thoughts, item.message_files),
feedback: item.feedback,
isAnswer: true,
citation: item.retriever_resources,
message_files: getProcessedFilesFromResponse(answerFiles.map((item: any) => ({ ...item, related_id: item.id }))),
parentMessageId: `question-${item.id}`,
})
})
return newChatList
}

export const useChatWithHistory = (installedAppInfo?: InstalledApp) => {
const isInstalledApp = useMemo(() => !!installedAppInfo, [installedAppInfo])
const { data: appInfo, isLoading: appInfoLoading, error: appInfoError } = useSWR(installedAppInfo ? null : 'appInfo', fetchAppInfo)
@@ -109,9 +138,9 @@ export const useChatWithHistory = (installedAppInfo?: InstalledApp) => {
const { data: appConversationData, isLoading: appConversationDataLoading, mutate: mutateAppConversationData } = useSWR(['appConversationData', isInstalledApp, appId, false], () => fetchConversations(isInstalledApp, appId, undefined, false, 100))
const { data: appChatListData, isLoading: appChatListDataLoading } = useSWR(chatShouldReloadKey ? ['appChatList', chatShouldReloadKey, isInstalledApp, appId] : null, () => fetchChatList(chatShouldReloadKey, isInstalledApp, appId))

const appPrevChatList = useMemo(
const appPrevChatTree = useMemo(
() => (currentConversationId && appChatListData?.data.length)
? getPrevChatList(appChatListData.data)
? buildChatItemTree(getFormattedChatList(appChatListData.data))
: [],
[appChatListData, currentConversationId],
)
@@ -403,7 +432,7 @@ export const useChatWithHistory = (installedAppInfo?: InstalledApp) => {
appConversationDataLoading,
appChatListData,
appChatListDataLoading,
appPrevChatList,
appPrevChatTree,
pinnedConversationList,
conversationList,
showConfigPanelBeforeChat,

@@ -30,7 +30,7 @@ const ChatWithHistory: FC<ChatWithHistoryProps> = ({
appInfoError,
appData,
appInfoLoading,
appPrevChatList,
appPrevChatTree,
showConfigPanelBeforeChat,
appChatListDataLoading,
chatShouldReloadKey,
@@ -38,7 +38,7 @@ const ChatWithHistory: FC<ChatWithHistoryProps> = ({
themeBuilder,
} = useChatWithHistoryContext()

const chatReady = (!showConfigPanelBeforeChat || !!appPrevChatList.length)
const chatReady = (!showConfigPanelBeforeChat || !!appPrevChatTree.length)
const customConfig = appData?.custom_config
const site = appData?.site

@@ -76,9 +76,9 @@ const ChatWithHistory: FC<ChatWithHistoryProps> = ({
<HeaderInMobile />
)
}
<div className={`grow overflow-hidden ${showConfigPanelBeforeChat && !appPrevChatList.length && 'flex items-center justify-center'}`}>
<div className={`grow overflow-hidden ${showConfigPanelBeforeChat && !appPrevChatTree.length && 'flex items-center justify-center'}`}>
{
showConfigPanelBeforeChat && !appChatListDataLoading && !appPrevChatList.length && (
showConfigPanelBeforeChat && !appChatListDataLoading && !appPrevChatTree.length && (
<div className={`flex w-full items-center justify-center h-full ${isMobile && 'px-4'}`}>
<ConfigPanel />
</div>
@@ -120,7 +120,7 @@ const ChatWithHistoryWrap: FC<ChatWithHistoryWrapProps> = ({
appChatListDataLoading,
currentConversationId,
currentConversationItem,
appPrevChatList,
appPrevChatTree,
pinnedConversationList,
conversationList,
showConfigPanelBeforeChat,
@@ -154,7 +154,7 @@ const ChatWithHistoryWrap: FC<ChatWithHistoryWrapProps> = ({
appChatListDataLoading,
currentConversationId,
currentConversationItem,
appPrevChatList,
appPrevChatTree,
pinnedConversationList,
conversationList,
showConfigPanelBeforeChat,

@@ -209,19 +209,19 @@ const Answer: FC<AnswerProps> = ({
}
{item.siblingCount && item.siblingCount > 1 && item.siblingIndex !== undefined && <div className="pt-3.5 flex justify-center items-center text-sm">
<button
className={`${item.prevSibling ? 'opacity-100' : 'opacity-65'}`}
className={`${item.prevSibling ? 'opacity-100' : 'opacity-30'}`}
disabled={!item.prevSibling}
onClick={() => item.prevSibling && switchSibling?.(item.prevSibling)}
>
<ChevronRight className="w-[14px] h-[14px] rotate-180 text-text-tertiary" />
<ChevronRight className="w-[14px] h-[14px] rotate-180 text-text-primary" />
</button>
<span className="px-2 text-xs text-text-quaternary">{item.siblingIndex + 1} / {item.siblingCount}</span>
<span className="px-2 text-xs text-text-primary">{item.siblingIndex + 1} / {item.siblingCount}</span>
<button
className={`${item.nextSibling ? 'opacity-100' : 'opacity-65'}`}
className={`${item.nextSibling ? 'opacity-100' : 'opacity-30'}`}
disabled={!item.nextSibling}
onClick={() => item.nextSibling && switchSibling?.(item.nextSibling)}
>
<ChevronRight className="w-[14px] h-[14px] text-text-tertiary" />
<ChevronRight className="w-[14px] h-[14px] text-text-primary" />
</button>
</div>}
</div>

@@ -1,6 +1,7 @@
import {
useCallback,
useEffect,
useMemo,
useRef,
useState,
} from 'react'
@@ -12,8 +13,10 @@ import { v4 as uuidV4 } from 'uuid'
import type {
ChatConfig,
ChatItem,
ChatItemInTree,
Inputs,
} from '../types'
import { getThreadMessages } from '../utils'
import type { InputForm } from './type'
import {
getProcessedInputs,
@@ -46,7 +49,7 @@ export const useChat = (
inputs: Inputs
inputsForm: InputForm[]
},
prevChatList?: ChatItem[],
prevChatTree?: ChatItemInTree[],
stopChat?: (taskId: string) => void,
) => {
const { t } = useTranslation()
@@ -56,14 +59,48 @@ export const useChat = (
const hasStopResponded = useRef(false)
const [isResponding, setIsResponding] = useState(false)
const isRespondingRef = useRef(false)
const [chatList, setChatList] = useState<ChatItem[]>(prevChatList || [])
const chatListRef = useRef<ChatItem[]>(prevChatList || [])
const taskIdRef = useRef('')
const [suggestedQuestions, setSuggestQuestions] = useState<string[]>([])
const conversationMessagesAbortControllerRef = useRef<AbortController | null>(null)
const suggestedQuestionsAbortControllerRef = useRef<AbortController | null>(null)
const params = useParams()
const pathname = usePathname()

const [chatTree, setChatTree] = useState<ChatItemInTree[]>(prevChatTree || [])
const chatTreeRef = useRef<ChatItemInTree[]>(chatTree)
const [targetMessageId, setTargetMessageId] = useState<string>()
const threadMessages = useMemo(() => getThreadMessages(chatTree, targetMessageId), [chatTree, targetMessageId])

const getIntroduction = useCallback((str: string) => {
return processOpeningStatement(str, formSettings?.inputs || {}, formSettings?.inputsForm || [])
}, [formSettings?.inputs, formSettings?.inputsForm])

/** Final chat list that will be rendered */
const chatList = useMemo(() => {
const ret = [...threadMessages]
if (config?.opening_statement) {
const index = threadMessages.findIndex(item => item.isOpeningStatement)

if (index > -1) {
ret[index] = {
...ret[index],
content: getIntroduction(config.opening_statement),
suggestedQuestions: config.suggested_questions,
}
}
else {
ret.unshift({
id: `${Date.now()}`,
content: getIntroduction(config.opening_statement),
isAnswer: true,
isOpeningStatement: true,
suggestedQuestions: config.suggested_questions,
})
}
}
return ret
}, [threadMessages, config?.opening_statement, getIntroduction, config?.suggested_questions])

useEffect(() => {
setAutoFreeze(false)
return () => {
@@ -71,43 +108,50 @@ export const useChat = (
}
}, [])

const handleUpdateChatList = useCallback((newChatList: ChatItem[]) => {
setChatList(newChatList)
chatListRef.current = newChatList
/** Find the target node by bfs and then operate on it */
const produceChatTreeNode = useCallback((targetId: string, operation: (node: ChatItemInTree) => void) => {
return produce(chatTreeRef.current, (draft) => {
const queue: ChatItemInTree[] = [...draft]
while (queue.length > 0) {
const current = queue.shift()!
if (current.id === targetId) {
operation(current)
break
}
if (current.children)
queue.push(...current.children)
}
})
}, [])

type UpdateChatTreeNode = {
(id: string, fields: Partial<ChatItemInTree>): void
(id: string, update: (node: ChatItemInTree) => void): void
}

const updateChatTreeNode: UpdateChatTreeNode = useCallback((
id: string,
fieldsOrUpdate: Partial<ChatItemInTree> | ((node: ChatItemInTree) => void),
) => {
const nextState = produceChatTreeNode(id, (node) => {
if (typeof fieldsOrUpdate === 'function') {
fieldsOrUpdate(node)
}
else {
Object.keys(fieldsOrUpdate).forEach((key) => {
(node as any)[key] = (fieldsOrUpdate as any)[key]
})
}
})
setChatTree(nextState)
chatTreeRef.current = nextState
}, [produceChatTreeNode])

const handleResponding = useCallback((isResponding: boolean) => {
setIsResponding(isResponding)
isRespondingRef.current = isResponding
}, [])

const getIntroduction = useCallback((str: string) => {
return processOpeningStatement(str, formSettings?.inputs || {}, formSettings?.inputsForm || [])
}, [formSettings?.inputs, formSettings?.inputsForm])
useEffect(() => {
if (config?.opening_statement) {
handleUpdateChatList(produce(chatListRef.current, (draft) => {
const index = draft.findIndex(item => item.isOpeningStatement)

if (index > -1) {
draft[index] = {
...draft[index],
content: getIntroduction(config.opening_statement),
suggestedQuestions: config.suggested_questions,
}
}
else {
draft.unshift({
id: `${Date.now()}`,
content: getIntroduction(config.opening_statement),
isAnswer: true,
isOpeningStatement: true,
suggestedQuestions: config.suggested_questions,
})
}
}))
}
}, [config?.opening_statement, getIntroduction, config?.suggested_questions, handleUpdateChatList])

const handleStop = useCallback(() => {
hasStopResponded.current = true
handleResponding(false)
@@ -123,50 +167,50 @@ export const useChat = (
conversationId.current = ''
taskIdRef.current = ''
handleStop()
const newChatList = config?.opening_statement
? [{
id: `${Date.now()}`,
content: config.opening_statement,
isAnswer: true,
isOpeningStatement: true,
suggestedQuestions: config.suggested_questions,
}]
: []
handleUpdateChatList(newChatList)
setChatTree([])
setSuggestQuestions([])
}, [
config,
handleStop,
handleUpdateChatList,
])
}, [handleStop])

const updateCurrentQA = useCallback(({
const updateCurrentQAOnTree = useCallback(({
parentId,
responseItem,
questionId,
placeholderAnswerId,
placeholderQuestionId,
questionItem,
}: {
parentId?: string
responseItem: ChatItem
questionId: string
placeholderAnswerId: string
placeholderQuestionId: string
questionItem: ChatItem
}) => {
const newListWithAnswer = produce(
chatListRef.current.filter(item => item.id !== responseItem.id && item.id !== placeholderAnswerId),
(draft) => {
if (!draft.find(item => item.id === questionId))
draft.push({ ...questionItem })

draft.push({ ...responseItem })
let nextState: ChatItemInTree[]
const currentQA = { ...questionItem, children: [{ ...responseItem, children: [] }] }
if (!parentId && !chatTree.some(item => [placeholderQuestionId, questionItem.id].includes(item.id))) {
// QA whose parent is not provided is considered as a first message of the conversation,
// and it should be a root node of the chat tree
nextState = produce(chatTree, (draft) => {
draft.push(currentQA)
})
handleUpdateChatList(newListWithAnswer)
}, [handleUpdateChatList])
}
else {
// find the target QA in the tree and update it; if not found, insert it to its parent node
nextState = produceChatTreeNode(parentId!, (parentNode) => {
const questionNodeIndex = parentNode.children!.findIndex(item => [placeholderQuestionId, questionItem.id].includes(item.id))
if (questionNodeIndex === -1)
parentNode.children!.push(currentQA)
else
parentNode.children![questionNodeIndex] = currentQA
})
}
setChatTree(nextState)
chatTreeRef.current = nextState
}, [chatTree, produceChatTreeNode])

const handleSend = useCallback(async (
url: string,
data: {
query: string
files?: FileEntity[]
parent_message_id?: string
[key: string]: any
},
{
@@ -183,12 +227,15 @@ export const useChat = (
return false
}

const questionId = `question-${Date.now()}`
const parentMessage = threadMessages.find(item => item.id === data.parent_message_id)

const placeholderQuestionId = `question-${Date.now()}`
const questionItem = {
id: questionId,
id: placeholderQuestionId,
content: data.query,
isAnswer: false,
message_files: data.files,
parentMessageId: data.parent_message_id,
}

const placeholderAnswerId = `answer-placeholder-${Date.now()}`
@@ -196,18 +243,27 @@ export const useChat = (
id: placeholderAnswerId,
content: '',
isAnswer: true,
parentMessageId: questionItem.id,
siblingIndex: parentMessage?.children?.length ?? chatTree.length,
}

const newList = [...chatListRef.current, questionItem, placeholderAnswerItem]
handleUpdateChatList(newList)
setTargetMessageId(parentMessage?.id)
updateCurrentQAOnTree({
parentId: data.parent_message_id,
responseItem: placeholderAnswerItem,
placeholderQuestionId,
questionItem,
})

// answer
const responseItem: ChatItem = {
const responseItem: ChatItemInTree = {
id: placeholderAnswerId,
content: '',
agent_thoughts: [],
message_files: [],
isAnswer: true,
parentMessageId: questionItem.id,
siblingIndex: parentMessage?.children?.length ?? chatTree.length,
}

handleResponding(true)
@@ -268,7 +324,9 @@ export const useChat = (
}

if (messageId && !hasSetResponseId) {
questionItem.id = `question-${messageId}`
responseItem.id = messageId
responseItem.parentMessageId = questionItem.id
hasSetResponseId = true
}

@@ -279,11 +337,11 @@ export const useChat = (
if (messageId)
responseItem.id = messageId

updateCurrentQA({
responseItem,
questionId,
placeholderAnswerId,
updateCurrentQAOnTree({
placeholderQuestionId,
questionItem,
responseItem,
parentId: data.parent_message_id,
})
},
async onCompleted(hasError?: boolean) {
@@ -304,43 +362,32 @@ export const useChat = (
if (!newResponseItem)
return

const newChatList = produce(chatListRef.current, (draft) => {
const index = draft.findIndex(item => item.id === responseItem.id)
if (index !== -1) {
const question = draft[index - 1]
draft[index - 1] = {
...question,
}
draft[index] = {
...draft[index],
content: newResponseItem.answer,
log: [
...newResponseItem.message,
...(newResponseItem.message[newResponseItem.message.length - 1].role !== 'assistant'
? [
{
role: 'assistant',
text: newResponseItem.answer,
files: newResponseItem.message_files?.filter((file: any) => file.belongs_to === 'assistant') || [],
},
]
: []),
],
more: {
time: formatTime(newResponseItem.created_at, 'hh:mm A'),
tokens: newResponseItem.answer_tokens + newResponseItem.message_tokens,
latency: newResponseItem.provider_response_latency.toFixed(2),
},
// for agent log
conversationId: conversationId.current,
input: {
inputs: newResponseItem.inputs,
query: newResponseItem.query,
},
}
}
updateChatTreeNode(responseItem.id, {
content: newResponseItem.answer,
log: [
...newResponseItem.message,
...(newResponseItem.message[newResponseItem.message.length - 1].role !== 'assistant'
? [
{
role: 'assistant',
text: newResponseItem.answer,
files: newResponseItem.message_files?.filter((file: any) => file.belongs_to === 'assistant') || [],
},
]
: []),
],
more: {
time: formatTime(newResponseItem.created_at, 'hh:mm A'),
tokens: newResponseItem.answer_tokens + newResponseItem.message_tokens,
latency: newResponseItem.provider_response_latency.toFixed(2),
},
// for agent log
conversationId: conversationId.current,
input: {
inputs: newResponseItem.inputs,
query: newResponseItem.query,
},
})
handleUpdateChatList(newChatList)
}
if (config?.suggested_questions_after_answer?.enabled && !hasStopResponded.current && onGetSuggestedQuestions) {
try {
@@ -360,11 +407,11 @@ export const useChat = (
if (lastThought)
responseItem.agent_thoughts![responseItem.agent_thoughts!.length - 1].message_files = [...(lastThought as any).message_files, file]

updateCurrentQA({
responseItem,
questionId,
placeholderAnswerId,
updateCurrentQAOnTree({
placeholderQuestionId,
questionItem,
responseItem,
parentId: data.parent_message_id,
})
},
onThought(thought) {
@@ -372,6 +419,7 @@ export const useChat = (
const response = responseItem as any
if (thought.message_id && !hasSetResponseId)
response.id = thought.message_id

if (response.agent_thoughts.length === 0) {
response.agent_thoughts.push(thought)
}
@@ -387,11 +435,11 @@ export const useChat = (
responseItem.agent_thoughts!.push(thought)
}
}
updateCurrentQA({
responseItem,
questionId,
placeholderAnswerId,
updateCurrentQAOnTree({
placeholderQuestionId,
questionItem,
responseItem,
parentId: data.parent_message_id,
})
},
onMessageEnd: (messageEnd) => {
@@ -401,43 +449,36 @@ export const useChat = (
id: messageEnd.metadata.annotation_reply.id,
authorName: messageEnd.metadata.annotation_reply.account.name,
})
const baseState = chatListRef.current.filter(item => item.id !== responseItem.id && item.id !== placeholderAnswerId)
const newListWithAnswer = produce(
baseState,
(draft) => {
if (!draft.find(item => item.id === questionId))
draft.push({ ...questionItem })

draft.push({
...responseItem,
})
})
handleUpdateChatList(newListWithAnswer)
updateCurrentQAOnTree({
placeholderQuestionId,
questionItem,
responseItem,
parentId: data.parent_message_id,
})
return
}
responseItem.citation = messageEnd.metadata?.retriever_resources || []
const processedFilesFromResponse = getProcessedFilesFromResponse(messageEnd.files || [])
responseItem.allFiles = uniqBy([...(responseItem.allFiles || []), ...(processedFilesFromResponse || [])], 'id')

const newListWithAnswer = produce(
chatListRef.current.filter(item => item.id !== responseItem.id && item.id !== placeholderAnswerId),
(draft) => {
if (!draft.find(item => item.id === questionId))
draft.push({ ...questionItem })

draft.push({ ...responseItem })
})
handleUpdateChatList(newListWithAnswer)
updateCurrentQAOnTree({
placeholderQuestionId,
questionItem,
responseItem,
parentId: data.parent_message_id,
})
},
onMessageReplace: (messageReplace) => {
responseItem.content = messageReplace.answer
},
onError() {
handleResponding(false)
const newChatList = produce(chatListRef.current, (draft) => {
draft.splice(draft.findIndex(item => item.id === placeholderAnswerId), 1)
updateCurrentQAOnTree({
placeholderQuestionId,
questionItem,
responseItem,
parentId: data.parent_message_id,
})
handleUpdateChatList(newChatList)
},
onWorkflowStarted: ({ workflow_run_id, task_id }) => {
taskIdRef.current = task_id
@@ -446,89 +487,84 @@ export const useChat = (
status: WorkflowRunningStatus.Running,
tracing: [],
}
handleUpdateChatList(produce(chatListRef.current, (draft) => {
const currentIndex = draft.findIndex(item => item.id === responseItem.id)
draft[currentIndex] = {
...draft[currentIndex],
...responseItem,
}
}))
updateCurrentQAOnTree({
placeholderQuestionId,
questionItem,
responseItem,
parentId: data.parent_message_id,
})
},
onWorkflowFinished: ({ data }) => {
responseItem.workflowProcess!.status = data.status as WorkflowRunningStatus
handleUpdateChatList(produce(chatListRef.current, (draft) => {
const currentIndex = draft.findIndex(item => item.id === responseItem.id)
draft[currentIndex] = {
...draft[currentIndex],
...responseItem,
}
}))
onWorkflowFinished: ({ data: workflowFinishedData }) => {
responseItem.workflowProcess!.status = workflowFinishedData.status as WorkflowRunningStatus
updateCurrentQAOnTree({
placeholderQuestionId,
questionItem,
responseItem,
parentId: data.parent_message_id,
})
},
onIterationStart: ({ data }) => {
onIterationStart: ({ data: iterationStartedData }) => {
responseItem.workflowProcess!.tracing!.push({
...data,
...iterationStartedData,
status: WorkflowRunningStatus.Running,
} as any)
handleUpdateChatList(produce(chatListRef.current, (draft) => {
const currentIndex = draft.findIndex(item => item.id === responseItem.id)
draft[currentIndex] = {
...draft[currentIndex],
...responseItem,
}
}))
updateCurrentQAOnTree({
placeholderQuestionId,
questionItem,
responseItem,
parentId: data.parent_message_id,
})
},
onIterationFinish: ({ data }) => {
onIterationFinish: ({ data: iterationFinishedData }) => {
const tracing = responseItem.workflowProcess!.tracing!
const iterationIndex = tracing.findIndex(item => item.node_id === data.node_id
&& (item.execution_metadata?.parallel_id === data.execution_metadata?.parallel_id || item.parallel_id === data.execution_metadata?.parallel_id))!
const iterationIndex = tracing.findIndex(item => item.node_id === iterationFinishedData.node_id
&& (item.execution_metadata?.parallel_id === iterationFinishedData.execution_metadata?.parallel_id || item.parallel_id === iterationFinishedData.execution_metadata?.parallel_id))!
tracing[iterationIndex] = {
...tracing[iterationIndex],
...data,
...iterationFinishedData,
status: WorkflowRunningStatus.Succeeded,
} as any

handleUpdateChatList(produce(chatListRef.current, (draft) => {
const currentIndex = draft.findIndex(item => item.id === responseItem.id)
draft[currentIndex] = {
...draft[currentIndex],
...responseItem,
}
}))
updateCurrentQAOnTree({
placeholderQuestionId,
questionItem,
responseItem,
parentId: data.parent_message_id,
})
},
onNodeStarted: ({ data }) => {
if (data.iteration_id)
onNodeStarted: ({ data: nodeStartedData }) => {
if (nodeStartedData.iteration_id)
return

responseItem.workflowProcess!.tracing!.push({
...data,
...nodeStartedData,
status: WorkflowRunningStatus.Running,
} as any)
handleUpdateChatList(produce(chatListRef.current, (draft) => {
const currentIndex = draft.findIndex(item => item.id === responseItem.id)
draft[currentIndex] = {
...draft[currentIndex],
...responseItem,
}
}))
updateCurrentQAOnTree({
placeholderQuestionId,
questionItem,
responseItem,
parentId: data.parent_message_id,
})
},
onNodeFinished: ({ data }) => {
if (data.iteration_id)
onNodeFinished: ({ data: nodeFinishedData }) => {
if (nodeFinishedData.iteration_id)
return

const currentIndex = responseItem.workflowProcess!.tracing!.findIndex((item) => {
if (!item.execution_metadata?.parallel_id)
return item.node_id === data.node_id
return item.node_id === nodeFinishedData.node_id

return item.node_id === data.node_id && (item.execution_metadata?.parallel_id === data.execution_metadata.parallel_id)
return item.node_id === nodeFinishedData.node_id && (item.execution_metadata?.parallel_id === nodeFinishedData.execution_metadata.parallel_id)
})
responseItem.workflowProcess!.tracing[currentIndex] = nodeFinishedData as any

updateCurrentQAOnTree({
placeholderQuestionId,
questionItem,
responseItem,
parentId: data.parent_message_id,
})
responseItem.workflowProcess!.tracing[currentIndex] = data as any
handleUpdateChatList(produce(chatListRef.current, (draft) => {
const currentIndex = draft.findIndex(item => item.id === responseItem.id)
draft[currentIndex] = {
...draft[currentIndex],
...responseItem,
}
}))
},
onTTSChunk: (messageId: string, audio: string) => {
if (!audio || audio === '')
@@ -542,11 +578,13 @@ export const useChat = (
})
return true
}, [
config?.suggested_questions_after_answer,
updateCurrentQA,
t,
chatTree.length,
threadMessages,
config?.suggested_questions_after_answer,
updateCurrentQAOnTree,
updateChatTreeNode,
notify,
handleUpdateChatList,
handleResponding,
formatTime,
params.token,
@@ -556,76 +594,61 @@ export const useChat = (
])

const handleAnnotationEdited = useCallback((query: string, answer: string, index: number) => {
handleUpdateChatList(chatListRef.current.map((item, i) => {
if (i === index - 1) {
return {
...item,
content: query,
}
}
if (i === index) {
return {
...item,
content: answer,
annotation: {
...item.annotation,
logAnnotation: undefined,
} as any,
}
}
return item
}))
}, [handleUpdateChatList])
const targetQuestionId = chatList[index - 1].id
const targetAnswerId = chatList[index].id

updateChatTreeNode(targetQuestionId, {
content: query,
})
updateChatTreeNode(targetAnswerId, {
content: answer,
annotation: {
...chatList[index].annotation,
logAnnotation: undefined,
} as any,
})
}, [chatList, updateChatTreeNode])

const handleAnnotationAdded = useCallback((annotationId: string, authorName: string, query: string, answer: string, index: number) => {
handleUpdateChatList(chatListRef.current.map((item, i) => {
if (i === index - 1) {
return {
...item,
content: query,
}
}
if (i === index) {
const answerItem = {
...item,
content: item.content,
annotation: {
id: annotationId,
authorName,
logAnnotation: {
content: answer,
account: {
id: '',
name: authorName,
email: '',
},
},
} as Annotation,
}
return answerItem
}
return item
}))
}, [handleUpdateChatList])
const handleAnnotationRemoved = useCallback((index: number) => {
handleUpdateChatList(chatListRef.current.map((item, i) => {
if (i === index) {
return {
...item,
content: item.content,
annotation: {
...(item.annotation || {}),
const targetQuestionId = chatList[index - 1].id
const targetAnswerId = chatList[index].id

updateChatTreeNode(targetQuestionId, {
content: query,
})

updateChatTreeNode(targetAnswerId, {
content: chatList[index].content,
annotation: {
id: annotationId,
authorName,
logAnnotation: {
content: answer,
account: {
id: '',
} as Annotation,
}
}
return item
}))
}, [handleUpdateChatList])
name: authorName,
email: '',
},
},
} as Annotation,
})
}, [chatList, updateChatTreeNode])

const handleAnnotationRemoved = useCallback((index: number) => {
const targetAnswerId = chatList[index].id

updateChatTreeNode(targetAnswerId, {
content: chatList[index].content,
annotation: {
...(chatList[index].annotation || {}),
id: '',
} as Annotation,
})
}, [chatList, updateChatTreeNode])

return {
chatList,
chatListRef,
handleUpdateChatList,
setTargetMessageId,
conversationId: conversationId.current,
isResponding,
setIsResponding,

@@ -3,10 +3,11 @@ import Chat from '../chat'
import type {
ChatConfig,
ChatItem,
ChatItemInTree,
OnSend,
} from '../types'
import { useChat } from '../chat/hooks'
import { getLastAnswer } from '../utils'
import { getLastAnswer, isValidGeneratedAnswer } from '../utils'
import { useEmbeddedChatbotContext } from './context'
import ConfigPanel from './config-panel'
import { isDify } from './utils'
@@ -51,13 +52,12 @@ const ChatWrapper = () => {
} as ChatConfig
}, [appParams, currentConversationItem?.introduction, currentConversationId])
const {
chatListRef,
chatList,
setTargetMessageId,
handleSend,
handleStop,
isResponding,
suggestedQuestions,
handleUpdateChatList,
} = useChat(
appConfig,
{
@@ -71,15 +71,15 @@ const ChatWrapper = () => {
useEffect(() => {
if (currentChatInstanceRef.current)
currentChatInstanceRef.current.handleStop = handleStop
}, [])
}, [currentChatInstanceRef, handleStop])

const doSend: OnSend = useCallback((message, files, last_answer) => {
const doSend: OnSend = useCallback((message, files, isRegenerate = false, parentAnswer: ChatItem | null = null) => {
const data: any = {
query: message,
files,
inputs: currentConversationId ? currentConversationItem?.inputs : newConversationInputs,
conversation_id: currentConversationId,
parent_message_id: last_answer?.id || getLastAnswer(chatListRef.current)?.id || null,
parent_message_id: (isRegenerate ? parentAnswer?.id : getLastAnswer(chatList)?.id) || null,
}

handleSend(
@@ -92,32 +92,21 @@ const ChatWrapper = () => {
},
)
}, [
chatListRef,
appConfig,
chatList,
handleNewConversationCompleted,
handleSend,
currentConversationId,
currentConversationItem,
handleSend,
newConversationInputs,
handleNewConversationCompleted,
isInstalledApp,
appId,
])

const doRegenerate = useCallback((chatItem: ChatItem) => {
const index = chatList.findIndex(item => item.id === chatItem.id)
if (index === -1)
return

const prevMessages = chatList.slice(0, index)
const question = prevMessages.pop()
const lastAnswer = getLastAnswer(prevMessages)

if (!question)
return

handleUpdateChatList(prevMessages)
doSend(question.content, question.message_files, lastAnswer)
}, [chatList, handleUpdateChatList, doSend])
const doRegenerate = useCallback((chatItem: ChatItemInTree) => {
const question = chatList.find(item => item.id === chatItem.parentMessageId)!
const parentAnswer = chatList.find(item => item.id === question.parentMessageId)
doSend(question.content, question.message_files, true, isValidGeneratedAnswer(parentAnswer) ? parentAnswer : null)
}, [chatList, doSend])

const chatNode = useMemo(() => {
if (inputsForms.length) {
@@ -172,6 +161,7 @@ const ChatWrapper = () => {
answerIcon={answerIcon}
hideProcessDetail
themeBuilder={themeBuilder}
switchSibling={siblingMessageId => setTargetMessageId(siblingMessageId)}
/>
)
}

@@ -67,9 +67,12 @@ export type ChatItem = IChatItem & {

export type ChatItemInTree = {
children?: ChatItemInTree[]
} & IChatItem
} & ChatItem

export type OnSend = (message: string, files?: FileEntity[], last_answer?: ChatItem | null) => void
export type OnSend = {
(message: string, files?: FileEntity[]): void
(message: string, files: FileEntity[] | undefined, isRegenerate: boolean, lastAnswer?: ChatItem | null): void
}

export type OnRegenerate = (chatItem: ChatItem) => void

@ -1,8 +1,6 @@
|
|||
import { addFileInfos, sortAgentSorts } from '../../tools/utils'
|
||||
import { UUID_NIL } from './constants'
|
||||
import type { IChatItem } from './chat/type'
|
||||
import type { ChatItem, ChatItemInTree } from './types'
|
||||
import { getProcessedFilesFromResponse } from '@/app/components/base/file-uploader/utils'
|
||||
|
||||
async function decodeBase64AndDecompress(base64String: string) {
|
||||
const binaryString = atob(base64String)
|
||||
|
|
@ -21,67 +19,24 @@ function getProcessedInputsFromUrlParams(): Record<string, any> {
  return inputs
}

function getLastAnswer(chatList: ChatItem[]) {
function isValidGeneratedAnswer(item?: ChatItem | ChatItemInTree): boolean {
  return !!item && item.isAnswer && !item.id.startsWith('answer-placeholder-') && !item.isOpeningStatement
}

function getLastAnswer<T extends ChatItem | ChatItemInTree>(chatList: T[]): T | null {
  for (let i = chatList.length - 1; i >= 0; i--) {
    const item = chatList[i]
    if (item.isAnswer && !item.id.startsWith('answer-placeholder-') && !item.isOpeningStatement)
    if (isValidGeneratedAnswer(item))
      return item
  }
  return null
}
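A small usage sketch for `getLastAnswer` with hypothetical items (real `ChatItem`s carry more fields; the import paths follow the files in this diff):

```ts
import type { ChatItem } from './types'
import { getLastAnswer } from './utils' // exported below

const chatList = [
  { id: 'question-1', content: 'Hi', isAnswer: false },
  { id: 'a1', content: 'Hello!', isAnswer: true },
  { id: 'answer-placeholder-123', content: '', isAnswer: true }, // still streaming
] as ChatItem[]

// Scans backwards, skips the streaming placeholder (and any opening
// statement), and returns the item with id 'a1'.
const last = getLastAnswer(chatList)
```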

function appendQAToChatList(chatList: ChatItem[], item: any) {
  // we append the answer first and then the question, since the whole chatList will be reversed later
  const answerFiles = item.message_files?.filter((file: any) => file.belongs_to === 'assistant') || []
  chatList.push({
    id: item.id,
    content: item.answer,
    agent_thoughts: addFileInfos(item.agent_thoughts ? sortAgentSorts(item.agent_thoughts) : item.agent_thoughts, item.message_files),
    feedback: item.feedback,
    isAnswer: true,
    citation: item.retriever_resources,
    message_files: getProcessedFilesFromResponse(answerFiles.map((item: any) => ({ ...item, related_id: item.id }))),
  })
  const questionFiles = item.message_files?.filter((file: any) => file.belongs_to === 'user') || []
  chatList.push({
    id: `question-${item.id}`,
    content: item.query,
    isAnswer: false,
    message_files: getProcessedFilesFromResponse(questionFiles.map((item: any) => ({ ...item, related_id: item.id }))),
  })
}

/**
 * Computes the latest thread messages from all messages of the conversation.
 * Same logic as the backend codebase `api/core/prompt/utils/extract_thread_messages.py`
 *
 * @param fetchedMessages - The history chat list data from the backend, sorted by created_at in descending order. This includes all flattened history messages of the conversation.
 * @returns An array of ChatItems representing the latest thread.
 * Build a chat item tree from a chat list
 * @param allMessages - The chat list, sorted from oldest to newest
 * @returns The chat item tree
 */
function getPrevChatList(fetchedMessages: any[]) {
  const ret: ChatItem[] = []
  let nextMessageId = null

  for (const item of fetchedMessages) {
    if (!item.parent_message_id) {
      appendQAToChatList(ret, item)
      break
    }

    if (!nextMessageId) {
      appendQAToChatList(ret, item)
      nextMessageId = item.parent_message_id
    }
    else {
      if (item.id === nextMessageId || nextMessageId === UUID_NIL) {
        appendQAToChatList(ret, item)
        nextMessageId = item.parent_message_id
      }
    }
  }
  return ret.reverse()
}

function buildChatItemTree(allMessages: IChatItem[]): ChatItemInTree[] {
  const map: Record<string, ChatItemInTree> = {}
  const rootNodes: ChatItemInTree[] = []

@@ -208,7 +163,7 @@ function getThreadMessages(tree: ChatItemInTree[], targetMessageId?: string): Ch

export {
  getProcessedInputsFromUrlParams,
  getPrevChatList,
  isValidGeneratedAnswer,
  getLastAnswer,
  buildChatItemTree,
  getThreadMessages,
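`buildChatItemTree` is truncated in this diff, but the technique is the standard parent-pointer-to-tree pass over `parentMessageId`: index every item by id, then attach each item to its parent's `children`, treating items without a known parent as roots. A hedged sketch of that technique under simplified types (not the exact Dify implementation):

```ts
type Node = { id: string; parentMessageId?: string; children?: Node[] }

// One pass to index every item, a second to attach each item to its
// parent's children array; items whose parent is unknown become roots.
function buildTree(allMessages: Node[]): Node[] {
  const map: Record<string, Node> = {}
  const rootNodes: Node[] = []
  for (const item of allMessages)
    map[item.id] = { ...item, children: [] }
  for (const item of allMessages) {
    const parent = item.parentMessageId ? map[item.parentMessageId] : undefined
    if (parent)
      parent.children!.push(map[item.id])
    else
      rootNodes.push(map[item.id])
  }
  return rootNodes
}
```

`getThreadMessages` then walks this tree from the roots, following a chosen child at each fork (the target sibling id, when given), to flatten one thread back into a linear chat list.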

@@ -229,7 +229,11 @@ export function Markdown(props: { content: string; className?: string }) {
  return (
    <div className={cn(props.className, 'markdown-body')}>
      <ReactMarkdown
        remarkPlugins={[RemarkGfm, RemarkMath, RemarkBreaks]}
        remarkPlugins={[
          RemarkGfm,
          [RemarkMath, { singleDollarTextMath: false }],
          RemarkBreaks,
        ]}
        rehypePlugins={[
          RehypeKatex,
          RehypeRaw as any,
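The `[RemarkMath, { singleDollarTextMath: false }]` tuple disables single-dollar inline math, so strings like `$5` in model output render as plain text while `$$...$$` spans still become math. A small standalone sketch of the same remark-math option; the pipeline here is illustrative, not Dify code:

```ts
import { unified } from 'unified'
import remarkParse from 'remark-parse'
import remarkMath from 'remark-math'

// With singleDollarTextMath disabled, "$5" stays plain text;
// only double-dollar spans become math nodes.
const processor = unified()
  .use(remarkParse)
  .use(remarkMath, { singleDollarTextMath: false })

const tree = processor.parse('It costs $5, and $$E = mc^2$$ is math.')
// Walking `tree` yields one math node for the double-dollar span
// and none for the "$5" text.
```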

@@ -192,8 +192,8 @@ The text generation application offers non-session support and is ideal for tran
  data: {"event": "message", "message_id": "5ad4cb98-f0c7-4085-b384-88c403be6290", "answer": "'m", "created_at": 1679586595}
  data: {"event": "message", "message_id": "5ad4cb98-f0c7-4085-b384-88c403be6290", "answer": " glad", "created_at": 1679586595}
  data: {"event": "message", "message_id": "5ad4cb98-f0c7-4085-b384-88c403be6290", "answer": " to", "created_at": 1679586595}
  data: {"event": "message", "message_id": : "5ad4cb98-f0c7-4085-b384-88c403be6290", "answer": " meet", "created_at": 1679586595}
  data: {"event": "message", "message_id": : "5ad4cb98-f0c7-4085-b384-88c403be6290", "answer": " you", "created_at": 1679586595}
  data: {"event": "message", "message_id": "5ad4cb98-f0c7-4085-b384-88c403be6290", "answer": " meet", "created_at": 1679586595}
  data: {"event": "message", "message_id": "5ad4cb98-f0c7-4085-b384-88c403be6290", "answer": " you", "created_at": 1679586595}
  data: {"event": "message_end", "id": "5e52ce04-874b-4d27-9045-b3bc80def685", "metadata": {"usage": {"prompt_tokens": 1033, "prompt_unit_price": "0.001", "prompt_price_unit": "0.001", "prompt_price": "0.0010330", "completion_tokens": 135, "completion_unit_price": "0.002", "completion_price_unit": "0.001", "completion_price": "0.0002700", "total_tokens": 1168, "total_price": "0.0013030", "currency": "USD", "latency": 1.381760165997548}}}
  data: {"event": "tts_message", "conversation_id": "23dd85f3-1a41-4ea0-b7a9-062734ccfaf9", "message_id": "a8bdc41c-13b2-4c18-bfd9-054b9803038c", "created_at": 1721205487, "task_id": "3bf8a0bb-e73b-4690-9e66-4e429bad8ee7", "audio": "qqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqq"}
  data: {"event": "tts_message_end", "conversation_id": "23dd85f3-1a41-4ea0-b7a9-062734ccfaf9", "message_id": "a8bdc41c-13b2-4c18-bfd9-054b9803038c", "created_at": 1721205487, "task_id": "3bf8a0bb-e73b-4690-9e66-4e429bad8ee7", "audio": ""}

@@ -400,7 +400,7 @@ The text generation application offers non-session support and is ideal for tran
    For text messages generated by Dify, simply pass the generated message-id directly. The backend will use the message-id to look up the corresponding content and synthesize the voice information directly. If both message_id and text are provided simultaneously, the message_id is given priority.
    </Property>
    <Property name='text' type='str' key='text'>
    Speech generated content。
    Speech generated content.
    </Property>
    <Property name='user' type='string' key='user'>
    The user identifier, defined by the developer, must ensure uniqueness within the app.
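A hedged TypeScript sketch of calling this endpoint, assuming the `/text-to-audio` path and Bearer auth used by the other endpoints in these docs; the base URL and key are placeholders:

```ts
// Assumptions: endpoint path /text-to-audio and Bearer auth, as used by
// the other endpoints in these docs; API_BASE and API_KEY are placeholders.
const API_BASE = 'https://api.dify.ai/v1'
const API_KEY = 'your-api-key'

async function textToAudio(messageId: string, user: string): Promise<Blob> {
  const res = await fetch(`${API_BASE}/text-to-audio`, {
    method: 'POST',
    headers: {
      'Authorization': `Bearer ${API_KEY}`,
      'Content-Type': 'application/json',
    },
    // message_id takes priority over text when both are supplied.
    body: JSON.stringify({ message_id: messageId, user }),
  })
  return res.blob() // synthesized audio
}
```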

@@ -0,0 +1,5 @@
import { BlockEnum } from './types'

export const ALL_AVAILABLE_BLOCKS = Object.values(BlockEnum)
export const ALL_CHAT_AVAILABLE_BLOCKS = ALL_AVAILABLE_BLOCKS.filter(key => key !== BlockEnum.End && key !== BlockEnum.Start) as BlockEnum[]
export const ALL_COMPLETION_AVAILABLE_BLOCKS = ALL_AVAILABLE_BLOCKS.filter(key => key !== BlockEnum.Answer && key !== BlockEnum.Start) as BlockEnum[]
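Deriving these lists from `Object.values(BlockEnum)` in a tiny standalone module, instead of from `Object.keys(NODES_EXTRA_DATA)` inside `constants`, lets the node defaults below import them without pulling in the heavier constants module; the circular-import motivation is a reading of the diff, not stated in it. A hypothetical consumer:

```ts
// Hypothetical helper showing how a node default might pick its allowed
// neighbours from the new module; isChatMode is illustrative.
import { ALL_CHAT_AVAILABLE_BLOCKS, ALL_COMPLETION_AVAILABLE_BLOCKS } from '@/app/components/workflow/blocks'
import type { BlockEnum } from '@/app/components/workflow/types'

const getAvailableBlocks = (isChatMode: boolean): BlockEnum[] =>
  isChatMode ? ALL_CHAT_AVAILABLE_BLOCKS : ALL_COMPLETION_AVAILABLE_BLOCKS
```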

@@ -203,9 +203,6 @@ export const NODES_EXTRA_DATA: Record<BlockEnum, NodesExtraData> = {
}

export const ALL_CHAT_AVAILABLE_BLOCKS = Object.keys(NODES_EXTRA_DATA).filter(key => key !== BlockEnum.End && key !== BlockEnum.Start) as BlockEnum[]
export const ALL_COMPLETION_AVAILABLE_BLOCKS = Object.keys(NODES_EXTRA_DATA).filter(key => key !== BlockEnum.Answer && key !== BlockEnum.Start) as BlockEnum[]

export const NODES_INITIAL_DATA = {
  [BlockEnum.Start]: {
    type: BlockEnum.Start,

@@ -1,7 +1,7 @@
import { BlockEnum } from '../../types'
import type { NodeDefault } from '../../types'
import type { AnswerNodeType } from './types'
import { ALL_CHAT_AVAILABLE_BLOCKS, ALL_COMPLETION_AVAILABLE_BLOCKS } from '@/app/components/workflow/constants'
import { ALL_CHAT_AVAILABLE_BLOCKS, ALL_COMPLETION_AVAILABLE_BLOCKS } from '@/app/components/workflow/blocks'

const nodeDefault: NodeDefault<AnswerNodeType> = {
  defaultValue: {

@@ -1,7 +1,7 @@
import { BlockEnum } from '../../types'
import type { NodeDefault } from '../../types'
import { type AssignerNodeType, WriteMode } from './types'
import { ALL_CHAT_AVAILABLE_BLOCKS, ALL_COMPLETION_AVAILABLE_BLOCKS } from '@/app/components/workflow/constants'
import { ALL_CHAT_AVAILABLE_BLOCKS, ALL_COMPLETION_AVAILABLE_BLOCKS } from '@/app/components/workflow/blocks'
const i18nPrefix = 'workflow.errorMsg'

const nodeDefault: NodeDefault<AssignerNodeType> = {

@@ -1,7 +1,7 @@
import { BlockEnum } from '../../types'
import type { NodeDefault } from '../../types'
import { CodeLanguage, type CodeNodeType } from './types'
import { ALL_CHAT_AVAILABLE_BLOCKS, ALL_COMPLETION_AVAILABLE_BLOCKS } from '@/app/components/workflow/constants'
import { ALL_CHAT_AVAILABLE_BLOCKS, ALL_COMPLETION_AVAILABLE_BLOCKS } from '@/app/components/workflow/blocks'

const i18nPrefix = 'workflow.errorMsg'

@@ -1,7 +1,7 @@
import { BlockEnum } from '../../types'
import type { NodeDefault } from '../../types'
import { type DocExtractorNodeType } from './types'
import { ALL_CHAT_AVAILABLE_BLOCKS, ALL_COMPLETION_AVAILABLE_BLOCKS } from '@/app/components/workflow/constants'
import { ALL_CHAT_AVAILABLE_BLOCKS, ALL_COMPLETION_AVAILABLE_BLOCKS } from '@/app/components/workflow/blocks'
const i18nPrefix = 'workflow.errorMsg'

const nodeDefault: NodeDefault<DocExtractorNodeType> = {

@@ -1,7 +1,7 @@
import { BlockEnum } from '../../types'
import type { NodeDefault } from '../../types'
import { type EndNodeType } from './types'
import { ALL_CHAT_AVAILABLE_BLOCKS, ALL_COMPLETION_AVAILABLE_BLOCKS } from '@/app/components/workflow/constants'
import { ALL_CHAT_AVAILABLE_BLOCKS, ALL_COMPLETION_AVAILABLE_BLOCKS } from '@/app/components/workflow/blocks'

const nodeDefault: NodeDefault<EndNodeType> = {
  defaultValue: {

@@ -5,7 +5,7 @@ import type { BodyPayload, HttpNodeType } from './types'
import {
  ALL_CHAT_AVAILABLE_BLOCKS,
  ALL_COMPLETION_AVAILABLE_BLOCKS,
} from '@/app/components/workflow/constants'
} from '@/app/components/workflow/blocks'

const nodeDefault: NodeDefault<HttpNodeType> = {
  defaultValue: {

@@ -2,7 +2,7 @@ import { BlockEnum, type NodeDefault } from '../../types'
import { type IfElseNodeType, LogicalOperator } from './types'
import { isEmptyRelatedOperator } from './utils'
import { TransferMethod } from '@/types/app'
import { ALL_CHAT_AVAILABLE_BLOCKS, ALL_COMPLETION_AVAILABLE_BLOCKS } from '@/app/components/workflow/constants'
import { ALL_CHAT_AVAILABLE_BLOCKS, ALL_COMPLETION_AVAILABLE_BLOCKS } from '@/app/components/workflow/blocks'
const i18nPrefix = 'workflow.errorMsg'

const nodeDefault: NodeDefault<IfElseNodeType> = {

@@ -1,6 +1,6 @@
import type { NodeDefault } from '../../types'
import type { IterationStartNodeType } from './types'
import { ALL_CHAT_AVAILABLE_BLOCKS, ALL_COMPLETION_AVAILABLE_BLOCKS } from '@/app/components/workflow/constants'
import { ALL_CHAT_AVAILABLE_BLOCKS, ALL_COMPLETION_AVAILABLE_BLOCKS } from '@/app/components/workflow/blocks'

const nodeDefault: NodeDefault<IterationStartNodeType> = {
  defaultValue: {},

@@ -4,7 +4,7 @@ import type { IterationNodeType } from './types'
import {
  ALL_CHAT_AVAILABLE_BLOCKS,
  ALL_COMPLETION_AVAILABLE_BLOCKS,
} from '@/app/components/workflow/constants'
} from '@/app/components/workflow/blocks'
const i18nPrefix = 'workflow'

const nodeDefault: NodeDefault<IterationNodeType> = {

@@ -2,7 +2,7 @@ import { BlockEnum } from '../../types'
import type { NodeDefault } from '../../types'
import type { KnowledgeRetrievalNodeType } from './types'
import { checkoutRerankModelConfigedInRetrievalSettings } from './utils'
import { ALL_CHAT_AVAILABLE_BLOCKS, ALL_COMPLETION_AVAILABLE_BLOCKS } from '@/app/components/workflow/constants'
import { ALL_CHAT_AVAILABLE_BLOCKS, ALL_COMPLETION_AVAILABLE_BLOCKS } from '@/app/components/workflow/blocks'
import { DATASET_DEFAULT } from '@/config'
import { RETRIEVE_TYPE } from '@/types/app'
const i18nPrefix = 'workflow'

@@ -2,7 +2,7 @@ import { BlockEnum, VarType } from '../../types'
import type { NodeDefault } from '../../types'
import { comparisonOperatorNotRequireValue } from '../if-else/utils'
import { type ListFilterNodeType, OrderBy } from './types'
import { ALL_CHAT_AVAILABLE_BLOCKS, ALL_COMPLETION_AVAILABLE_BLOCKS } from '@/app/components/workflow/constants'
import { ALL_CHAT_AVAILABLE_BLOCKS, ALL_COMPLETION_AVAILABLE_BLOCKS } from '@/app/components/workflow/blocks'
const i18nPrefix = 'workflow.errorMsg'

const nodeDefault: NodeDefault<ListFilterNodeType> = {

@@ -1,7 +1,7 @@
import { BlockEnum, EditionType } from '../../types'
import { type NodeDefault, type PromptItem, PromptRole } from '../../types'
import type { LLMNodeType } from './types'
import { ALL_CHAT_AVAILABLE_BLOCKS, ALL_COMPLETION_AVAILABLE_BLOCKS } from '@/app/components/workflow/constants'
import { ALL_CHAT_AVAILABLE_BLOCKS, ALL_COMPLETION_AVAILABLE_BLOCKS } from '@/app/components/workflow/blocks'

const i18nPrefix = 'workflow.errorMsg'

@@ -1,7 +1,7 @@
import { BlockEnum } from '../../types'
import type { NodeDefault } from '../../types'
import { type ParameterExtractorNodeType, ReasoningModeType } from './types'
import { ALL_CHAT_AVAILABLE_BLOCKS, ALL_COMPLETION_AVAILABLE_BLOCKS } from '@/app/components/workflow/constants'
import { ALL_CHAT_AVAILABLE_BLOCKS, ALL_COMPLETION_AVAILABLE_BLOCKS } from '@/app/components/workflow/blocks'
const i18nPrefix = 'workflow'

const nodeDefault: NodeDefault<ParameterExtractorNodeType> = {

@@ -1,7 +1,7 @@
import type { NodeDefault } from '../../types'
import { BlockEnum } from '../../types'
import type { QuestionClassifierNodeType } from './types'
import { ALL_CHAT_AVAILABLE_BLOCKS, ALL_COMPLETION_AVAILABLE_BLOCKS } from '@/app/components/workflow/constants'
import { ALL_CHAT_AVAILABLE_BLOCKS, ALL_COMPLETION_AVAILABLE_BLOCKS } from '@/app/components/workflow/blocks'

const i18nPrefix = 'workflow'

@@ -1,6 +1,6 @@
import type { NodeDefault } from '../../types'
import type { StartNodeType } from './types'
import { ALL_CHAT_AVAILABLE_BLOCKS, ALL_COMPLETION_AVAILABLE_BLOCKS } from '@/app/components/workflow/constants'
import { ALL_CHAT_AVAILABLE_BLOCKS, ALL_COMPLETION_AVAILABLE_BLOCKS } from '@/app/components/workflow/blocks'

const nodeDefault: NodeDefault<StartNodeType> = {
  defaultValue: {

@@ -1,7 +1,7 @@
import { BlockEnum } from '../../types'
import type { NodeDefault } from '../../types'
import type { TemplateTransformNodeType } from './types'
import { ALL_CHAT_AVAILABLE_BLOCKS, ALL_COMPLETION_AVAILABLE_BLOCKS } from '@/app/components/workflow/constants'
import { ALL_CHAT_AVAILABLE_BLOCKS, ALL_COMPLETION_AVAILABLE_BLOCKS } from '@/app/components/workflow/blocks'
const i18nPrefix = 'workflow.errorMsg'

const nodeDefault: NodeDefault<TemplateTransformNodeType> = {

@@ -2,7 +2,7 @@ import { BlockEnum } from '../../types'
import type { NodeDefault } from '../../types'
import type { ToolNodeType } from './types'
import { VarType as VarKindType } from '@/app/components/workflow/nodes/tool/types'
import { ALL_CHAT_AVAILABLE_BLOCKS, ALL_COMPLETION_AVAILABLE_BLOCKS } from '@/app/components/workflow/constants'
import { ALL_CHAT_AVAILABLE_BLOCKS, ALL_COMPLETION_AVAILABLE_BLOCKS } from '@/app/components/workflow/blocks'

const i18nPrefix = 'workflow.errorMsg'

@@ -1,7 +1,7 @@
import { type NodeDefault, VarType } from '../../types'
import { BlockEnum } from '../../types'
import type { VariableAssignerNodeType } from './types'
import { ALL_CHAT_AVAILABLE_BLOCKS, ALL_COMPLETION_AVAILABLE_BLOCKS } from '@/app/components/workflow/constants'
import { ALL_CHAT_AVAILABLE_BLOCKS, ALL_COMPLETION_AVAILABLE_BLOCKS } from '@/app/components/workflow/blocks'

const i18nPrefix = 'workflow'