diff --git a/api/Dockerfile b/api/Dockerfile
index 6483f8281b..d32f70321d 100644
--- a/api/Dockerfile
+++ b/api/Dockerfile
@@ -67,6 +67,10 @@ ENV PATH="${VIRTUAL_ENV}/bin:${PATH}"
 
 # Download nltk data
 RUN python -c "import nltk; nltk.download('punkt'); nltk.download('averaged_perceptron_tagger')"
 
+ENV TIKTOKEN_CACHE_DIR=/app/api/.tiktoken_cache
+
+RUN python -c "import tiktoken; tiktoken.encoding_for_model('gpt2')"
+
 # Copy source code
 COPY . /app/api/
 
diff --git a/api/controllers/console/datasets/datasets_document.py b/api/controllers/console/datasets/datasets_document.py
index 829ef11e52..ca6c571727 100644
--- a/api/controllers/console/datasets/datasets_document.py
+++ b/api/controllers/console/datasets/datasets_document.py
@@ -6,7 +6,6 @@ from flask import request
 from flask_login import current_user
 from flask_restful import Resource, fields, marshal, marshal_with, reqparse
 from sqlalchemy import asc, desc
-from transformers.hf_argparser import string_to_bool
 from werkzeug.exceptions import Forbidden, NotFound
 
 import services
@@ -145,7 +144,19 @@ class DatasetDocumentListApi(Resource):
         sort = request.args.get("sort", default="-created_at", type=str)
         # "yes", "true", "t", "y", "1" convert to True, while others convert to False.
         try:
-            fetch = string_to_bool(request.args.get("fetch", default="false"))
+            fetch_val = request.args.get("fetch", default="false")
+            if isinstance(fetch_val, bool):
+                fetch = fetch_val
+            else:
+                if fetch_val.lower() in ("yes", "true", "t", "y", "1"):
+                    fetch = True
+                elif fetch_val.lower() in ("no", "false", "f", "n", "0"):
+                    fetch = False
+                else:
+                    raise ArgumentTypeError(
+                        f"Truthy value expected: got {fetch_val} but expected one of yes/no, true/false, t/f, y/n, 1/0 "
+                        f"(case insensitive)."
+                    )
         except (ArgumentTypeError, ValueError, Exception) as e:
             fetch = False
         dataset = DatasetService.get_dataset(dataset_id)
diff --git a/api/core/model_runtime/entities/model_entities.py b/api/core/model_runtime/entities/model_entities.py
index 52ea787c3a..09b9e3aa0d 100644
--- a/api/core/model_runtime/entities/model_entities.py
+++ b/api/core/model_runtime/entities/model_entities.py
@@ -18,7 +18,6 @@ class ModelType(Enum):
     SPEECH2TEXT = "speech2text"
     MODERATION = "moderation"
     TTS = "tts"
-    TEXT2IMG = "text2img"
 
     @classmethod
     def value_of(cls, origin_model_type: str) -> "ModelType":
@@ -37,8 +36,6 @@ class ModelType(Enum):
             return cls.SPEECH2TEXT
         elif origin_model_type in {"tts", cls.TTS.value}:
             return cls.TTS
-        elif origin_model_type in {"text2img", cls.TEXT2IMG.value}:
-            return cls.TEXT2IMG
         elif origin_model_type == cls.MODERATION.value:
             return cls.MODERATION
         else:
@@ -62,8 +59,6 @@ class ModelType(Enum):
             return "tts"
         elif self == self.MODERATION:
             return "moderation"
-        elif self == self.TEXT2IMG:
-            return "text2img"
         else:
             raise ValueError(f"invalid model type {self}")
 
diff --git a/api/core/model_runtime/model_providers/__base/ai_model.py b/api/core/model_runtime/model_providers/__base/ai_model.py
index 6b04ba2efd..a28d69ce80 100644
--- a/api/core/model_runtime/model_providers/__base/ai_model.py
+++ b/api/core/model_runtime/model_providers/__base/ai_model.py
@@ -1,26 +1,18 @@
 import decimal
-import os
-from collections.abc import Mapping
 from typing import Optional
 
 from pydantic import ConfigDict, Field
 
-from core.helper.position_helper import get_position_map, sort_by_position_map
-from core.model_runtime.entities.common_entities import I18nObject
-from core.model_runtime.entities.defaults import PARAMETER_RULE_TEMPLATE
 from core.model_runtime.entities.model_entities
import ( AIModelEntity, - DefaultParameterName, - FetchFrom, ModelType, PriceConfig, PriceInfo, PriceType, ) from core.model_runtime.errors.invoke import InvokeAuthorizationError, InvokeError -from core.model_runtime.model_providers.__base.tokenizers.gpt2_tokenzier import GPT2Tokenizer from core.plugin.entities.plugin_daemon import PluginModelProviderEntity -from core.tools.utils.yaml_utils import load_yaml_file +from core.plugin.manager.model import PluginModelManager class AIModel: @@ -117,93 +109,7 @@ class AIModel: currency=price_config.currency, ) - def predefined_models(self) -> list[AIModelEntity]: - """ - Get all predefined models for given provider. - - :return: - """ - if self.model_schemas: - return self.model_schemas - - model_schemas = [] - - # get module name - model_type = self.__class__.__module__.split(".")[-1] - - # get provider name - provider_name = self.__class__.__module__.split(".")[-3] - - # get the path of current classes - current_path = os.path.abspath(__file__) - # get parent path of the current path - provider_model_type_path = os.path.join( - os.path.dirname(os.path.dirname(current_path)), provider_name, model_type - ) - - # get all yaml files path under provider_model_type_path that do not start with __ - model_schema_yaml_paths = [ - os.path.join(provider_model_type_path, model_schema_yaml) - for model_schema_yaml in os.listdir(provider_model_type_path) - if not model_schema_yaml.startswith("__") - and not model_schema_yaml.startswith("_") - and os.path.isfile(os.path.join(provider_model_type_path, model_schema_yaml)) - and model_schema_yaml.endswith(".yaml") - ] - - # get _position.yaml file path - position_map = get_position_map(provider_model_type_path) - - # traverse all model_schema_yaml_paths - for model_schema_yaml_path in model_schema_yaml_paths: - # read yaml data from yaml file - yaml_data = load_yaml_file(model_schema_yaml_path) - - new_parameter_rules = [] - for parameter_rule in yaml_data.get("parameter_rules", []): - if "use_template" in parameter_rule: - try: - default_parameter_name = DefaultParameterName.value_of(parameter_rule["use_template"]) - default_parameter_rule = self._get_default_parameter_rule_variable_map(default_parameter_name) - copy_default_parameter_rule = default_parameter_rule.copy() - copy_default_parameter_rule.update(parameter_rule) - parameter_rule = copy_default_parameter_rule - except ValueError: - pass - - if "label" not in parameter_rule: - parameter_rule["label"] = {"zh_Hans": parameter_rule["name"], "en_US": parameter_rule["name"]} - - new_parameter_rules.append(parameter_rule) - - yaml_data["parameter_rules"] = new_parameter_rules - - if "label" not in yaml_data: - yaml_data["label"] = {"zh_Hans": yaml_data["model"], "en_US": yaml_data["model"]} - - yaml_data["fetch_from"] = FetchFrom.PREDEFINED_MODEL.value - - try: - # yaml_data to entity - model_schema = AIModelEntity(**yaml_data) - except Exception as e: - model_schema_yaml_file_name = os.path.basename(model_schema_yaml_path).rstrip(".yaml") - raise Exception( - f"Invalid model schema for {provider_name}.{model_type}.{model_schema_yaml_file_name}: {str(e)}" - ) - - # cache model schema - model_schemas.append(model_schema) - - # resort model schemas by position - model_schemas = sort_by_position_map(position_map, model_schemas, lambda x: x.model) - - # cache model schemas - self.model_schemas = model_schemas - - return model_schemas - - def get_model_schema(self, model: str, credentials: Optional[Mapping] = None) -> Optional[AIModelEntity]: + def 
get_model_schema(self, model: str, credentials: Optional[dict] = None) -> Optional[AIModelEntity]: """ Get model schema by model name and credentials @@ -211,117 +117,13 @@ class AIModel: :param credentials: model credentials :return: model schema """ - # get predefined models (predefined_models) - models = self.predefined_models() - - model_map = {model.model: model for model in models} - if model in model_map: - return model_map[model] - - if credentials: - model_schema = self.get_customizable_model_schema_from_credentials(model, credentials) - if model_schema: - return model_schema - - return None - - def get_customizable_model_schema_from_credentials( - self, model: str, credentials: Mapping - ) -> Optional[AIModelEntity]: - """ - Get customizable model schema from credentials - - :param model: model name - :param credentials: model credentials - :return: model schema - """ - return self._get_customizable_model_schema(model, credentials) - - def _get_customizable_model_schema(self, model: str, credentials: Mapping) -> Optional[AIModelEntity]: - """ - Get customizable model schema and fill in the template - """ - schema = self.get_customizable_model_schema(model, credentials) - - if not schema: - return None - - # fill in the template - new_parameter_rules = [] - for parameter_rule in schema.parameter_rules: - if parameter_rule.use_template: - try: - default_parameter_name = DefaultParameterName.value_of(parameter_rule.use_template) - default_parameter_rule = self._get_default_parameter_rule_variable_map(default_parameter_name) - if not parameter_rule.max and "max" in default_parameter_rule: - parameter_rule.max = default_parameter_rule["max"] - if not parameter_rule.min and "min" in default_parameter_rule: - parameter_rule.min = default_parameter_rule["min"] - if not parameter_rule.default and "default" in default_parameter_rule: - parameter_rule.default = default_parameter_rule["default"] - if not parameter_rule.precision and "precision" in default_parameter_rule: - parameter_rule.precision = default_parameter_rule["precision"] - if not parameter_rule.required and "required" in default_parameter_rule: - parameter_rule.required = default_parameter_rule["required"] - if not parameter_rule.help and "help" in default_parameter_rule: - parameter_rule.help = I18nObject( - en_US=default_parameter_rule["help"]["en_US"], - ) - if ( - parameter_rule.help - and not parameter_rule.help.en_US - and ("help" in default_parameter_rule and "en_US" in default_parameter_rule["help"]) - ): - parameter_rule.help.en_US = default_parameter_rule["help"]["en_US"] - if ( - parameter_rule.help - and not parameter_rule.help.zh_Hans - and ("help" in default_parameter_rule and "zh_Hans" in default_parameter_rule["help"]) - ): - parameter_rule.help.zh_Hans = default_parameter_rule["help"].get( - "zh_Hans", default_parameter_rule["help"]["en_US"] - ) - except ValueError: - pass - - new_parameter_rules.append(parameter_rule) - - schema.parameter_rules = new_parameter_rules - - return schema - - def get_customizable_model_schema(self, model: str, credentials: Mapping) -> Optional[AIModelEntity]: - """ - Get customizable model schema - - :param model: model name - :param credentials: model credentials - :return: model schema - """ - return None - - def _get_default_parameter_rule_variable_map(self, name: DefaultParameterName) -> dict: - """ - Get default parameter rule for given name - - :param name: parameter name - :return: parameter rule - """ - default_parameter_rule = PARAMETER_RULE_TEMPLATE.get(name) - - if not 
default_parameter_rule: - raise Exception(f"Invalid model parameter rule name {name}") - - return default_parameter_rule - - def _get_num_tokens_by_gpt2(self, text: str) -> int: - """ - Get number of tokens for given prompt messages by gpt2 - Some provider models do not provide an interface for obtaining the number of tokens. - Here, the gpt2 tokenizer is used to calculate the number of tokens. - This method can be executed offline, and the gpt2 tokenizer has been cached in the project. - - :param text: plain text of prompt. You need to convert the original message to plain text - :return: number of tokens - """ - return GPT2Tokenizer.get_num_tokens(text) + plugin_model_manager = PluginModelManager() + return plugin_model_manager.get_model_schema( + tenant_id=self.tenant_id, + user_id="unknown", + plugin_id=self.plugin_id, + provider=self.provider_name, + model_type=self.model_type.value, + model=model, + credentials=credentials or {}, + ) diff --git a/api/core/model_runtime/model_providers/__base/audio.mp3 b/api/core/model_runtime/model_providers/__base/audio.mp3 deleted file mode 100644 index 7c86e02e16..0000000000 Binary files a/api/core/model_runtime/model_providers/__base/audio.mp3 and /dev/null differ diff --git a/api/core/model_runtime/model_providers/__base/model_provider.py b/api/core/model_runtime/model_providers/__base/model_provider.py deleted file mode 100644 index 4374093de4..0000000000 --- a/api/core/model_runtime/model_providers/__base/model_provider.py +++ /dev/null @@ -1,120 +0,0 @@ -import os -from abc import ABC, abstractmethod -from typing import Optional - -from core.helper.module_import_helper import get_subclasses_from_module, import_module_from_source -from core.model_runtime.entities.model_entities import AIModelEntity, ModelType -from core.model_runtime.entities.provider_entities import ProviderEntity -from core.model_runtime.model_providers.__base.ai_model import AIModel -from core.tools.utils.yaml_utils import load_yaml_file - - -class ModelProvider(ABC): - provider_schema: Optional[ProviderEntity] = None - model_instance_map: dict[str, AIModel] = {} - - @abstractmethod - def validate_provider_credentials(self, credentials: dict) -> None: - """ - Validate provider credentials - You can choose any validate_credentials method of model type or implement validate method by yourself, - such as: get model list api - - if validate failed, raise exception - - :param credentials: provider credentials, credentials form defined in `provider_credential_schema`. 
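To make the new schema lookup path concrete, here is a hedged sketch of calling PluginModelManager directly, mirroring the keyword arguments that get_model_schema now forwards to the plugin daemon; every placeholder value below is invented for illustration, and a real caller would take these fields from the AIModel instance (self.tenant_id, self.plugin_id, self.provider_name, self.model_type).

from core.plugin.manager.model import PluginModelManager

tenant_id = "tenant-123"            # hypothetical tenant id
plugin_id = "langgenius/openai"     # hypothetical plugin id
provider = "openai"                 # hypothetical provider name

manager = PluginModelManager()
schema = manager.get_model_schema(
    tenant_id=tenant_id,
    user_id="unknown",              # the diff uses "unknown" for schema lookups
    plugin_id=plugin_id,
    provider=provider,
    model_type="llm",               # i.e. ModelType.LLM.value
    model="gpt-4o",                 # hypothetical model name
    credentials={},                 # empty dict when no credentials are supplied
)
print(schema)  # an AIModelEntity (or None), now resolved by the plugin daemon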
- """ - raise NotImplementedError - - def get_provider_schema(self) -> ProviderEntity: - """ - Get provider schema - - :return: provider schema - """ - if self.provider_schema: - return self.provider_schema - - # get dirname of the current path - provider_name = self.__class__.__module__.split(".")[-1] - - # get the path of the model_provider classes - base_path = os.path.abspath(__file__) - current_path = os.path.join(os.path.dirname(os.path.dirname(base_path)), provider_name) - - # read provider schema from yaml file - yaml_path = os.path.join(current_path, f"{provider_name}.yaml") - yaml_data = load_yaml_file(yaml_path) - - try: - # yaml_data to entity - provider_schema = ProviderEntity(**yaml_data) - except Exception as e: - raise Exception(f"Invalid provider schema for {provider_name}: {str(e)}") - - # cache schema - self.provider_schema = provider_schema - - return provider_schema - - def models(self, model_type: ModelType) -> list[AIModelEntity]: - """ - Get all models for given model type - - :param model_type: model type defined in `ModelType` - :return: list of models - """ - provider_schema = self.get_provider_schema() - if model_type not in provider_schema.supported_model_types: - return [] - - # get model instance of the model type - model_instance = self.get_model_instance(model_type) - - # get predefined models (predefined_models) - models = model_instance.predefined_models() - - # return models - return models - - def get_model_instance(self, model_type: ModelType) -> AIModel: - """ - Get model instance - - :param model_type: model type defined in `ModelType` - :return: - """ - # get dirname of the current path - provider_name = self.__class__.__module__.split(".")[-1] - - if f"{provider_name}.{model_type.value}" in self.model_instance_map: - return self.model_instance_map[f"{provider_name}.{model_type.value}"] - - # get the path of the model type classes - base_path = os.path.abspath(__file__) - model_type_name = model_type.value.replace("-", "_") - model_type_path = os.path.join(os.path.dirname(os.path.dirname(base_path)), provider_name, model_type_name) - model_type_py_path = os.path.join(model_type_path, f"{model_type_name}.py") - - if not os.path.isdir(model_type_path) or not os.path.exists(model_type_py_path): - raise Exception(f"Invalid model type {model_type} for provider {provider_name}") - - # Dynamic loading {model_type_name}.py file and find the subclass of AIModel - parent_module = ".".join(self.__class__.__module__.split(".")[:-1]) - mod = import_module_from_source( - module_name=f"{parent_module}.{model_type_name}.{model_type_name}", py_file_path=model_type_py_path - ) - model_class = next( - filter( - lambda x: x.__module__ == mod.__name__ and not x.__abstractmethods__, - get_subclasses_from_module(mod, AIModel), - ), - None, - ) - if not model_class: - raise Exception(f"Missing AIModel Class for model type {model_type} in {model_type_py_path}") - - model_instance_map = model_class() - self.model_instance_map[f"{provider_name}.{model_type.value}"] = model_instance_map - - return model_instance_map diff --git a/api/core/model_runtime/model_providers/__base/moderation_model.py b/api/core/model_runtime/model_providers/__base/moderation_model.py index d04414ccb8..f98d7572c7 100644 --- a/api/core/model_runtime/model_providers/__base/moderation_model.py +++ b/api/core/model_runtime/model_providers/__base/moderation_model.py @@ -1,11 +1,11 @@ import time -from abc import abstractmethod from typing import Optional from pydantic import ConfigDict from 
core.model_runtime.entities.model_entities import ModelType from core.model_runtime.model_providers.__base.ai_model import AIModel +from core.plugin.manager.model import PluginModelManager class ModerationModel(AIModel): @@ -31,19 +31,15 @@ class ModerationModel(AIModel): self.started_at = time.perf_counter() try: - return self._invoke(model, credentials, text, user) + plugin_model_manager = PluginModelManager() + return plugin_model_manager.invoke_moderation( + tenant_id=self.tenant_id, + user_id=user or "unknown", + plugin_id=self.plugin_id, + provider=self.provider_name, + model=model, + credentials=credentials, + text=text, + ) except Exception as e: raise self._transform_invoke_error(e) - - @abstractmethod - def _invoke(self, model: str, credentials: dict, text: str, user: Optional[str] = None) -> bool: - """ - Invoke large language model - - :param model: model name - :param credentials: model credentials - :param text: text to moderate - :param user: unique user id - :return: false if text is safe, true otherwise - """ - raise NotImplementedError diff --git a/api/core/model_runtime/model_providers/__base/rerank_model.py b/api/core/model_runtime/model_providers/__base/rerank_model.py index 5fb9604742..e905cb18d4 100644 --- a/api/core/model_runtime/model_providers/__base/rerank_model.py +++ b/api/core/model_runtime/model_providers/__base/rerank_model.py @@ -1,10 +1,9 @@ -import time -from abc import abstractmethod from typing import Optional from core.model_runtime.entities.model_entities import ModelType from core.model_runtime.entities.rerank_entities import RerankResult from core.model_runtime.model_providers.__base.ai_model import AIModel +from core.plugin.manager.model import PluginModelManager class RerankModel(AIModel): @@ -36,34 +35,19 @@ class RerankModel(AIModel): :param user: unique user id :return: rerank result """ - self.started_at = time.perf_counter() - try: - return self._invoke(model, credentials, query, docs, score_threshold, top_n, user) + plugin_model_manager = PluginModelManager() + return plugin_model_manager.invoke_rerank( + tenant_id=self.tenant_id, + user_id=user or "unknown", + plugin_id=self.plugin_id, + provider=self.provider_name, + model=model, + credentials=credentials, + query=query, + docs=docs, + score_threshold=score_threshold, + top_n=top_n, + ) except Exception as e: raise self._transform_invoke_error(e) - - @abstractmethod - def _invoke( - self, - model: str, - credentials: dict, - query: str, - docs: list[str], - score_threshold: Optional[float] = None, - top_n: Optional[int] = None, - user: Optional[str] = None, - ) -> RerankResult: - """ - Invoke rerank model - - :param model: model name - :param credentials: model credentials - :param query: search query - :param docs: docs for reranking - :param score_threshold: score threshold - :param top_n: top n - :param user: unique user id - :return: rerank result - """ - raise NotImplementedError diff --git a/api/core/model_runtime/model_providers/__base/speech2text_model.py b/api/core/model_runtime/model_providers/__base/speech2text_model.py index b6b0b73743..97ff322f09 100644 --- a/api/core/model_runtime/model_providers/__base/speech2text_model.py +++ b/api/core/model_runtime/model_providers/__base/speech2text_model.py @@ -1,11 +1,10 @@ -import os -from abc import abstractmethod from typing import IO, Optional from pydantic import ConfigDict from core.model_runtime.entities.model_entities import ModelType from core.model_runtime.model_providers.__base.ai_model import AIModel +from 
core.plugin.manager.model import PluginModelManager class Speech2TextModel(AIModel): @@ -20,7 +19,7 @@ class Speech2TextModel(AIModel): def invoke(self, model: str, credentials: dict, file: IO[bytes], user: Optional[str] = None) -> str: """ - Invoke large language model + Invoke speech to text model :param model: model name :param credentials: model credentials @@ -29,31 +28,15 @@ class Speech2TextModel(AIModel): :return: text for given audio file """ try: - return self._invoke(model, credentials, file, user) + plugin_model_manager = PluginModelManager() + return plugin_model_manager.invoke_speech_to_text( + tenant_id=self.tenant_id, + user_id=user or "unknown", + plugin_id=self.plugin_id, + provider=self.provider_name, + model=model, + credentials=credentials, + file=file, + ) except Exception as e: raise self._transform_invoke_error(e) - - @abstractmethod - def _invoke(self, model: str, credentials: dict, file: IO[bytes], user: Optional[str] = None) -> str: - """ - Invoke large language model - - :param model: model name - :param credentials: model credentials - :param file: audio file - :param user: unique user id - :return: text for given audio file - """ - raise NotImplementedError - - def _get_demo_file_path(self) -> str: - """ - Get demo file for given model - - :return: demo file - """ - # Get the directory of the current file - current_dir = os.path.dirname(os.path.abspath(__file__)) - - # Construct the path to the audio file - return os.path.join(current_dir, "audio.mp3") diff --git a/api/core/model_runtime/model_providers/__base/text2img_model.py b/api/core/model_runtime/model_providers/__base/text2img_model.py deleted file mode 100644 index a5810e2f0e..0000000000 --- a/api/core/model_runtime/model_providers/__base/text2img_model.py +++ /dev/null @@ -1,54 +0,0 @@ -from abc import abstractmethod -from typing import IO, Optional - -from pydantic import ConfigDict - -from core.model_runtime.entities.model_entities import ModelType -from core.model_runtime.model_providers.__base.ai_model import AIModel - - -class Text2ImageModel(AIModel): - """ - Model class for text2img model. 
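One usage note on the speech2text delegation above: Speech2TextModel.invoke keeps its public signature, so existing callers are unaffected by the switch to the plugin daemon. A hedged sketch, with a hypothetical model name and audio path:

from core.model_runtime.model_providers.__base.speech2text_model import Speech2TextModel

def transcribe(s2t: Speech2TextModel, credentials: dict) -> str:
    # The public signature is unchanged; only the implementation now routes
    # through PluginModelManager.invoke_speech_to_text.
    with open("sample.wav", "rb") as f:  # hypothetical audio file
        return s2t.invoke(model="whisper-1", credentials=credentials, file=f)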
- """ - - model_type: ModelType = ModelType.TEXT2IMG - - # pydantic configs - model_config = ConfigDict(protected_namespaces=()) - - def invoke( - self, model: str, credentials: dict, prompt: str, model_parameters: dict, user: Optional[str] = None - ) -> list[IO[bytes]]: - """ - Invoke Text2Image model - - :param model: model name - :param credentials: model credentials - :param prompt: prompt for image generation - :param model_parameters: model parameters - :param user: unique user id - - :return: image bytes - """ - try: - return self._invoke(model, credentials, prompt, model_parameters, user) - except Exception as e: - raise self._transform_invoke_error(e) - - @abstractmethod - def _invoke( - self, model: str, credentials: dict, prompt: str, model_parameters: dict, user: Optional[str] = None - ) -> list[IO[bytes]]: - """ - Invoke Text2Image model - - :param model: model name - :param credentials: model credentials - :param prompt: prompt for image generation - :param model_parameters: model parameters - :param user: unique user id - - :return: image bytes - """ - raise NotImplementedError diff --git a/api/core/model_runtime/model_providers/__base/text_embedding_model.py b/api/core/model_runtime/model_providers/__base/text_embedding_model.py index 1a5c40ed51..beade74362 100644 --- a/api/core/model_runtime/model_providers/__base/text_embedding_model.py +++ b/api/core/model_runtime/model_providers/__base/text_embedding_model.py @@ -1,5 +1,3 @@ -import time -from abc import abstractmethod from typing import Optional from pydantic import ConfigDict @@ -39,34 +37,21 @@ class TextEmbeddingModel(AIModel): :param input_type: input type :return: embeddings result """ - self.started_at = time.perf_counter() - try: - return self._invoke(model, credentials, texts, user, input_type) + plugin_model_manager = PluginModelManager() + return plugin_model_manager.invoke_text_embedding( + tenant_id=self.tenant_id, + user_id=user or "unknown", + plugin_id=self.plugin_id, + provider=self.provider_name, + model=model, + credentials=credentials, + texts=texts, + input_type=input_type.value, + ) except Exception as e: raise self._transform_invoke_error(e) - @abstractmethod - def _invoke( - self, - model: str, - credentials: dict, - texts: list[str], - user: Optional[str] = None, - input_type: EmbeddingInputType = EmbeddingInputType.DOCUMENT, - ) -> TextEmbeddingResult: - """ - Invoke text embedding model - - :param model: model name - :param credentials: model credentials - :param texts: texts to embed - :param user: unique user id - :param input_type: input type - :return: embeddings result - """ - raise NotImplementedError - def get_num_tokens(self, model: str, credentials: dict, texts: list[str]) -> int: """ Get number of tokens for given prompt messages @@ -82,7 +67,6 @@ class TextEmbeddingModel(AIModel): user_id="unknown", plugin_id=self.plugin_id, provider=self.provider_name, - model_type=self.model_type.value, model=model, credentials=credentials, texts=texts, diff --git a/api/core/model_runtime/model_providers/__base/tokenizers/gpt2_tokenzier.py b/api/core/model_runtime/model_providers/__base/tokenizers/gpt2_tokenzier.py index 5fe6dda6ad..609c11d22f 100644 --- a/api/core/model_runtime/model_providers/__base/tokenizers/gpt2_tokenzier.py +++ b/api/core/model_runtime/model_providers/__base/tokenizers/gpt2_tokenzier.py @@ -1,34 +1,9 @@ -from os.path import abspath, dirname, join -from threading import Lock -from typing import Any - -from transformers import GPT2Tokenizer as TransformerGPT2Tokenizer - 
-_tokenizer = None -_lock = Lock() +import tiktoken class GPT2Tokenizer: - @staticmethod - def _get_num_tokens_by_gpt2(text: str) -> int: - """ - use gpt2 tokenizer to get num tokens - """ - _tokenizer = GPT2Tokenizer.get_encoder() - tokens = _tokenizer.encode(text, verbose=False) - return len(tokens) - @staticmethod def get_num_tokens(text: str) -> int: - return GPT2Tokenizer._get_num_tokens_by_gpt2(text) - - @staticmethod - def get_encoder() -> Any: - global _tokenizer, _lock - with _lock: - if _tokenizer is None: - base_path = abspath(__file__) - gpt2_tokenizer_path = join(dirname(base_path), "gpt2") - _tokenizer = TransformerGPT2Tokenizer.from_pretrained(gpt2_tokenizer_path) - - return _tokenizer + encoding = tiktoken.encoding_for_model("gpt2") + tiktoken_vec = encoding.encode(text) + return len(tiktoken_vec) diff --git a/api/core/model_runtime/model_providers/__base/tts_model.py b/api/core/model_runtime/model_providers/__base/tts_model.py index 70be9322a7..8cefa63ebf 100644 --- a/api/core/model_runtime/model_providers/__base/tts_model.py +++ b/api/core/model_runtime/model_providers/__base/tts_model.py @@ -1,12 +1,11 @@ import logging -import re -from abc import abstractmethod from typing import Optional from pydantic import ConfigDict -from core.model_runtime.entities.model_entities import ModelPropertyKey, ModelType +from core.model_runtime.entities.model_entities import ModelType from core.model_runtime.model_providers.__base.ai_model import AIModel +from core.plugin.manager.model import PluginModelManager logger = logging.getLogger(__name__) @@ -37,36 +36,21 @@ class TTSModel(AIModel): :return: translated audio file """ try: - return self._invoke( + plugin_model_manager = PluginModelManager() + return plugin_model_manager.invoke_tts( + tenant_id=self.tenant_id, + user_id=user or "unknown", + plugin_id=self.plugin_id, + provider=self.provider_name, model=model, credentials=credentials, - user=user, content_text=content_text, voice=voice, - tenant_id=tenant_id, ) except Exception as e: raise self._transform_invoke_error(e) - @abstractmethod - def _invoke( - self, model: str, tenant_id: str, credentials: dict, content_text: str, voice: str, user: Optional[str] = None - ): - """ - Invoke large language model - - :param model: model name - :param tenant_id: user tenant id - :param credentials: model credentials - :param voice: model timbre - :param content_text: text content to be translated - :param streaming: output is streaming - :param user: unique user id - :return: translated audio file - """ - raise NotImplementedError - - def get_tts_model_voices(self, model: str, credentials: dict, language: Optional[str] = None) -> list: + def get_tts_model_voices(self, model: str, credentials: dict, language: Optional[str] = None) -> list[dict]: """ Get voice for given tts model voices @@ -75,83 +59,13 @@ class TTSModel(AIModel): :param credentials: model credentials :return: voices lists """ - model_schema = self.get_model_schema(model, credentials) - - if model_schema and ModelPropertyKey.VOICES in model_schema.model_properties: - voices = model_schema.model_properties[ModelPropertyKey.VOICES] - if language: - return [ - {"name": d["name"], "value": d["mode"]} - for d in voices - if language and language in d.get("language") - ] - else: - return [{"name": d["name"], "value": d["mode"]} for d in voices] - - def _get_model_default_voice(self, model: str, credentials: dict) -> any: - """ - Get voice for given tts model - - :param model: model name - :param credentials: model credentials - 
:return: voice - """ - model_schema = self.get_model_schema(model, credentials) - - if model_schema and ModelPropertyKey.DEFAULT_VOICE in model_schema.model_properties: - return model_schema.model_properties[ModelPropertyKey.DEFAULT_VOICE] - - def _get_model_audio_type(self, model: str, credentials: dict) -> str: - """ - Get audio type for given tts model - - :param model: model name - :param credentials: model credentials - :return: voice - """ - model_schema = self.get_model_schema(model, credentials) - - if model_schema and ModelPropertyKey.AUDIO_TYPE in model_schema.model_properties: - return model_schema.model_properties[ModelPropertyKey.AUDIO_TYPE] - - def _get_model_word_limit(self, model: str, credentials: dict) -> int: - """ - Get audio type for given tts model - :return: audio type - """ - model_schema = self.get_model_schema(model, credentials) - - if model_schema and ModelPropertyKey.WORD_LIMIT in model_schema.model_properties: - return model_schema.model_properties[ModelPropertyKey.WORD_LIMIT] - - def _get_model_workers_limit(self, model: str, credentials: dict) -> int: - """ - Get audio max workers for given tts model - :return: audio type - """ - model_schema = self.get_model_schema(model, credentials) - - if model_schema and ModelPropertyKey.MAX_WORKERS in model_schema.model_properties: - return model_schema.model_properties[ModelPropertyKey.MAX_WORKERS] - - @staticmethod - def _split_text_into_sentences(org_text, max_length=2000, pattern=r"[。.!?]"): - match = re.compile(pattern) - tx = match.finditer(org_text) - start = 0 - result = [] - one_sentence = "" - for i in tx: - end = i.regs[0][1] - tmp = org_text[start:end] - if len(one_sentence + tmp) > max_length: - result.append(one_sentence) - one_sentence = "" - one_sentence += tmp - start = end - last_sens = org_text[start:] - if last_sens: - one_sentence += last_sens - if one_sentence != "": - result.append(one_sentence) - return result + plugin_model_manager = PluginModelManager() + return plugin_model_manager.get_tts_model_voices( + tenant_id=self.tenant_id, + user_id="unknown", + plugin_id=self.plugin_id, + provider=self.provider_name, + model=model, + credentials=credentials, + language=language, + ) diff --git a/api/core/model_runtime/model_providers/azure_openai/text_embedding/text_embedding.py b/api/core/model_runtime/model_providers/azure_openai/text_embedding/text_embedding.py deleted file mode 100644 index 8701a38050..0000000000 --- a/api/core/model_runtime/model_providers/azure_openai/text_embedding/text_embedding.py +++ /dev/null @@ -1,191 +0,0 @@ -import base64 -import copy -import time -from typing import Optional, Union - -import numpy as np -import tiktoken -from openai import AzureOpenAI - -from core.embedding.embedding_constant import EmbeddingInputType -from core.model_runtime.entities.model_entities import AIModelEntity, PriceType -from core.model_runtime.entities.text_embedding_entities import EmbeddingUsage, TextEmbeddingResult -from core.model_runtime.errors.validate import CredentialsValidateFailedError -from core.model_runtime.model_providers.__base.text_embedding_model import TextEmbeddingModel -from core.model_runtime.model_providers.azure_openai._common import _CommonAzureOpenAI -from core.model_runtime.model_providers.azure_openai._constant import EMBEDDING_BASE_MODELS, AzureBaseModel - - -class AzureOpenAITextEmbeddingModel(_CommonAzureOpenAI, TextEmbeddingModel): - def _invoke( - self, - model: str, - credentials: dict, - texts: list[str], - user: Optional[str] = None, - input_type: 
EmbeddingInputType = EmbeddingInputType.DOCUMENT, - ) -> TextEmbeddingResult: - """ - Invoke text embedding model - - :param model: model name - :param credentials: model credentials - :param texts: texts to embed - :param user: unique user id - :param input_type: input type - :return: embeddings result - """ - base_model_name = credentials["base_model_name"] - credentials_kwargs = self._to_credential_kwargs(credentials) - client = AzureOpenAI(**credentials_kwargs) - - extra_model_kwargs = {} - if user: - extra_model_kwargs["user"] = user - - extra_model_kwargs["encoding_format"] = "base64" - - context_size = self._get_context_size(model, credentials) - max_chunks = self._get_max_chunks(model, credentials) - - embeddings: list[list[float]] = [[] for _ in range(len(texts))] - tokens = [] - indices = [] - used_tokens = 0 - - try: - enc = tiktoken.encoding_for_model(base_model_name) - except KeyError: - enc = tiktoken.get_encoding("cl100k_base") - - for i, text in enumerate(texts): - token = enc.encode(text) - for j in range(0, len(token), context_size): - tokens += [token[j : j + context_size]] - indices += [i] - - batched_embeddings = [] - _iter = range(0, len(tokens), max_chunks) - - for i in _iter: - embeddings_batch, embedding_used_tokens = self._embedding_invoke( - model=model, client=client, texts=tokens[i : i + max_chunks], extra_model_kwargs=extra_model_kwargs - ) - - used_tokens += embedding_used_tokens - batched_embeddings += embeddings_batch - - results: list[list[list[float]]] = [[] for _ in range(len(texts))] - num_tokens_in_batch: list[list[int]] = [[] for _ in range(len(texts))] - for i in range(len(indices)): - results[indices[i]].append(batched_embeddings[i]) - num_tokens_in_batch[indices[i]].append(len(tokens[i])) - - for i in range(len(texts)): - _result = results[i] - if len(_result) == 0: - embeddings_batch, embedding_used_tokens = self._embedding_invoke( - model=model, client=client, texts="", extra_model_kwargs=extra_model_kwargs - ) - - used_tokens += embedding_used_tokens - average = embeddings_batch[0] - else: - average = np.average(_result, axis=0, weights=num_tokens_in_batch[i]) - embeddings[i] = (average / np.linalg.norm(average)).tolist() - - # calc usage - usage = self._calc_response_usage(model=model, credentials=credentials, tokens=used_tokens) - - return TextEmbeddingResult(embeddings=embeddings, usage=usage, model=base_model_name) - - def get_num_tokens(self, model: str, credentials: dict, texts: list[str]) -> int: - if len(texts) == 0: - return 0 - - try: - enc = tiktoken.encoding_for_model(credentials["base_model_name"]) - except KeyError: - enc = tiktoken.get_encoding("cl100k_base") - - total_num_tokens = 0 - for text in texts: - # calculate the number of tokens in the encoded text - tokenized_text = enc.encode(text) - total_num_tokens += len(tokenized_text) - - return total_num_tokens - - def validate_credentials(self, model: str, credentials: dict) -> None: - if "openai_api_base" not in credentials: - raise CredentialsValidateFailedError("Azure OpenAI API Base Endpoint is required") - - if "openai_api_key" not in credentials: - raise CredentialsValidateFailedError("Azure OpenAI API key is required") - - if "base_model_name" not in credentials: - raise CredentialsValidateFailedError("Base Model Name is required") - - if not self._get_ai_model_entity(credentials["base_model_name"], model): - raise CredentialsValidateFailedError(f'Base Model Name {credentials["base_model_name"]} is invalid') - - try: - credentials_kwargs = 
self._to_credential_kwargs(credentials) - client = AzureOpenAI(**credentials_kwargs) - - self._embedding_invoke(model=model, client=client, texts=["ping"], extra_model_kwargs={}) - except Exception as ex: - raise CredentialsValidateFailedError(str(ex)) - - def get_customizable_model_schema(self, model: str, credentials: dict) -> Optional[AIModelEntity]: - ai_model_entity = self._get_ai_model_entity(credentials["base_model_name"], model) - return ai_model_entity.entity - - @staticmethod - def _embedding_invoke( - model: str, client: AzureOpenAI, texts: Union[list[str], str], extra_model_kwargs: dict - ) -> tuple[list[list[float]], int]: - response = client.embeddings.create( - input=texts, - model=model, - **extra_model_kwargs, - ) - - if "encoding_format" in extra_model_kwargs and extra_model_kwargs["encoding_format"] == "base64": - # decode base64 embedding - return ( - [list(np.frombuffer(base64.b64decode(data.embedding), dtype="float32")) for data in response.data], - response.usage.total_tokens, - ) - - return [data.embedding for data in response.data], response.usage.total_tokens - - def _calc_response_usage(self, model: str, credentials: dict, tokens: int) -> EmbeddingUsage: - input_price_info = self.get_price( - model=model, credentials=credentials, price_type=PriceType.INPUT, tokens=tokens - ) - - # transform usage - usage = EmbeddingUsage( - tokens=tokens, - total_tokens=tokens, - unit_price=input_price_info.unit_price, - price_unit=input_price_info.unit, - total_price=input_price_info.total_amount, - currency=input_price_info.currency, - latency=time.perf_counter() - self.started_at, - ) - - return usage - - @staticmethod - def _get_ai_model_entity(base_model_name: str, model: str) -> AzureBaseModel: - for ai_model_entity in EMBEDDING_BASE_MODELS: - if ai_model_entity.base_model_name == base_model_name: - ai_model_entity_copy = copy.deepcopy(ai_model_entity) - ai_model_entity_copy.entity.model = model - ai_model_entity_copy.entity.label.en_US = model - ai_model_entity_copy.entity.label.zh_Hans = model - return ai_model_entity_copy - - return None diff --git a/api/core/model_runtime/model_providers/baichuan/text_embedding/text_embedding.py b/api/core/model_runtime/model_providers/baichuan/text_embedding/text_embedding.py deleted file mode 100644 index 56b9be1c36..0000000000 --- a/api/core/model_runtime/model_providers/baichuan/text_embedding/text_embedding.py +++ /dev/null @@ -1,207 +0,0 @@ -import time -from json import dumps -from typing import Optional - -from requests import post - -from core.embedding.embedding_constant import EmbeddingInputType -from core.model_runtime.entities.model_entities import PriceType -from core.model_runtime.entities.text_embedding_entities import EmbeddingUsage, TextEmbeddingResult -from core.model_runtime.errors.invoke import ( - InvokeAuthorizationError, - InvokeBadRequestError, - InvokeConnectionError, - InvokeError, - InvokeRateLimitError, - InvokeServerUnavailableError, -) -from core.model_runtime.errors.validate import CredentialsValidateFailedError -from core.model_runtime.model_providers.__base.text_embedding_model import TextEmbeddingModel -from core.model_runtime.model_providers.baichuan.llm.baichuan_tokenizer import BaichuanTokenizer -from core.model_runtime.model_providers.baichuan.llm.baichuan_turbo_errors import ( - BadRequestError, - InsufficientAccountBalanceError, - InternalServerError, - InvalidAPIKeyError, - InvalidAuthenticationError, - RateLimitReachedError, -) - - -class BaichuanTextEmbeddingModel(TextEmbeddingModel): - """ - 
Model class for BaiChuan text embedding model. - """ - - api_base: str = "http://api.baichuan-ai.com/v1/embeddings" - - def _invoke( - self, - model: str, - credentials: dict, - texts: list[str], - user: Optional[str] = None, - input_type: EmbeddingInputType = EmbeddingInputType.DOCUMENT, - ) -> TextEmbeddingResult: - """ - Invoke text embedding model - - :param model: model name - :param credentials: model credentials - :param texts: texts to embed - :param user: unique user id - :param input_type: input type - :return: embeddings result - """ - api_key = credentials["api_key"] - if model != "baichuan-text-embedding": - raise ValueError("Invalid model name") - if not api_key: - raise CredentialsValidateFailedError("api_key is required") - - # split into chunks of batch size 16 - chunks = [] - for i in range(0, len(texts), 16): - chunks.append(texts[i : i + 16]) - - embeddings = [] - token_usage = 0 - - for chunk in chunks: - # embedding chunk - chunk_embeddings, chunk_usage = self.embedding(model=model, api_key=api_key, texts=chunk, user=user) - - embeddings.extend(chunk_embeddings) - token_usage += chunk_usage - - result = TextEmbeddingResult( - model=model, - embeddings=embeddings, - usage=self._calc_response_usage(model=model, credentials=credentials, tokens=token_usage), - ) - - return result - - def embedding( - self, model: str, api_key, texts: list[str], user: Optional[str] = None - ) -> tuple[list[list[float]], int]: - """ - Embed given texts - - :param model: model name - :param credentials: model credentials - :param texts: texts to embed - :param user: unique user id - :return: embeddings result - """ - url = self.api_base - headers = {"Authorization": "Bearer " + api_key, "Content-Type": "application/json"} - - data = {"model": "Baichuan-Text-Embedding", "input": texts} - - try: - response = post(url, headers=headers, data=dumps(data)) - except Exception as e: - raise InvokeConnectionError(str(e)) - - if response.status_code != 200: - try: - resp = response.json() - # try to parse error message - err = resp["error"]["code"] - msg = resp["error"]["message"] - except Exception as e: - raise InternalServerError(f"Failed to convert response to json: {e} with text: {response.text}") - - if err == "invalid_api_key": - raise InvalidAPIKeyError(msg) - elif err == "insufficient_quota": - raise InsufficientAccountBalanceError(msg) - elif err == "invalid_authentication": - raise InvalidAuthenticationError(msg) - elif err and "rate" in err: - raise RateLimitReachedError(msg) - elif err and "internal" in err: - raise InternalServerError(msg) - elif err == "api_key_empty": - raise InvalidAPIKeyError(msg) - else: - raise InternalServerError(f"Unknown error: {err} with message: {msg}") - - try: - resp = response.json() - embeddings = resp["data"] - usage = resp["usage"] - except Exception as e: - raise InternalServerError(f"Failed to convert response to json: {e} with text: {response.text}") - - return [data["embedding"] for data in embeddings], usage["total_tokens"] - - def get_num_tokens(self, model: str, credentials: dict, texts: list[str]) -> int: - """ - Get number of tokens for given prompt messages - - :param model: model name - :param credentials: model credentials - :param texts: texts to embed - :return: - """ - num_tokens = 0 - for text in texts: - # use BaichuanTokenizer to get num tokens - num_tokens += BaichuanTokenizer._get_num_tokens(text) - return num_tokens - - def validate_credentials(self, model: str, credentials: dict) -> None: - """ - Validate model credentials - - 
:param model: model name - :param credentials: model credentials - :return: - """ - try: - self._invoke(model=model, credentials=credentials, texts=["ping"]) - except InvalidAPIKeyError: - raise CredentialsValidateFailedError("Invalid api key") - - @property - def _invoke_error_mapping(self) -> dict[type[InvokeError], list[type[Exception]]]: - return { - InvokeConnectionError: [], - InvokeServerUnavailableError: [InternalServerError], - InvokeRateLimitError: [RateLimitReachedError], - InvokeAuthorizationError: [ - InvalidAuthenticationError, - InsufficientAccountBalanceError, - InvalidAPIKeyError, - ], - InvokeBadRequestError: [BadRequestError, KeyError], - } - - def _calc_response_usage(self, model: str, credentials: dict, tokens: int) -> EmbeddingUsage: - """ - Calculate response usage - - :param model: model name - :param credentials: model credentials - :param tokens: input tokens - :return: usage - """ - # get input price info - input_price_info = self.get_price( - model=model, credentials=credentials, price_type=PriceType.INPUT, tokens=tokens - ) - - # transform usage - usage = EmbeddingUsage( - tokens=tokens, - total_tokens=tokens, - unit_price=input_price_info.unit_price, - price_unit=input_price_info.unit, - total_price=input_price_info.total_amount, - currency=input_price_info.currency, - latency=time.perf_counter() - self.started_at, - ) - - return usage diff --git a/api/core/model_runtime/model_providers/cohere/text_embedding/text_embedding.py b/api/core/model_runtime/model_providers/cohere/text_embedding/text_embedding.py deleted file mode 100644 index 4da2080690..0000000000 --- a/api/core/model_runtime/model_providers/cohere/text_embedding/text_embedding.py +++ /dev/null @@ -1,223 +0,0 @@ -import time -from typing import Optional - -import cohere -import numpy as np -from cohere.core import RequestOptions - -from core.embedding.embedding_constant import EmbeddingInputType -from core.model_runtime.entities.model_entities import PriceType -from core.model_runtime.entities.text_embedding_entities import EmbeddingUsage, TextEmbeddingResult -from core.model_runtime.errors.invoke import ( - InvokeAuthorizationError, - InvokeBadRequestError, - InvokeConnectionError, - InvokeError, - InvokeRateLimitError, - InvokeServerUnavailableError, -) -from core.model_runtime.errors.validate import CredentialsValidateFailedError -from core.model_runtime.model_providers.__base.text_embedding_model import TextEmbeddingModel - - -class CohereTextEmbeddingModel(TextEmbeddingModel): - """ - Model class for Cohere text embedding model. 
- """ - - def _invoke( - self, - model: str, - credentials: dict, - texts: list[str], - user: Optional[str] = None, - input_type: EmbeddingInputType = EmbeddingInputType.DOCUMENT, - ) -> TextEmbeddingResult: - """ - Invoke text embedding model - - :param model: model name - :param credentials: model credentials - :param texts: texts to embed - :param user: unique user id - :param input_type: input type - :return: embeddings result - """ - # get model properties - context_size = self._get_context_size(model, credentials) - max_chunks = self._get_max_chunks(model, credentials) - - embeddings: list[list[float]] = [[] for _ in range(len(texts))] - tokens = [] - indices = [] - used_tokens = 0 - - for i, text in enumerate(texts): - tokenize_response = self._tokenize(model=model, credentials=credentials, text=text) - - for j in range(0, len(tokenize_response), context_size): - tokens += [tokenize_response[j : j + context_size]] - indices += [i] - - batched_embeddings = [] - _iter = range(0, len(tokens), max_chunks) - - for i in _iter: - # call embedding model - embeddings_batch, embedding_used_tokens = self._embedding_invoke( - model=model, credentials=credentials, texts=["".join(token) for token in tokens[i : i + max_chunks]] - ) - - used_tokens += embedding_used_tokens - batched_embeddings += embeddings_batch - - results: list[list[list[float]]] = [[] for _ in range(len(texts))] - num_tokens_in_batch: list[list[int]] = [[] for _ in range(len(texts))] - for i in range(len(indices)): - results[indices[i]].append(batched_embeddings[i]) - num_tokens_in_batch[indices[i]].append(len(tokens[i])) - - for i in range(len(texts)): - _result = results[i] - if len(_result) == 0: - embeddings_batch, embedding_used_tokens = self._embedding_invoke( - model=model, credentials=credentials, texts=[" "] - ) - - used_tokens += embedding_used_tokens - average = embeddings_batch[0] - else: - average = np.average(_result, axis=0, weights=num_tokens_in_batch[i]) - embeddings[i] = (average / np.linalg.norm(average)).tolist() - - # calc usage - usage = self._calc_response_usage(model=model, credentials=credentials, tokens=used_tokens) - - return TextEmbeddingResult(embeddings=embeddings, usage=usage, model=model) - - def get_num_tokens(self, model: str, credentials: dict, texts: list[str]) -> int: - """ - Get number of tokens for given prompt messages - - :param model: model name - :param credentials: model credentials - :param texts: texts to embed - :return: - """ - if len(texts) == 0: - return 0 - - full_text = " ".join(texts) - - try: - response = self._tokenize(model=model, credentials=credentials, text=full_text) - except Exception as e: - raise self._transform_invoke_error(e) - - return len(response) - - def _tokenize(self, model: str, credentials: dict, text: str) -> list[str]: - """ - Tokenize text - :param model: model name - :param credentials: model credentials - :param text: text to tokenize - :return: - """ - if not text: - return [] - - # initialize client - client = cohere.Client(credentials.get("api_key"), base_url=credentials.get("base_url")) - - response = client.tokenize(text=text, model=model, offline=False, request_options=RequestOptions(max_retries=0)) - - return response.token_strings - - def validate_credentials(self, model: str, credentials: dict) -> None: - """ - Validate model credentials - - :param model: model name - :param credentials: model credentials - :return: - """ - try: - # call embedding model - self._embedding_invoke(model=model, credentials=credentials, texts=["ping"]) - except 
Exception as ex: - raise CredentialsValidateFailedError(str(ex)) - - def _embedding_invoke(self, model: str, credentials: dict, texts: list[str]) -> tuple[list[list[float]], int]: - """ - Invoke embedding model - - :param model: model name - :param credentials: model credentials - :param texts: texts to embed - :return: embeddings and used tokens - """ - # initialize client - client = cohere.Client(credentials.get("api_key"), base_url=credentials.get("base_url")) - - # call embedding model - response = client.embed( - texts=texts, - model=model, - input_type="search_document" if len(texts) > 1 else "search_query", - request_options=RequestOptions(max_retries=1), - ) - - return response.embeddings, int(response.meta.billed_units.input_tokens) - - def _calc_response_usage(self, model: str, credentials: dict, tokens: int) -> EmbeddingUsage: - """ - Calculate response usage - - :param model: model name - :param credentials: model credentials - :param tokens: input tokens - :return: usage - """ - # get input price info - input_price_info = self.get_price( - model=model, credentials=credentials, price_type=PriceType.INPUT, tokens=tokens - ) - - # transform usage - usage = EmbeddingUsage( - tokens=tokens, - total_tokens=tokens, - unit_price=input_price_info.unit_price, - price_unit=input_price_info.unit, - total_price=input_price_info.total_amount, - currency=input_price_info.currency, - latency=time.perf_counter() - self.started_at, - ) - - return usage - - @property - def _invoke_error_mapping(self) -> dict[type[InvokeError], list[type[Exception]]]: - """ - Map model invoke error to unified error - The key is the error type thrown to the caller - The value is the error type thrown by the model, - which needs to be converted into a unified error type for the caller. 
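To make the mapping mechanism concrete, here is a minimal sketch of how such a table can drive the base class's _transform_invoke_error, which the new invoke paths above still call; the provider exception and the table contents are invented for illustration, and the real method may differ in detail.

from core.model_runtime.errors.invoke import InvokeError, InvokeRateLimitError

class FakeProviderRateLimit(Exception):  # stand-in for a provider SDK error
    pass

INVOKE_ERROR_MAPPING: dict[type[InvokeError], list[type[Exception]]] = {
    InvokeRateLimitError: [FakeProviderRateLimit],
}

def transform_invoke_error(e: Exception) -> InvokeError:
    # Walk the table and wrap the provider error in its unified counterpart,
    # falling back to the generic InvokeError when nothing matches.
    for unified_cls, provider_errors in INVOKE_ERROR_MAPPING.items():
        if isinstance(e, tuple(provider_errors)):
            return unified_cls(str(e))
    return InvokeError(str(e))

print(type(transform_invoke_error(FakeProviderRateLimit("429"))).__name__)
# -> InvokeRateLimitError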
- - :return: Invoke error mapping - """ - return { - InvokeConnectionError: [cohere.errors.service_unavailable_error.ServiceUnavailableError], - InvokeServerUnavailableError: [cohere.errors.internal_server_error.InternalServerError], - InvokeRateLimitError: [cohere.errors.too_many_requests_error.TooManyRequestsError], - InvokeAuthorizationError: [ - cohere.errors.unauthorized_error.UnauthorizedError, - cohere.errors.forbidden_error.ForbiddenError, - ], - InvokeBadRequestError: [ - cohere.core.api_error.ApiError, - cohere.errors.bad_request_error.BadRequestError, - cohere.errors.not_found_error.NotFoundError, - ], - } diff --git a/api/core/model_runtime/model_providers/fireworks/fireworks.yaml b/api/core/model_runtime/model_providers/fireworks/fireworks.yaml deleted file mode 100644 index cdb87a55e9..0000000000 --- a/api/core/model_runtime/model_providers/fireworks/fireworks.yaml +++ /dev/null @@ -1,30 +0,0 @@ -provider: fireworks -label: - zh_Hans: Fireworks AI - en_US: Fireworks AI -icon_small: - en_US: icon_s_en.svg -icon_large: - en_US: icon_l_en.svg -background: "#FCFDFF" -help: - title: - en_US: Get your API Key from Fireworks AI - zh_Hans: 从 Fireworks AI 获取 API Key - url: - en_US: https://fireworks.ai/account/api-keys -supported_model_types: - - llm - - text-embedding -configurate_methods: - - predefined-model -provider_credential_schema: - credential_form_schemas: - - variable: fireworks_api_key - label: - en_US: API Key - type: secret-input - required: true - placeholder: - zh_Hans: 在此输入您的 API Key - en_US: Enter your API Key diff --git a/api/core/model_runtime/model_providers/fireworks/llm/llama-v3p2-11b-vision-instruct.yaml b/api/core/model_runtime/model_providers/fireworks/llm/llama-v3p2-11b-vision-instruct.yaml deleted file mode 100644 index 31415a24fa..0000000000 --- a/api/core/model_runtime/model_providers/fireworks/llm/llama-v3p2-11b-vision-instruct.yaml +++ /dev/null @@ -1,46 +0,0 @@ -model: accounts/fireworks/models/llama-v3p2-11b-vision-instruct -label: - zh_Hans: Llama 3.2 11B Vision Instruct - en_US: Llama 3.2 11B Vision Instruct -model_type: llm -features: - - agent-thought - - tool-call -model_properties: - mode: chat - context_size: 131072 -parameter_rules: - - name: temperature - use_template: temperature - - name: top_p - use_template: top_p - - name: top_k - label: - zh_Hans: 取样数量 - en_US: Top k - type: int - help: - zh_Hans: 仅从每个后续标记的前 K 个选项中采样。 - en_US: Only sample from the top K options for each subsequent token. 
- - name: max_tokens - use_template: max_tokens - - name: context_length_exceeded_behavior - default: None - label: - zh_Hans: 上下文长度超出行为 - en_US: Context Length Exceeded Behavior - help: - zh_Hans: 上下文长度超出行为 - en_US: Context Length Exceeded Behavior - type: string - options: - - None - - truncate - - error - - name: response_format - use_template: response_format -pricing: - input: '0.2' - output: '0.2' - unit: '0.000001' - currency: USD diff --git a/api/core/model_runtime/model_providers/fireworks/llm/llama-v3p2-1b-instruct.yaml b/api/core/model_runtime/model_providers/fireworks/llm/llama-v3p2-1b-instruct.yaml deleted file mode 100644 index c2fd77d256..0000000000 --- a/api/core/model_runtime/model_providers/fireworks/llm/llama-v3p2-1b-instruct.yaml +++ /dev/null @@ -1,46 +0,0 @@ -model: accounts/fireworks/models/llama-v3p2-1b-instruct -label: - zh_Hans: Llama 3.2 1B Instruct - en_US: Llama 3.2 1B Instruct -model_type: llm -features: - - agent-thought - - tool-call -model_properties: - mode: chat - context_size: 131072 -parameter_rules: - - name: temperature - use_template: temperature - - name: top_p - use_template: top_p - - name: top_k - label: - zh_Hans: 取样数量 - en_US: Top k - type: int - help: - zh_Hans: 仅从每个后续标记的前 K 个选项中采样。 - en_US: Only sample from the top K options for each subsequent token. - - name: max_tokens - use_template: max_tokens - - name: context_length_exceeded_behavior - default: None - label: - zh_Hans: 上下文长度超出行为 - en_US: Context Length Exceeded Behavior - help: - zh_Hans: 上下文长度超出行为 - en_US: Context Length Exceeded Behavior - type: string - options: - - None - - truncate - - error - - name: response_format - use_template: response_format -pricing: - input: '0.1' - output: '0.1' - unit: '0.000001' - currency: USD diff --git a/api/core/model_runtime/model_providers/fireworks/llm/llama-v3p2-3b-instruct.yaml b/api/core/model_runtime/model_providers/fireworks/llm/llama-v3p2-3b-instruct.yaml deleted file mode 100644 index 4b3c459c7b..0000000000 --- a/api/core/model_runtime/model_providers/fireworks/llm/llama-v3p2-3b-instruct.yaml +++ /dev/null @@ -1,46 +0,0 @@ -model: accounts/fireworks/models/llama-v3p2-3b-instruct -label: - zh_Hans: Llama 3.2 3B Instruct - en_US: Llama 3.2 3B Instruct -model_type: llm -features: - - agent-thought - - tool-call -model_properties: - mode: chat - context_size: 131072 -parameter_rules: - - name: temperature - use_template: temperature - - name: top_p - use_template: top_p - - name: top_k - label: - zh_Hans: 取样数量 - en_US: Top k - type: int - help: - zh_Hans: 仅从每个后续标记的前 K 个选项中采样。 - en_US: Only sample from the top K options for each subsequent token. 
- - name: max_tokens - use_template: max_tokens - - name: context_length_exceeded_behavior - default: None - label: - zh_Hans: 上下文长度超出行为 - en_US: Context Length Exceeded Behavior - help: - zh_Hans: 上下文长度超出行为 - en_US: Context Length Exceeded Behavior - type: string - options: - - None - - truncate - - error - - name: response_format - use_template: response_format -pricing: - input: '0.1' - output: '0.1' - unit: '0.000001' - currency: USD diff --git a/api/core/model_runtime/model_providers/fireworks/llm/llama-v3p2-90b-vision-instruct.yaml b/api/core/model_runtime/model_providers/fireworks/llm/llama-v3p2-90b-vision-instruct.yaml deleted file mode 100644 index 0aece7455d..0000000000 --- a/api/core/model_runtime/model_providers/fireworks/llm/llama-v3p2-90b-vision-instruct.yaml +++ /dev/null @@ -1,46 +0,0 @@ -model: accounts/fireworks/models/llama-v3p2-90b-vision-instruct -label: - zh_Hans: Llama 3.2 90B Vision Instruct - en_US: Llama 3.2 90B Vision Instruct -model_type: llm -features: - - agent-thought - - tool-call -model_properties: - mode: chat - context_size: 131072 -parameter_rules: - - name: temperature - use_template: temperature - - name: top_p - use_template: top_p - - name: top_k - label: - zh_Hans: 取样数量 - en_US: Top k - type: int - help: - zh_Hans: 仅从每个后续标记的前 K 个选项中采样。 - en_US: Only sample from the top K options for each subsequent token. - - name: max_tokens - use_template: max_tokens - - name: context_length_exceeded_behavior - default: None - label: - zh_Hans: 上下文长度超出行为 - en_US: Context Length Exceeded Behavior - help: - zh_Hans: 上下文长度超出行为 - en_US: Context Length Exceeded Behavior - type: string - options: - - None - - truncate - - error - - name: response_format - use_template: response_format -pricing: - input: '0.9' - output: '0.9' - unit: '0.000001' - currency: USD diff --git a/api/core/model_runtime/model_providers/fireworks/text_embedding/UAE-Large-V1.yaml b/api/core/model_runtime/model_providers/fireworks/text_embedding/UAE-Large-V1.yaml deleted file mode 100644 index d7c11691cf..0000000000 --- a/api/core/model_runtime/model_providers/fireworks/text_embedding/UAE-Large-V1.yaml +++ /dev/null @@ -1,12 +0,0 @@ -model: WhereIsAI/UAE-Large-V1 -label: - zh_Hans: UAE-Large-V1 - en_US: UAE-Large-V1 -model_type: text-embedding -model_properties: - context_size: 512 - max_chunks: 1 -pricing: - input: '0.008' - unit: '0.000001' - currency: 'USD' diff --git a/api/core/model_runtime/model_providers/fireworks/text_embedding/__init__.py b/api/core/model_runtime/model_providers/fireworks/text_embedding/__init__.py deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/api/core/model_runtime/model_providers/fireworks/text_embedding/gte-base.yaml b/api/core/model_runtime/model_providers/fireworks/text_embedding/gte-base.yaml deleted file mode 100644 index d09bafb4d3..0000000000 --- a/api/core/model_runtime/model_providers/fireworks/text_embedding/gte-base.yaml +++ /dev/null @@ -1,12 +0,0 @@ -model: thenlper/gte-base -label: - zh_Hans: GTE-base - en_US: GTE-base -model_type: text-embedding -model_properties: - context_size: 512 - max_chunks: 1 -pricing: - input: '0.008' - unit: '0.000001' - currency: 'USD' diff --git a/api/core/model_runtime/model_providers/fireworks/text_embedding/gte-large.yaml b/api/core/model_runtime/model_providers/fireworks/text_embedding/gte-large.yaml deleted file mode 100644 index c41fa2f9d3..0000000000 --- a/api/core/model_runtime/model_providers/fireworks/text_embedding/gte-large.yaml +++ /dev/null @@ -1,12 +0,0 @@ -model: thenlper/gte-large -label: - 
zh_Hans: GTE-large - en_US: GTE-large -model_type: text-embedding -model_properties: - context_size: 512 - max_chunks: 1 -pricing: - input: '0.008' - unit: '0.000001' - currency: 'USD' diff --git a/api/core/model_runtime/model_providers/fireworks/text_embedding/nomic-embed-text-v1.5.yaml b/api/core/model_runtime/model_providers/fireworks/text_embedding/nomic-embed-text-v1.5.yaml deleted file mode 100644 index c9098503d9..0000000000 --- a/api/core/model_runtime/model_providers/fireworks/text_embedding/nomic-embed-text-v1.5.yaml +++ /dev/null @@ -1,12 +0,0 @@ -model: nomic-ai/nomic-embed-text-v1.5 -label: - zh_Hans: nomic-embed-text-v1.5 - en_US: nomic-embed-text-v1.5 -model_type: text-embedding -model_properties: - context_size: 8192 - max_chunks: 16 -pricing: - input: '0.008' - unit: '0.000001' - currency: 'USD' diff --git a/api/core/model_runtime/model_providers/fireworks/text_embedding/nomic-embed-text-v1.yaml b/api/core/model_runtime/model_providers/fireworks/text_embedding/nomic-embed-text-v1.yaml deleted file mode 100644 index 89078d3ff6..0000000000 --- a/api/core/model_runtime/model_providers/fireworks/text_embedding/nomic-embed-text-v1.yaml +++ /dev/null @@ -1,12 +0,0 @@ -model: nomic-ai/nomic-embed-text-v1 -label: - zh_Hans: nomic-embed-text-v1 - en_US: nomic-embed-text-v1 -model_type: text-embedding -model_properties: - context_size: 8192 - max_chunks: 16 -pricing: - input: '0.008' - unit: '0.000001' - currency: 'USD' diff --git a/api/core/model_runtime/model_providers/fireworks/text_embedding/text_embedding.py b/api/core/model_runtime/model_providers/fireworks/text_embedding/text_embedding.py deleted file mode 100644 index cdce69ff38..0000000000 --- a/api/core/model_runtime/model_providers/fireworks/text_embedding/text_embedding.py +++ /dev/null @@ -1,151 +0,0 @@ -import time -from collections.abc import Mapping -from typing import Optional, Union - -import numpy as np -from openai import OpenAI - -from core.embedding.embedding_constant import EmbeddingInputType -from core.model_runtime.entities.model_entities import PriceType -from core.model_runtime.entities.text_embedding_entities import EmbeddingUsage, TextEmbeddingResult -from core.model_runtime.errors.validate import CredentialsValidateFailedError -from core.model_runtime.model_providers.__base.text_embedding_model import TextEmbeddingModel -from core.model_runtime.model_providers.fireworks._common import _CommonFireworks - - -class FireworksTextEmbeddingModel(_CommonFireworks, TextEmbeddingModel): - """ - Model class for Fireworks text embedding model. 
- """ - - def _invoke( - self, - model: str, - credentials: dict, - texts: list[str], - user: Optional[str] = None, - input_type: EmbeddingInputType = EmbeddingInputType.DOCUMENT, - ) -> TextEmbeddingResult: - """ - Invoke text embedding model - - :param model: model name - :param credentials: model credentials - :param texts: texts to embed - :param user: unique user id - :param input_type: input type - :return: embeddings result - """ - - credentials_kwargs = self._to_credential_kwargs(credentials) - client = OpenAI(**credentials_kwargs) - - extra_model_kwargs = {} - if user: - extra_model_kwargs["user"] = user - - extra_model_kwargs["encoding_format"] = "float" - - context_size = self._get_context_size(model, credentials) - max_chunks = self._get_max_chunks(model, credentials) - - inputs = [] - indices = [] - used_tokens = 0 - - for i, text in enumerate(texts): - # Here token count is only an approximation based on the GPT2 tokenizer - # TODO: Optimize for better token estimation and chunking - num_tokens = self._get_num_tokens_by_gpt2(text) - - if num_tokens >= context_size: - cutoff = int(np.floor(len(text) * (context_size / num_tokens))) - # if num tokens is larger than context length, only use the start - inputs.append(text[0:cutoff]) - else: - inputs.append(text) - indices += [i] - - batched_embeddings = [] - _iter = range(0, len(inputs), max_chunks) - - for i in _iter: - embeddings_batch, embedding_used_tokens = self._embedding_invoke( - model=model, - client=client, - texts=inputs[i : i + max_chunks], - extra_model_kwargs=extra_model_kwargs, - ) - used_tokens += embedding_used_tokens - batched_embeddings += embeddings_batch - - usage = self._calc_response_usage(model=model, credentials=credentials, tokens=used_tokens) - return TextEmbeddingResult(embeddings=batched_embeddings, usage=usage, model=model) - - def get_num_tokens(self, model: str, credentials: dict, texts: list[str]) -> int: - """ - Get number of tokens for given prompt messages - - :param model: model name - :param credentials: model credentials - :param texts: texts to embed - :return: - """ - return sum(self._get_num_tokens_by_gpt2(text) for text in texts) - - def validate_credentials(self, model: str, credentials: Mapping) -> None: - """ - Validate model credentials - - :param model: model name - :param credentials: model credentials - :return: - """ - try: - # transform credentials to kwargs for model instance - credentials_kwargs = self._to_credential_kwargs(credentials) - client = OpenAI(**credentials_kwargs) - - # call embedding model - self._embedding_invoke(model=model, client=client, texts=["ping"], extra_model_kwargs={}) - except Exception as ex: - raise CredentialsValidateFailedError(str(ex)) - - def _embedding_invoke( - self, model: str, client: OpenAI, texts: Union[list[str], str], extra_model_kwargs: dict - ) -> tuple[list[list[float]], int]: - """ - Invoke embedding model - :param model: model name - :param client: model client - :param texts: texts to embed - :param extra_model_kwargs: extra model kwargs - :return: embeddings and used tokens - """ - response = client.embeddings.create(model=model, input=texts, **extra_model_kwargs) - return [data.embedding for data in response.data], response.usage.total_tokens - - def _calc_response_usage(self, model: str, credentials: dict, tokens: int) -> EmbeddingUsage: - """ - Calculate response usage - - :param model: model name - :param credentials: model credentials - :param tokens: input tokens - :return: usage - """ - input_price_info = self.get_price( - 
model=model, credentials=credentials, tokens=tokens, price_type=PriceType.INPUT - ) - - usage = EmbeddingUsage( - tokens=tokens, - total_tokens=tokens, - unit_price=input_price_info.unit_price, - price_unit=input_price_info.unit, - total_price=input_price_info.total_amount, - currency=input_price_info.currency, - latency=time.perf_counter() - self.started_at, - ) - - return usage diff --git a/api/core/model_runtime/model_providers/fishaudio/fishaudio.yaml b/api/core/model_runtime/model_providers/fishaudio/fishaudio.yaml deleted file mode 100644 index 479eb7fb85..0000000000 --- a/api/core/model_runtime/model_providers/fishaudio/fishaudio.yaml +++ /dev/null @@ -1,76 +0,0 @@ -provider: fishaudio -label: - en_US: Fish Audio -description: - en_US: Models provided by Fish Audio, currently only support TTS. - zh_Hans: Fish Audio 提供的模型,目前仅支持 TTS。 -icon_small: - en_US: fishaudio_s_en.svg -icon_large: - en_US: fishaudio_l_en.svg -background: "#E5E7EB" -help: - title: - en_US: Get your API key from Fish Audio - zh_Hans: 从 Fish Audio 获取你的 API Key - url: - en_US: https://fish.audio/go-api/ -supported_model_types: - - tts -configurate_methods: - - predefined-model -provider_credential_schema: - credential_form_schemas: - - variable: api_key - label: - en_US: API Key - type: secret-input - required: true - placeholder: - zh_Hans: 在此输入您的 API Key - en_US: Enter your API Key - - variable: api_base - label: - en_US: API URL - type: text-input - required: false - default: https://api.fish.audio - placeholder: - en_US: Enter your API URL - zh_Hans: 在此输入您的 API URL - - variable: use_public_models - label: - en_US: Use Public Models - type: select - required: false - default: "false" - placeholder: - en_US: Toggle to use public models - zh_Hans: 切换以使用公共模型 - options: - - value: "true" - label: - en_US: Allow Public Models - zh_Hans: 使用公共模型 - - value: "false" - label: - en_US: Private Models Only - zh_Hans: 仅使用私有模型 - - variable: latency - label: - en_US: Latency - type: select - required: false - default: "normal" - placeholder: - en_US: Toggle to choice latency - zh_Hans: 切换以调整延迟 - options: - - value: "balanced" - label: - en_US: Low (may affect quality) - zh_Hans: 低延迟 (可能降低质量) - - value: "normal" - label: - en_US: Normal - zh_Hans: 标准 diff --git a/api/core/model_runtime/model_providers/google/llm/gemini-1.5-flash-001.yaml b/api/core/model_runtime/model_providers/google/llm/gemini-1.5-flash-001.yaml deleted file mode 100644 index d84e9937e0..0000000000 --- a/api/core/model_runtime/model_providers/google/llm/gemini-1.5-flash-001.yaml +++ /dev/null @@ -1,48 +0,0 @@ -model: gemini-1.5-flash-001 -label: - en_US: Gemini 1.5 Flash 001 -model_type: llm -features: - - agent-thought - - vision - - tool-call - - stream-tool-call -model_properties: - mode: chat - context_size: 1048576 -parameter_rules: - - name: temperature - use_template: temperature - - name: top_p - use_template: top_p - - name: top_k - label: - zh_Hans: 取样数量 - en_US: Top k - type: int - help: - zh_Hans: 仅从每个后续标记的前 K 个选项中采样。 - en_US: Only sample from the top K options for each subsequent token. - required: false - - name: max_tokens_to_sample - use_template: max_tokens - required: true - default: 8192 - min: 1 - max: 8192 - - name: response_format - use_template: response_format - - name: stream - label: - zh_Hans: 流式输出 - en_US: Stream - type: boolean - help: - zh_Hans: 流式输出允许模型在生成文本的过程中逐步返回结果,而不是一次性生成全部结果后再返回。 - en_US: Streaming output allows the model to return results incrementally as it generates text, rather than generating all the results at once. 
- default: false -pricing: - input: '0.00' - output: '0.00' - unit: '0.000001' - currency: USD diff --git a/api/core/model_runtime/model_providers/google/llm/gemini-1.5-flash-002.yaml b/api/core/model_runtime/model_providers/google/llm/gemini-1.5-flash-002.yaml deleted file mode 100644 index 2ff70564b2..0000000000 --- a/api/core/model_runtime/model_providers/google/llm/gemini-1.5-flash-002.yaml +++ /dev/null @@ -1,48 +0,0 @@ -model: gemini-1.5-flash-002 -label: - en_US: Gemini 1.5 Flash 002 -model_type: llm -features: - - agent-thought - - vision - - tool-call - - stream-tool-call -model_properties: - mode: chat - context_size: 1048576 -parameter_rules: - - name: temperature - use_template: temperature - - name: top_p - use_template: top_p - - name: top_k - label: - zh_Hans: 取样数量 - en_US: Top k - type: int - help: - zh_Hans: 仅从每个后续标记的前 K 个选项中采样。 - en_US: Only sample from the top K options for each subsequent token. - required: false - - name: max_tokens_to_sample - use_template: max_tokens - required: true - default: 8192 - min: 1 - max: 8192 - - name: response_format - use_template: response_format - - name: stream - label: - zh_Hans: 流式输出 - en_US: Stream - type: boolean - help: - zh_Hans: 流式输出允许模型在生成文本的过程中逐步返回结果,而不是一次性生成全部结果后再返回。 - en_US: Streaming output allows the model to return results incrementally as it generates text, rather than generating all the results at once. - default: false -pricing: - input: '0.00' - output: '0.00' - unit: '0.000001' - currency: USD diff --git a/api/core/model_runtime/model_providers/google/llm/gemini-1.5-flash-8b-exp-0924.yaml b/api/core/model_runtime/model_providers/google/llm/gemini-1.5-flash-8b-exp-0924.yaml deleted file mode 100644 index 2aea8149f4..0000000000 --- a/api/core/model_runtime/model_providers/google/llm/gemini-1.5-flash-8b-exp-0924.yaml +++ /dev/null @@ -1,48 +0,0 @@ -model: gemini-1.5-flash-8b-exp-0924 -label: - en_US: Gemini 1.5 Flash 8B 0924 -model_type: llm -features: - - agent-thought - - vision - - tool-call - - stream-tool-call -model_properties: - mode: chat - context_size: 1048576 -parameter_rules: - - name: temperature - use_template: temperature - - name: top_p - use_template: top_p - - name: top_k - label: - zh_Hans: 取样数量 - en_US: Top k - type: int - help: - zh_Hans: 仅从每个后续标记的前 K 个选项中采样。 - en_US: Only sample from the top K options for each subsequent token. - required: false - - name: max_tokens_to_sample - use_template: max_tokens - required: true - default: 8192 - min: 1 - max: 8192 - - name: response_format - use_template: response_format - - name: stream - label: - zh_Hans: 流式输出 - en_US: Stream - type: boolean - help: - zh_Hans: 流式输出允许模型在生成文本的过程中逐步返回结果,而不是一次性生成全部结果后再返回。 - en_US: Streaming output allows the model to return results incrementally as it generates text, rather than generating all the results at once. 
- default: false -pricing: - input: '0.00' - output: '0.00' - unit: '0.000001' - currency: USD diff --git a/api/core/model_runtime/model_providers/google/llm/gemini-1.5-flash.yaml b/api/core/model_runtime/model_providers/google/llm/gemini-1.5-flash.yaml deleted file mode 100644 index dfd55c3a94..0000000000 --- a/api/core/model_runtime/model_providers/google/llm/gemini-1.5-flash.yaml +++ /dev/null @@ -1,48 +0,0 @@ -model: gemini-1.5-flash -label: - en_US: Gemini 1.5 Flash -model_type: llm -features: - - agent-thought - - vision - - tool-call - - stream-tool-call -model_properties: - mode: chat - context_size: 1048576 -parameter_rules: - - name: temperature - use_template: temperature - - name: top_p - use_template: top_p - - name: top_k - label: - zh_Hans: 取样数量 - en_US: Top k - type: int - help: - zh_Hans: 仅从每个后续标记的前 K 个选项中采样。 - en_US: Only sample from the top K options for each subsequent token. - required: false - - name: max_tokens_to_sample - use_template: max_tokens - required: true - default: 8192 - min: 1 - max: 8192 - - name: response_format - use_template: response_format - - name: stream - label: - zh_Hans: 流式输出 - en_US: Stream - type: boolean - help: - zh_Hans: 流式输出允许模型在生成文本的过程中逐步返回结果,而不是一次性生成全部结果后再返回。 - en_US: Streaming output allows the model to return results incrementally as it generates text, rather than generating all the results at once. - default: false -pricing: - input: '0.00' - output: '0.00' - unit: '0.000001' - currency: USD diff --git a/api/core/model_runtime/model_providers/google/llm/gemini-1.5-pro-001.yaml b/api/core/model_runtime/model_providers/google/llm/gemini-1.5-pro-001.yaml deleted file mode 100644 index a1feff171d..0000000000 --- a/api/core/model_runtime/model_providers/google/llm/gemini-1.5-pro-001.yaml +++ /dev/null @@ -1,48 +0,0 @@ -model: gemini-1.5-pro-001 -label: - en_US: Gemini 1.5 Pro 001 -model_type: llm -features: - - agent-thought - - vision - - tool-call - - stream-tool-call -model_properties: - mode: chat - context_size: 2097152 -parameter_rules: - - name: temperature - use_template: temperature - - name: top_p - use_template: top_p - - name: top_k - label: - zh_Hans: 取样数量 - en_US: Top k - type: int - help: - zh_Hans: 仅从每个后续标记的前 K 个选项中采样。 - en_US: Only sample from the top K options for each subsequent token. - required: false - - name: max_tokens_to_sample - use_template: max_tokens - required: true - default: 8192 - min: 1 - max: 8192 - - name: response_format - use_template: response_format - - name: stream - label: - zh_Hans: 流式输出 - en_US: Stream - type: boolean - help: - zh_Hans: 流式输出允许模型在生成文本的过程中逐步返回结果,而不是一次性生成全部结果后再返回。 - en_US: Streaming output allows the model to return results incrementally as it generates text, rather than generating all the results at once. 
- default: false -pricing: - input: '0.00' - output: '0.00' - unit: '0.000001' - currency: USD diff --git a/api/core/model_runtime/model_providers/google/llm/gemini-1.5-pro-002.yaml b/api/core/model_runtime/model_providers/google/llm/gemini-1.5-pro-002.yaml deleted file mode 100644 index 9ae07a06c5..0000000000 --- a/api/core/model_runtime/model_providers/google/llm/gemini-1.5-pro-002.yaml +++ /dev/null @@ -1,48 +0,0 @@ -model: gemini-1.5-pro-002 -label: - en_US: Gemini 1.5 Pro 002 -model_type: llm -features: - - agent-thought - - vision - - tool-call - - stream-tool-call -model_properties: - mode: chat - context_size: 2097152 -parameter_rules: - - name: temperature - use_template: temperature - - name: top_p - use_template: top_p - - name: top_k - label: - zh_Hans: 取样数量 - en_US: Top k - type: int - help: - zh_Hans: 仅从每个后续标记的前 K 个选项中采样。 - en_US: Only sample from the top K options for each subsequent token. - required: false - - name: max_tokens_to_sample - use_template: max_tokens - required: true - default: 8192 - min: 1 - max: 8192 - - name: response_format - use_template: response_format - - name: stream - label: - zh_Hans: 流式输出 - en_US: Stream - type: boolean - help: - zh_Hans: 流式输出允许模型在生成文本的过程中逐步返回结果,而不是一次性生成全部结果后再返回。 - en_US: Streaming output allows the model to return results incrementally as it generates text, rather than generating all the results at once. - default: false -pricing: - input: '0.00' - output: '0.00' - unit: '0.000001' - currency: USD diff --git a/api/core/model_runtime/model_providers/google/llm/gemini-1.5-pro.yaml b/api/core/model_runtime/model_providers/google/llm/gemini-1.5-pro.yaml deleted file mode 100644 index bdd70b34a2..0000000000 --- a/api/core/model_runtime/model_providers/google/llm/gemini-1.5-pro.yaml +++ /dev/null @@ -1,48 +0,0 @@ -model: gemini-1.5-pro -label: - en_US: Gemini 1.5 Pro -model_type: llm -features: - - agent-thought - - vision - - tool-call - - stream-tool-call -model_properties: - mode: chat - context_size: 2097152 -parameter_rules: - - name: temperature - use_template: temperature - - name: top_p - use_template: top_p - - name: top_k - label: - zh_Hans: 取样数量 - en_US: Top k - type: int - help: - zh_Hans: 仅从每个后续标记的前 K 个选项中采样。 - en_US: Only sample from the top K options for each subsequent token. - required: false - - name: max_tokens_to_sample - use_template: max_tokens - required: true - default: 8192 - min: 1 - max: 8192 - - name: response_format - use_template: response_format - - name: stream - label: - zh_Hans: 流式输出 - en_US: Stream - type: boolean - help: - zh_Hans: 流式输出允许模型在生成文本的过程中逐步返回结果,而不是一次性生成全部结果后再返回。 - en_US: Streaming output allows the model to return results incrementally as it generates text, rather than generating all the results at once. 
- default: false -pricing: - input: '0.00' - output: '0.00' - unit: '0.000001' - currency: USD diff --git a/api/core/model_runtime/model_providers/groq/llm/llama-3.2-11b-text-preview.yaml b/api/core/model_runtime/model_providers/groq/llm/llama-3.2-11b-text-preview.yaml deleted file mode 100644 index 019d453723..0000000000 --- a/api/core/model_runtime/model_providers/groq/llm/llama-3.2-11b-text-preview.yaml +++ /dev/null @@ -1,25 +0,0 @@ -model: llama-3.2-11b-text-preview -label: - zh_Hans: Llama 3.2 11B Text (Preview) - en_US: Llama 3.2 11B Text (Preview) -model_type: llm -features: - - agent-thought -model_properties: - mode: chat - context_size: 131072 -parameter_rules: - - name: temperature - use_template: temperature - - name: top_p - use_template: top_p - - name: max_tokens - use_template: max_tokens - default: 512 - min: 1 - max: 8192 -pricing: - input: '0.05' - output: '0.1' - unit: '0.000001' - currency: USD diff --git a/api/core/model_runtime/model_providers/groq/llm/llama-3.2-1b-preview.yaml b/api/core/model_runtime/model_providers/groq/llm/llama-3.2-1b-preview.yaml deleted file mode 100644 index a44e4ff508..0000000000 --- a/api/core/model_runtime/model_providers/groq/llm/llama-3.2-1b-preview.yaml +++ /dev/null @@ -1,25 +0,0 @@ -model: llama-3.2-1b-preview -label: - zh_Hans: Llama 3.2 1B Text (Preview) - en_US: Llama 3.2 1B Text (Preview) -model_type: llm -features: - - agent-thought -model_properties: - mode: chat - context_size: 131072 -parameter_rules: - - name: temperature - use_template: temperature - - name: top_p - use_template: top_p - - name: max_tokens - use_template: max_tokens - default: 512 - min: 1 - max: 8192 -pricing: - input: '0.05' - output: '0.1' - unit: '0.000001' - currency: USD diff --git a/api/core/model_runtime/model_providers/groq/llm/llama-3.2-3b-preview.yaml b/api/core/model_runtime/model_providers/groq/llm/llama-3.2-3b-preview.yaml deleted file mode 100644 index f2fdd0a05e..0000000000 --- a/api/core/model_runtime/model_providers/groq/llm/llama-3.2-3b-preview.yaml +++ /dev/null @@ -1,25 +0,0 @@ -model: llama-3.2-3b-preview -label: - zh_Hans: Llama 3.2 3B Text (Preview) - en_US: Llama 3.2 3B Text (Preview) -model_type: llm -features: - - agent-thought -model_properties: - mode: chat - context_size: 131072 -parameter_rules: - - name: temperature - use_template: temperature - - name: top_p - use_template: top_p - - name: max_tokens - use_template: max_tokens - default: 512 - min: 1 - max: 8192 -pricing: - input: '0.05' - output: '0.1' - unit: '0.000001' - currency: USD diff --git a/api/core/model_runtime/model_providers/groq/llm/llama-3.2-90b-text-preview.yaml b/api/core/model_runtime/model_providers/groq/llm/llama-3.2-90b-text-preview.yaml deleted file mode 100644 index 3b34e7c079..0000000000 --- a/api/core/model_runtime/model_providers/groq/llm/llama-3.2-90b-text-preview.yaml +++ /dev/null @@ -1,25 +0,0 @@ -model: llama-3.2-90b-text-preview -label: - zh_Hans: Llama 3.2 90B Text (Preview) - en_US: Llama 3.2 90B Text (Preview) -model_type: llm -features: - - agent-thought -model_properties: - mode: chat - context_size: 131072 -parameter_rules: - - name: temperature - use_template: temperature - - name: top_p - use_template: top_p - - name: max_tokens - use_template: max_tokens - default: 512 - min: 1 - max: 8192 -pricing: - input: '0.05' - output: '0.1' - unit: '0.000001' - currency: USD diff --git a/api/core/model_runtime/model_providers/huggingface_hub/text_embedding/text_embedding.py 
b/api/core/model_runtime/model_providers/huggingface_hub/text_embedding/text_embedding.py deleted file mode 100644 index b2e6d1b652..0000000000 --- a/api/core/model_runtime/model_providers/huggingface_hub/text_embedding/text_embedding.py +++ /dev/null @@ -1,189 +0,0 @@ -import json -import time -from typing import Optional - -import numpy as np -import requests -from huggingface_hub import HfApi, InferenceClient - -from core.embedding.embedding_constant import EmbeddingInputType -from core.model_runtime.entities.common_entities import I18nObject -from core.model_runtime.entities.model_entities import AIModelEntity, FetchFrom, ModelType, PriceType -from core.model_runtime.entities.text_embedding_entities import EmbeddingUsage, TextEmbeddingResult -from core.model_runtime.errors.validate import CredentialsValidateFailedError -from core.model_runtime.model_providers.__base.text_embedding_model import TextEmbeddingModel -from core.model_runtime.model_providers.huggingface_hub._common import _CommonHuggingfaceHub - -HUGGINGFACE_ENDPOINT_API = "https://api.endpoints.huggingface.cloud/v2/endpoint/" - - -class HuggingfaceHubTextEmbeddingModel(_CommonHuggingfaceHub, TextEmbeddingModel): - def _invoke( - self, - model: str, - credentials: dict, - texts: list[str], - user: Optional[str] = None, - input_type: EmbeddingInputType = EmbeddingInputType.DOCUMENT, - ) -> TextEmbeddingResult: - """ - Invoke text embedding model - - :param model: model name - :param credentials: model credentials - :param texts: texts to embed - :param user: unique user id - :param input_type: input type - :return: embeddings result - """ - client = InferenceClient(token=credentials["huggingfacehub_api_token"]) - - execute_model = model - - if credentials["huggingfacehub_api_type"] == "inference_endpoints": - execute_model = credentials["huggingfacehub_endpoint_url"] - - output = client.post( - json={"inputs": texts, "options": {"wait_for_model": False, "use_cache": False}}, model=execute_model - ) - - embeddings = json.loads(output.decode()) - - tokens = self.get_num_tokens(model, credentials, texts) - usage = self._calc_response_usage(model, credentials, tokens) - - return TextEmbeddingResult(embeddings=self._mean_pooling(embeddings), usage=usage, model=model) - - def get_num_tokens(self, model: str, credentials: dict, texts: list[str]) -> int: - num_tokens = 0 - for text in texts: - num_tokens += self._get_num_tokens_by_gpt2(text) - return num_tokens - - def validate_credentials(self, model: str, credentials: dict) -> None: - try: - if "huggingfacehub_api_type" not in credentials: - raise CredentialsValidateFailedError("Huggingface Hub Endpoint Type must be provided.") - - if "huggingfacehub_api_token" not in credentials: - raise CredentialsValidateFailedError("Huggingface Hub API Token must be provided.") - - if credentials["huggingfacehub_api_type"] == "inference_endpoints": - if "huggingface_namespace" not in credentials: - raise CredentialsValidateFailedError( - "Huggingface Hub User Name / Organization Name must be provided." 
- ) - - if "huggingfacehub_endpoint_url" not in credentials: - raise CredentialsValidateFailedError("Huggingface Hub Endpoint URL must be provided.") - - if "task_type" not in credentials: - raise CredentialsValidateFailedError("Huggingface Hub Task Type must be provided.") - - if credentials["task_type"] != "feature-extraction": - raise CredentialsValidateFailedError("Huggingface Hub Task Type is invalid.") - - self._check_endpoint_url_model_repository_name(credentials, model) - - model = credentials["huggingfacehub_endpoint_url"] - - elif credentials["huggingfacehub_api_type"] == "hosted_inference_api": - self._check_hosted_model_task_type(credentials["huggingfacehub_api_token"], model) - else: - raise CredentialsValidateFailedError("Huggingface Hub Endpoint Type is invalid.") - - client = InferenceClient(token=credentials["huggingfacehub_api_token"]) - client.feature_extraction(text="hello world", model=model) - except Exception as ex: - raise CredentialsValidateFailedError(str(ex)) - - def get_customizable_model_schema(self, model: str, credentials: dict) -> Optional[AIModelEntity]: - entity = AIModelEntity( - model=model, - label=I18nObject(en_US=model), - fetch_from=FetchFrom.CUSTOMIZABLE_MODEL, - model_type=ModelType.TEXT_EMBEDDING, - model_properties={"context_size": 10000, "max_chunks": 1}, - ) - return entity - - # https://huggingface.co/docs/api-inference/detailed_parameters#feature-extraction-task - # Returned values are a list of floats, or a list[list[floats]] - # (depending on if you sent a string or a list of string, - # and if the automatic reduction, usually mean_pooling for instance was applied for you or not. - # This should be explained on the model's README.) - @staticmethod - def _mean_pooling(embeddings: list) -> list[float]: - # If automatic reduction by giving model, no need to mean_pooling. - # For example one: List[List[float]] - if not isinstance(embeddings[0][0], list): - return embeddings - - # For example two: List[List[List[float]]], need to mean_pooling. 
- sentence_embeddings = [np.mean(embedding[0], axis=0).tolist() for embedding in embeddings] - return sentence_embeddings - - @staticmethod - def _check_hosted_model_task_type(huggingfacehub_api_token: str, model_name: str) -> None: - hf_api = HfApi(token=huggingfacehub_api_token) - model_info = hf_api.model_info(repo_id=model_name) - - try: - if not model_info: - raise ValueError(f"Model {model_name} not found.") - - if "inference" in model_info.cardData and not model_info.cardData["inference"]: - raise ValueError(f"Inference API has been turned off for this model {model_name}.") - - valid_tasks = "feature-extraction" - if model_info.pipeline_tag not in valid_tasks: - raise ValueError(f"Model {model_name} is not a valid task, must be one of {valid_tasks}.") - except Exception as e: - raise CredentialsValidateFailedError(f"{str(e)}") - - def _calc_response_usage(self, model: str, credentials: dict, tokens: int) -> EmbeddingUsage: - input_price_info = self.get_price( - model=model, credentials=credentials, price_type=PriceType.INPUT, tokens=tokens - ) - - # transform usage - usage = EmbeddingUsage( - tokens=tokens, - total_tokens=tokens, - unit_price=input_price_info.unit_price, - price_unit=input_price_info.unit, - total_price=input_price_info.total_amount, - currency=input_price_info.currency, - latency=time.perf_counter() - self.started_at, - ) - - return usage - - @staticmethod - def _check_endpoint_url_model_repository_name(credentials: dict, model_name: str): - try: - url = f'{HUGGINGFACE_ENDPOINT_API}{credentials["huggingface_namespace"]}' - headers = { - "Authorization": f'Bearer {credentials["huggingfacehub_api_token"]}', - "Content-Type": "application/json", - } - - response = requests.get(url=url, headers=headers) - - if response.status_code != 200: - raise ValueError("User Name or Organization Name is invalid.") - - model_repository_name = "" - - for item in response.json().get("items", []): - if item.get("status", {}).get("url") == credentials["huggingfacehub_endpoint_url"]: - model_repository_name = item.get("model", {}).get("repository") - break - - if model_repository_name != model_name: - raise ValueError( - f"Model Name {model_name} is invalid. Please check it on the inference endpoints console." 
- ) - - except Exception as e: - raise ValueError(str(e)) diff --git a/api/core/model_runtime/model_providers/huggingface_tei/text_embedding/text_embedding.py b/api/core/model_runtime/model_providers/huggingface_tei/text_embedding/text_embedding.py deleted file mode 100644 index b8ff3ca549..0000000000 --- a/api/core/model_runtime/model_providers/huggingface_tei/text_embedding/text_embedding.py +++ /dev/null @@ -1,209 +0,0 @@ -import time -from typing import Optional - -from core.embedding.embedding_constant import EmbeddingInputType -from core.model_runtime.entities.common_entities import I18nObject -from core.model_runtime.entities.model_entities import AIModelEntity, FetchFrom, ModelPropertyKey, ModelType, PriceType -from core.model_runtime.entities.text_embedding_entities import EmbeddingUsage, TextEmbeddingResult -from core.model_runtime.errors.invoke import ( - InvokeAuthorizationError, - InvokeBadRequestError, - InvokeConnectionError, - InvokeError, - InvokeRateLimitError, - InvokeServerUnavailableError, -) -from core.model_runtime.errors.validate import CredentialsValidateFailedError -from core.model_runtime.model_providers.__base.text_embedding_model import TextEmbeddingModel -from core.model_runtime.model_providers.huggingface_tei.tei_helper import TeiHelper - - -class HuggingfaceTeiTextEmbeddingModel(TextEmbeddingModel): - """ - Model class for Text Embedding Inference text embedding model. - """ - - def _invoke( - self, - model: str, - credentials: dict, - texts: list[str], - user: Optional[str] = None, - input_type: EmbeddingInputType = EmbeddingInputType.DOCUMENT, - ) -> TextEmbeddingResult: - """ - Invoke text embedding model - - credentials should be like: - { - 'server_url': 'server url', - 'model_uid': 'model uid', - } - - :param model: model name - :param credentials: model credentials - :param texts: texts to embed - :param user: unique user id - :param input_type: input type - :return: embeddings result - """ - server_url = credentials["server_url"] - - server_url = server_url.removesuffix("/") - - # get model properties - context_size = self._get_context_size(model, credentials) - max_chunks = self._get_max_chunks(model, credentials) - - inputs = [] - indices = [] - used_tokens = 0 - - # get tokenized results from TEI - batched_tokenize_result = TeiHelper.invoke_tokenize(server_url, texts) - - for i, (text, tokenize_result) in enumerate(zip(texts, batched_tokenize_result)): - # Check if the number of tokens is larger than the context size - num_tokens = len(tokenize_result) - - if num_tokens >= context_size: - # Find the best cutoff point - pre_special_token_count = 0 - for token in tokenize_result: - if token["special"]: - pre_special_token_count += 1 - else: - break - rest_special_token_count = ( - len([token for token in tokenize_result if token["special"]]) - pre_special_token_count - ) - - # Calculate the cutoff point, leave 20 extra space to avoid exceeding the limit - token_cutoff = context_size - rest_special_token_count - 20 - - # Find the cutoff index - cutpoint_token = tokenize_result[token_cutoff] - cutoff = cutpoint_token["start"] - - inputs.append(text[0:cutoff]) - else: - inputs.append(text) - indices += [i] - - batched_embeddings = [] - _iter = range(0, len(inputs), max_chunks) - - try: - used_tokens = 0 - for i in _iter: - iter_texts = inputs[i : i + max_chunks] - results = TeiHelper.invoke_embeddings(server_url, iter_texts) - embeddings = results["data"] - embeddings = [embedding["embedding"] for embedding in embeddings] - 
batched_embeddings.extend(embeddings) - - usage = results["usage"] - used_tokens += usage["total_tokens"] - except RuntimeError as e: - raise InvokeServerUnavailableError(str(e)) - - usage = self._calc_response_usage(model=model, credentials=credentials, tokens=used_tokens) - - result = TextEmbeddingResult(model=model, embeddings=batched_embeddings, usage=usage) - - return result - - def get_num_tokens(self, model: str, credentials: dict, texts: list[str]) -> int: - """ - Get number of tokens for given prompt messages - - :param model: model name - :param credentials: model credentials - :param texts: texts to embed - :return: - """ - num_tokens = 0 - server_url = credentials["server_url"] - - server_url = server_url.removesuffix("/") - - batch_tokens = TeiHelper.invoke_tokenize(server_url, texts) - num_tokens = sum(len(tokens) for tokens in batch_tokens) - return num_tokens - - def validate_credentials(self, model: str, credentials: dict) -> None: - """ - Validate model credentials - - :param model: model name - :param credentials: model credentials - :return: - """ - try: - server_url = credentials["server_url"] - extra_args = TeiHelper.get_tei_extra_parameter(server_url, model) - print(extra_args) - if extra_args.model_type != "embedding": - raise CredentialsValidateFailedError("Current model is not a embedding model") - - credentials["context_size"] = extra_args.max_input_length - credentials["max_chunks"] = extra_args.max_client_batch_size - self._invoke(model=model, credentials=credentials, texts=["ping"]) - except Exception as ex: - raise CredentialsValidateFailedError(str(ex)) - - @property - def _invoke_error_mapping(self) -> dict[type[InvokeError], list[type[Exception]]]: - return { - InvokeConnectionError: [InvokeConnectionError], - InvokeServerUnavailableError: [InvokeServerUnavailableError], - InvokeRateLimitError: [InvokeRateLimitError], - InvokeAuthorizationError: [InvokeAuthorizationError], - InvokeBadRequestError: [KeyError], - } - - def _calc_response_usage(self, model: str, credentials: dict, tokens: int) -> EmbeddingUsage: - """ - Calculate response usage - - :param model: model name - :param credentials: model credentials - :param tokens: input tokens - :return: usage - """ - # get input price info - input_price_info = self.get_price( - model=model, credentials=credentials, price_type=PriceType.INPUT, tokens=tokens - ) - - # transform usage - usage = EmbeddingUsage( - tokens=tokens, - total_tokens=tokens, - unit_price=input_price_info.unit_price, - price_unit=input_price_info.unit, - total_price=input_price_info.total_amount, - currency=input_price_info.currency, - latency=time.perf_counter() - self.started_at, - ) - - return usage - - def get_customizable_model_schema(self, model: str, credentials: dict) -> AIModelEntity | None: - """ - used to define customizable model schema - """ - - entity = AIModelEntity( - model=model, - label=I18nObject(en_US=model), - fetch_from=FetchFrom.CUSTOMIZABLE_MODEL, - model_type=ModelType.TEXT_EMBEDDING, - model_properties={ - ModelPropertyKey.MAX_CHUNKS: int(credentials.get("max_chunks", 1)), - ModelPropertyKey.CONTEXT_SIZE: int(credentials.get("context_size", 512)), - }, - parameter_rules=[], - ) - - return entity diff --git a/api/core/model_runtime/model_providers/hunyuan/text_embedding/text_embedding.py b/api/core/model_runtime/model_providers/hunyuan/text_embedding/text_embedding.py deleted file mode 100644 index 75701ebc54..0000000000 --- a/api/core/model_runtime/model_providers/hunyuan/text_embedding/text_embedding.py +++ 
/dev/null @@ -1,169 +0,0 @@ -import json -import logging -import time -from typing import Optional - -from tencentcloud.common import credential -from tencentcloud.common.exception import TencentCloudSDKException -from tencentcloud.common.profile.client_profile import ClientProfile -from tencentcloud.common.profile.http_profile import HttpProfile -from tencentcloud.hunyuan.v20230901 import hunyuan_client, models - -from core.embedding.embedding_constant import EmbeddingInputType -from core.model_runtime.entities.model_entities import PriceType -from core.model_runtime.entities.text_embedding_entities import EmbeddingUsage, TextEmbeddingResult -from core.model_runtime.errors.invoke import ( - InvokeError, -) -from core.model_runtime.errors.validate import CredentialsValidateFailedError -from core.model_runtime.model_providers.__base.text_embedding_model import TextEmbeddingModel - -logger = logging.getLogger(__name__) - - -class HunyuanTextEmbeddingModel(TextEmbeddingModel): - """ - Model class for Hunyuan text embedding model. - """ - - def _invoke( - self, - model: str, - credentials: dict, - texts: list[str], - user: Optional[str] = None, - input_type: EmbeddingInputType = EmbeddingInputType.DOCUMENT, - ) -> TextEmbeddingResult: - """ - Invoke text embedding model - - :param model: model name - :param credentials: model credentials - :param texts: texts to embed - :param user: unique user id - :param input_type: input type - :return: embeddings result - """ - - if model != "hunyuan-embedding": - raise ValueError("Invalid model name") - - client = self._setup_hunyuan_client(credentials) - - embeddings = [] - token_usage = 0 - - for input in texts: - request = models.GetEmbeddingRequest() - params = {"Input": input} - request.from_json_string(json.dumps(params)) - response = client.GetEmbedding(request) - usage = response.Usage.TotalTokens - - embeddings.extend([data.Embedding for data in response.Data]) - token_usage += usage - - result = TextEmbeddingResult( - model=model, - embeddings=embeddings, - usage=self._calc_response_usage(model=model, credentials=credentials, tokens=token_usage), - ) - - return result - - def validate_credentials(self, model: str, credentials: dict) -> None: - """ - Validate credentials - """ - try: - client = self._setup_hunyuan_client(credentials) - - req = models.ChatCompletionsRequest() - params = { - "Model": model, - "Messages": [{"Role": "user", "Content": "hello"}], - "TopP": 1, - "Temperature": 0, - "Stream": False, - } - req.from_json_string(json.dumps(params)) - client.ChatCompletions(req) - except Exception as e: - raise CredentialsValidateFailedError(f"Credentials validation failed: {e}") - - def _setup_hunyuan_client(self, credentials): - secret_id = credentials["secret_id"] - secret_key = credentials["secret_key"] - cred = credential.Credential(secret_id, secret_key) - httpProfile = HttpProfile() - httpProfile.endpoint = "hunyuan.tencentcloudapi.com" - clientProfile = ClientProfile() - clientProfile.httpProfile = httpProfile - client = hunyuan_client.HunyuanClient(cred, "", clientProfile) - return client - - def _calc_response_usage(self, model: str, credentials: dict, tokens: int) -> EmbeddingUsage: - """ - Calculate response usage - - :param model: model name - :param credentials: model credentials - :param tokens: input tokens - :return: usage - """ - # get input price info - input_price_info = self.get_price( - model=model, credentials=credentials, price_type=PriceType.INPUT, tokens=tokens - ) - - # transform usage - usage = EmbeddingUsage( - 
tokens=tokens, - total_tokens=tokens, - unit_price=input_price_info.unit_price, - price_unit=input_price_info.unit, - total_price=input_price_info.total_amount, - currency=input_price_info.currency, - latency=time.perf_counter() - self.started_at, - ) - - return usage - - @property - def _invoke_error_mapping(self) -> dict[type[InvokeError], list[type[Exception]]]: - """ - Map model invoke error to unified error - The key is the error type thrown to the caller - The value is the error type thrown by the model, - which needs to be converted into a unified error type for the caller. - - :return: Invoke error mapping - """ - return { - InvokeError: [TencentCloudSDKException], - } - - def get_num_tokens(self, model: str, credentials: dict, texts: list[str]) -> int: - """ - Get number of tokens for given prompt messages - - :param model: model name - :param credentials: model credentials - :param texts: texts to embed - :return: - """ - # client = self._setup_hunyuan_client(credentials) - - num_tokens = 0 - for text in texts: - num_tokens += self._get_num_tokens_by_gpt2(text) - # use client.GetTokenCount to get num tokens - # request = models.GetTokenCountRequest() - # params = { - # "Prompt": text - # } - # request.from_json_string(json.dumps(params)) - # response = client.GetTokenCount(request) - # num_tokens += response.TokenCount - - return num_tokens diff --git a/api/core/model_runtime/model_providers/jina/jina.yaml b/api/core/model_runtime/model_providers/jina/jina.yaml deleted file mode 100644 index 970b22965b..0000000000 --- a/api/core/model_runtime/model_providers/jina/jina.yaml +++ /dev/null @@ -1,69 +0,0 @@ -provider: jina -label: - en_US: Jina AI -description: - en_US: Embedding and Rerank Model Supported -icon_small: - en_US: icon_s_en.svg -icon_large: - en_US: icon_l_en.svg -background: "#EFFDFD" -help: - title: - en_US: Get your API key from Jina AI - zh_Hans: 从 Jina AI 获取 API Key - url: - en_US: https://jina.ai/ -supported_model_types: - - text-embedding - - rerank -configurate_methods: - - predefined-model - - customizable-model -provider_credential_schema: - credential_form_schemas: - - variable: api_key - label: - en_US: API Key - type: secret-input - required: true - placeholder: - zh_Hans: 在此输入您的 API Key - en_US: Enter your API Key -model_credential_schema: - model: - label: - en_US: Model Name - zh_Hans: 模型名称 - placeholder: - en_US: Enter your model name - zh_Hans: 输入模型名称 - credential_form_schemas: - - variable: api_key - label: - en_US: API Key - type: secret-input - required: true - placeholder: - zh_Hans: 在此输入您的 API Key - en_US: Enter your API Key - - variable: base_url - label: - zh_Hans: 服务器 URL - en_US: Base URL - type: text-input - required: true - placeholder: - zh_Hans: Base URL, e.g. https://api.jina.ai/v1 - en_US: Base URL, e.g. 
https://api.jina.ai/v1 - default: 'https://api.jina.ai/v1' - - variable: context_size - label: - zh_Hans: 上下文大小 - en_US: Context size - placeholder: - zh_Hans: 输入上下文大小 - en_US: Enter context size - required: false - type: text-input - default: '8192' diff --git a/api/core/model_runtime/model_providers/jina/text_embedding/text_embedding.py b/api/core/model_runtime/model_providers/jina/text_embedding/text_embedding.py deleted file mode 100644 index b397129512..0000000000 --- a/api/core/model_runtime/model_providers/jina/text_embedding/text_embedding.py +++ /dev/null @@ -1,199 +0,0 @@ -import time -from json import JSONDecodeError, dumps -from typing import Optional - -from requests import post - -from core.embedding.embedding_constant import EmbeddingInputType -from core.model_runtime.entities.common_entities import I18nObject -from core.model_runtime.entities.model_entities import AIModelEntity, FetchFrom, ModelPropertyKey, ModelType, PriceType -from core.model_runtime.entities.text_embedding_entities import EmbeddingUsage, TextEmbeddingResult -from core.model_runtime.errors.invoke import ( - InvokeAuthorizationError, - InvokeBadRequestError, - InvokeConnectionError, - InvokeError, - InvokeRateLimitError, - InvokeServerUnavailableError, -) -from core.model_runtime.errors.validate import CredentialsValidateFailedError -from core.model_runtime.model_providers.__base.text_embedding_model import TextEmbeddingModel -from core.model_runtime.model_providers.jina.text_embedding.jina_tokenizer import JinaTokenizer - - -class JinaTextEmbeddingModel(TextEmbeddingModel): - """ - Model class for Jina text embedding model. - """ - - api_base: str = "https://api.jina.ai/v1" - - def _to_payload(self, model: str, texts: list[str], credentials: dict, input_type: EmbeddingInputType) -> dict: - """ - Parse model credentials - - :param model: model name - :param credentials: model credentials - :param texts: texts to embed - :return: parsed credentials - """ - - def transform_jina_input_text(model, text): - if model == "jina-clip-v1": - return {"text": text} - return text - - data = {"model": model, "input": [transform_jina_input_text(model, text) for text in texts]} - - # model specific parameters - if model == "jina-embeddings-v3": - # set `task` type according to input type for the best performance - data["task"] = "retrieval.query" if input_type == EmbeddingInputType.QUERY else "retrieval.passage" - - return data - - def _invoke( - self, - model: str, - credentials: dict, - texts: list[str], - user: Optional[str] = None, - input_type: EmbeddingInputType = EmbeddingInputType.DOCUMENT, - ) -> TextEmbeddingResult: - """ - Invoke text embedding model - - :param model: model name - :param credentials: model credentials - :param texts: texts to embed - :param user: unique user id - :param input_type: input type - :return: embeddings result - """ - api_key = credentials["api_key"] - if not api_key: - raise CredentialsValidateFailedError("api_key is required") - - base_url = credentials.get("base_url", self.api_base) - base_url = base_url.removesuffix("/") - - url = base_url + "/embeddings" - headers = {"Authorization": "Bearer " + api_key, "Content-Type": "application/json"} - - data = self._to_payload(model=model, texts=texts, credentials=credentials, input_type=input_type) - - try: - response = post(url, headers=headers, data=dumps(data)) - except Exception as e: - raise InvokeConnectionError(str(e)) - - if response.status_code != 200: - try: - resp = response.json() - msg = resp["detail"] - if 
response.status_code == 401: - raise InvokeAuthorizationError(msg) - elif response.status_code == 429: - raise InvokeRateLimitError(msg) - elif response.status_code == 500: - raise InvokeServerUnavailableError(msg) - else: - raise InvokeBadRequestError(msg) - except JSONDecodeError as e: - raise InvokeServerUnavailableError( - f"Failed to convert response to json: {e} with text: {response.text}" - ) - - try: - resp = response.json() - embeddings = resp["data"] - usage = resp["usage"] - except Exception as e: - raise InvokeServerUnavailableError(f"Failed to convert response to json: {e} with text: {response.text}") - - usage = self._calc_response_usage(model=model, credentials=credentials, tokens=usage["total_tokens"]) - - result = TextEmbeddingResult( - model=model, embeddings=[[float(data) for data in x["embedding"]] for x in embeddings], usage=usage - ) - - return result - - def get_num_tokens(self, model: str, credentials: dict, texts: list[str]) -> int: - """ - Get number of tokens for given prompt messages - - :param model: model name - :param credentials: model credentials - :param texts: texts to embed - :return: - """ - num_tokens = 0 - for text in texts: - # use JinaTokenizer to get num tokens - num_tokens += JinaTokenizer.get_num_tokens(text) - return num_tokens - - def validate_credentials(self, model: str, credentials: dict) -> None: - """ - Validate model credentials - - :param model: model name - :param credentials: model credentials - :return: - """ - try: - self._invoke(model=model, credentials=credentials, texts=["ping"]) - except Exception as e: - raise CredentialsValidateFailedError(f"Credentials validation failed: {e}") - - @property - def _invoke_error_mapping(self) -> dict[type[InvokeError], list[type[Exception]]]: - return { - InvokeConnectionError: [InvokeConnectionError], - InvokeServerUnavailableError: [InvokeServerUnavailableError], - InvokeRateLimitError: [InvokeRateLimitError], - InvokeAuthorizationError: [InvokeAuthorizationError], - InvokeBadRequestError: [KeyError, InvokeBadRequestError], - } - - def _calc_response_usage(self, model: str, credentials: dict, tokens: int) -> EmbeddingUsage: - """ - Calculate response usage - - :param model: model name - :param credentials: model credentials - :param tokens: input tokens - :return: usage - """ - # get input price info - input_price_info = self.get_price( - model=model, credentials=credentials, price_type=PriceType.INPUT, tokens=tokens - ) - - # transform usage - usage = EmbeddingUsage( - tokens=tokens, - total_tokens=tokens, - unit_price=input_price_info.unit_price, - price_unit=input_price_info.unit, - total_price=input_price_info.total_amount, - currency=input_price_info.currency, - latency=time.perf_counter() - self.started_at, - ) - - return usage - - def get_customizable_model_schema(self, model: str, credentials: dict) -> AIModelEntity: - """ - generate custom model entities from credentials - """ - entity = AIModelEntity( - model=model, - label=I18nObject(en_US=model), - model_type=ModelType.TEXT_EMBEDDING, - fetch_from=FetchFrom.CUSTOMIZABLE_MODEL, - model_properties={ModelPropertyKey.CONTEXT_SIZE: int(credentials.get("context_size"))}, - ) - - return entity diff --git a/api/core/model_runtime/model_providers/localai/text_embedding/text_embedding.py b/api/core/model_runtime/model_providers/localai/text_embedding/text_embedding.py deleted file mode 100644 index ab8ca76c2f..0000000000 --- a/api/core/model_runtime/model_providers/localai/text_embedding/text_embedding.py +++ /dev/null @@ -1,189 +0,0 @@ 
-import time -from json import JSONDecodeError, dumps -from typing import Optional - -from requests import post -from yarl import URL - -from core.embedding.embedding_constant import EmbeddingInputType -from core.model_runtime.entities.common_entities import I18nObject -from core.model_runtime.entities.model_entities import AIModelEntity, FetchFrom, ModelPropertyKey, ModelType, PriceType -from core.model_runtime.entities.text_embedding_entities import EmbeddingUsage, TextEmbeddingResult -from core.model_runtime.errors.invoke import ( - InvokeAuthorizationError, - InvokeBadRequestError, - InvokeConnectionError, - InvokeError, - InvokeRateLimitError, - InvokeServerUnavailableError, -) -from core.model_runtime.errors.validate import CredentialsValidateFailedError -from core.model_runtime.model_providers.__base.text_embedding_model import TextEmbeddingModel - - -class LocalAITextEmbeddingModel(TextEmbeddingModel): - """ - Model class for LocalAI text embedding model. - """ - - def _invoke( - self, - model: str, - credentials: dict, - texts: list[str], - user: Optional[str] = None, - input_type: EmbeddingInputType = EmbeddingInputType.DOCUMENT, - ) -> TextEmbeddingResult: - """ - Invoke text embedding model - - :param model: model name - :param credentials: model credentials - :param texts: texts to embed - :param user: unique user id - :param input_type: input type - :return: embeddings result - """ - if len(texts) != 1: - raise InvokeBadRequestError("Only one text is supported") - - server_url = credentials["server_url"] - model_name = model - if not server_url: - raise CredentialsValidateFailedError("server_url is required") - if not model_name: - raise CredentialsValidateFailedError("model_name is required") - - url = server_url - headers = {"Authorization": "Bearer 123", "Content-Type": "application/json"} - - data = {"model": model_name, "input": texts[0]} - - try: - response = post(str(URL(url) / "embeddings"), headers=headers, data=dumps(data), timeout=10) - except Exception as e: - raise InvokeConnectionError(str(e)) - - if response.status_code != 200: - try: - resp = response.json() - code = resp["error"]["code"] - msg = resp["error"]["message"] - if code == 500: - raise InvokeServerUnavailableError(msg) - - if response.status_code == 401: - raise InvokeAuthorizationError(msg) - elif response.status_code == 429: - raise InvokeRateLimitError(msg) - elif response.status_code == 500: - raise InvokeServerUnavailableError(msg) - else: - raise InvokeError(msg) - except JSONDecodeError as e: - raise InvokeServerUnavailableError( - f"Failed to convert response to json: {e} with text: {response.text}" - ) - - try: - resp = response.json() - embeddings = resp["data"] - usage = resp["usage"] - except Exception as e: - raise InvokeServerUnavailableError(f"Failed to convert response to json: {e} with text: {response.text}") - - usage = self._calc_response_usage(model=model, credentials=credentials, tokens=usage["total_tokens"]) - - result = TextEmbeddingResult( - model=model, embeddings=[[float(data) for data in x["embedding"]] for x in embeddings], usage=usage - ) - - return result - - def get_num_tokens(self, model: str, credentials: dict, texts: list[str]) -> int: - """ - Get number of tokens for given prompt messages - - :param model: model name - :param credentials: model credentials - :param texts: texts to embed - :return: - """ - num_tokens = 0 - for text in texts: - # use GPT2Tokenizer to get num tokens - num_tokens += self._get_num_tokens_by_gpt2(text) - return num_tokens - - def 
_get_customizable_model_schema(self, model: str, credentials: dict) -> AIModelEntity | None: - """ - Get customizable model schema - - :param model: model name - :param credentials: model credentials - :return: model schema - """ - return AIModelEntity( - model=model, - label=I18nObject(zh_Hans=model, en_US=model), - model_type=ModelType.TEXT_EMBEDDING, - features=[], - fetch_from=FetchFrom.CUSTOMIZABLE_MODEL, - model_properties={ - ModelPropertyKey.CONTEXT_SIZE: int(credentials.get("context_size", "512")), - ModelPropertyKey.MAX_CHUNKS: 1, - }, - parameter_rules=[], - ) - - def validate_credentials(self, model: str, credentials: dict) -> None: - """ - Validate model credentials - - :param model: model name - :param credentials: model credentials - :return: - """ - try: - self._invoke(model=model, credentials=credentials, texts=["ping"]) - except InvokeAuthorizationError: - raise CredentialsValidateFailedError("Invalid credentials") - except InvokeConnectionError as e: - raise CredentialsValidateFailedError(f"Invalid credentials: {e}") - - @property - def _invoke_error_mapping(self) -> dict[type[InvokeError], list[type[Exception]]]: - return { - InvokeConnectionError: [InvokeConnectionError], - InvokeServerUnavailableError: [InvokeServerUnavailableError], - InvokeRateLimitError: [InvokeRateLimitError], - InvokeAuthorizationError: [InvokeAuthorizationError], - InvokeBadRequestError: [KeyError], - } - - def _calc_response_usage(self, model: str, credentials: dict, tokens: int) -> EmbeddingUsage: - """ - Calculate response usage - - :param model: model name - :param credentials: model credentials - :param tokens: input tokens - :return: usage - """ - # get input price info - input_price_info = self.get_price( - model=model, credentials=credentials, price_type=PriceType.INPUT, tokens=tokens - ) - - # transform usage - usage = EmbeddingUsage( - tokens=tokens, - total_tokens=tokens, - unit_price=input_price_info.unit_price, - price_unit=input_price_info.unit, - total_price=input_price_info.total_amount, - currency=input_price_info.currency, - latency=time.perf_counter() - self.started_at, - ) - - return usage diff --git a/api/core/model_runtime/model_providers/minimax/text_embedding/text_embedding.py b/api/core/model_runtime/model_providers/minimax/text_embedding/text_embedding.py deleted file mode 100644 index 74d2a221d1..0000000000 --- a/api/core/model_runtime/model_providers/minimax/text_embedding/text_embedding.py +++ /dev/null @@ -1,184 +0,0 @@ -import time -from json import dumps -from typing import Optional - -from requests import post - -from core.embedding.embedding_constant import EmbeddingInputType -from core.model_runtime.entities.model_entities import PriceType -from core.model_runtime.entities.text_embedding_entities import EmbeddingUsage, TextEmbeddingResult -from core.model_runtime.errors.invoke import ( - InvokeAuthorizationError, - InvokeBadRequestError, - InvokeConnectionError, - InvokeError, - InvokeRateLimitError, - InvokeServerUnavailableError, -) -from core.model_runtime.errors.validate import CredentialsValidateFailedError -from core.model_runtime.model_providers.__base.text_embedding_model import TextEmbeddingModel -from core.model_runtime.model_providers.minimax.llm.errors import ( - BadRequestError, - InsufficientAccountBalanceError, - InternalServerError, - InvalidAPIKeyError, - InvalidAuthenticationError, - RateLimitReachedError, -) - - -class MinimaxTextEmbeddingModel(TextEmbeddingModel): - """ - Model class for Minimax text embedding model. 
- """ - - api_base: str = "https://api.minimax.chat/v1/embeddings" - - def _invoke( - self, - model: str, - credentials: dict, - texts: list[str], - user: Optional[str] = None, - input_type: EmbeddingInputType = EmbeddingInputType.DOCUMENT, - ) -> TextEmbeddingResult: - """ - Invoke text embedding model - - :param model: model name - :param credentials: model credentials - :param texts: texts to embed - :param user: unique user id - :param input_type: input type - :return: embeddings result - """ - api_key = credentials["minimax_api_key"] - group_id = credentials["minimax_group_id"] - if model != "embo-01": - raise ValueError("Invalid model name") - if not api_key: - raise CredentialsValidateFailedError("api_key is required") - url = f"{self.api_base}?GroupId={group_id}" - headers = {"Authorization": "Bearer " + api_key, "Content-Type": "application/json"} - - data = {"model": "embo-01", "texts": texts, "type": "db"} - - try: - response = post(url, headers=headers, data=dumps(data)) - except Exception as e: - raise InvokeConnectionError(str(e)) - - if response.status_code != 200: - raise InvokeServerUnavailableError(response.text) - - try: - resp = response.json() - # check if there is an error - if resp["base_resp"]["status_code"] != 0: - code = resp["base_resp"]["status_code"] - msg = resp["base_resp"]["status_msg"] - self._handle_error(code, msg) - - embeddings = resp["vectors"] - total_tokens = resp["total_tokens"] - except InvalidAuthenticationError: - raise InvalidAPIKeyError("Invalid api key") - except KeyError as e: - raise InternalServerError(f"Failed to convert response to json: {e} with text: {response.text}") - - usage = self._calc_response_usage(model=model, credentials=credentials, tokens=total_tokens) - - result = TextEmbeddingResult(model=model, embeddings=embeddings, usage=usage) - - return result - - def get_num_tokens(self, model: str, credentials: dict, texts: list[str]) -> int: - """ - Get number of tokens for given prompt messages - - :param model: model name - :param credentials: model credentials - :param texts: texts to embed - :return: - """ - num_tokens = 0 - for text in texts: - # use MinimaxTokenizer to get num tokens - num_tokens += self._get_num_tokens_by_gpt2(text) - return num_tokens - - def validate_credentials(self, model: str, credentials: dict) -> None: - """ - Validate model credentials - - :param model: model name - :param credentials: model credentials - :return: - """ - try: - self._invoke(model=model, credentials=credentials, texts=["ping"]) - except InvalidAPIKeyError: - raise CredentialsValidateFailedError("Invalid api key") - - def _handle_error(self, code: int, msg: str): - if code in {1000, 1001}: - raise InternalServerError(msg) - elif code == 1002: - raise RateLimitReachedError(msg) - elif code == 1004: - raise InvalidAuthenticationError(msg) - elif code == 1008: - raise InsufficientAccountBalanceError(msg) - elif code == 2013: - raise BadRequestError(msg) - else: - raise InternalServerError(msg) - - @property - def _invoke_error_mapping(self) -> dict[type[InvokeError], list[type[Exception]]]: - """ - Map model invoke error to unified error - The key is the error type thrown to the caller - The value is the error type thrown by the model, - which needs to be converted into a unified error type for the caller. 
- - :return: Invoke error mapping - """ - return { - InvokeConnectionError: [], - InvokeServerUnavailableError: [InternalServerError], - InvokeRateLimitError: [RateLimitReachedError], - InvokeAuthorizationError: [ - InvalidAuthenticationError, - InsufficientAccountBalanceError, - InvalidAPIKeyError, - ], - InvokeBadRequestError: [BadRequestError, KeyError], - } - - def _calc_response_usage(self, model: str, credentials: dict, tokens: int) -> EmbeddingUsage: - """ - Calculate response usage - - :param model: model name - :param credentials: model credentials - :param tokens: input tokens - :return: usage - """ - # get input price info - input_price_info = self.get_price( - model=model, credentials=credentials, price_type=PriceType.INPUT, tokens=tokens - ) - - # transform usage - usage = EmbeddingUsage( - tokens=tokens, - total_tokens=tokens, - unit_price=input_price_info.unit_price, - price_unit=input_price_info.unit, - total_price=input_price_info.total_amount, - currency=input_price_info.currency, - latency=time.perf_counter() - self.started_at, - ) - - return usage diff --git a/api/core/model_runtime/model_providers/mixedbread/text_embedding/text_embedding.py b/api/core/model_runtime/model_providers/mixedbread/text_embedding/text_embedding.py deleted file mode 100644 index 68b7b448bf..0000000000 --- a/api/core/model_runtime/model_providers/mixedbread/text_embedding/text_embedding.py +++ /dev/null @@ -1,170 +0,0 @@ -import time -from json import JSONDecodeError, dumps -from typing import Optional - -import requests - -from core.embedding.embedding_constant import EmbeddingInputType -from core.model_runtime.entities.common_entities import I18nObject -from core.model_runtime.entities.model_entities import AIModelEntity, FetchFrom, ModelPropertyKey, ModelType, PriceType -from core.model_runtime.entities.text_embedding_entities import EmbeddingUsage, TextEmbeddingResult -from core.model_runtime.errors.invoke import ( - InvokeAuthorizationError, - InvokeBadRequestError, - InvokeConnectionError, - InvokeError, - InvokeRateLimitError, - InvokeServerUnavailableError, -) -from core.model_runtime.errors.validate import CredentialsValidateFailedError -from core.model_runtime.model_providers.__base.text_embedding_model import TextEmbeddingModel - - -class MixedBreadTextEmbeddingModel(TextEmbeddingModel): - """ - Model class for MixedBread text embedding model. 
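The _calc_response_usage body that closes the Minimax module above recurs almost verbatim in every embedding provider this diff deletes: a single get_price lookup populates all pricing fields, and latency is measured from the started_at timestamp the base class records at invoke time. A consolidated sketch (the free-function form is mine; the field wiring matches the removed code):

```python
import time

from core.model_runtime.entities.model_entities import PriceType
from core.model_runtime.entities.text_embedding_entities import EmbeddingUsage


def calc_embedding_usage(model_instance, model: str, credentials: dict, tokens: int) -> EmbeddingUsage:
    # one price lookup supplies unit price, price unit, total price and currency
    price = model_instance.get_price(
        model=model, credentials=credentials, price_type=PriceType.INPUT, tokens=tokens
    )
    return EmbeddingUsage(
        tokens=tokens,
        total_tokens=tokens,  # embeddings bill input tokens only
        unit_price=price.unit_price,
        price_unit=price.unit,
        total_price=price.total_amount,
        currency=price.currency,
        latency=time.perf_counter() - model_instance.started_at,  # started_at set at invoke time
    )
```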
- """ - - api_base: str = "https://api.mixedbread.ai/v1" - - def _invoke( - self, - model: str, - credentials: dict, - texts: list[str], - user: Optional[str] = None, - input_type: EmbeddingInputType = EmbeddingInputType.DOCUMENT, - ) -> TextEmbeddingResult: - """ - Invoke text embedding model - - :param model: model name - :param credentials: model credentials - :param texts: texts to embed - :param user: unique user id - :param input_type: input type - :return: embeddings result - """ - api_key = credentials["api_key"] - if not api_key: - raise CredentialsValidateFailedError("api_key is required") - - base_url = credentials.get("base_url", self.api_base) - base_url = base_url.removesuffix("/") - - url = base_url + "/embeddings" - headers = {"Authorization": "Bearer " + api_key, "Content-Type": "application/json"} - - data = {"model": model, "input": texts} - - try: - response = requests.post(url, headers=headers, data=dumps(data)) - except Exception as e: - raise InvokeConnectionError(str(e)) - - if response.status_code != 200: - try: - resp = response.json() - msg = resp["detail"] - if response.status_code == 401: - raise InvokeAuthorizationError(msg) - elif response.status_code == 429: - raise InvokeRateLimitError(msg) - elif response.status_code == 500: - raise InvokeServerUnavailableError(msg) - else: - raise InvokeBadRequestError(msg) - except JSONDecodeError as e: - raise InvokeServerUnavailableError( - f"Failed to convert response to json: {e} with text: {response.text}" - ) - - try: - resp = response.json() - embeddings = resp["data"] - usage = resp["usage"] - except Exception as e: - raise InvokeServerUnavailableError(f"Failed to convert response to json: {e} with text: {response.text}") - - usage = self._calc_response_usage(model=model, credentials=credentials, tokens=usage["total_tokens"]) - - result = TextEmbeddingResult( - model=model, embeddings=[[float(data) for data in x["embedding"]] for x in embeddings], usage=usage - ) - - return result - - def get_num_tokens(self, model: str, credentials: dict, texts: list[str]) -> int: - """ - Get number of tokens for given prompt messages - - :param model: model name - :param credentials: model credentials - :param texts: texts to embed - :return: - """ - return sum(self._get_num_tokens_by_gpt2(text) for text in texts) - - def validate_credentials(self, model: str, credentials: dict) -> None: - """ - Validate model credentials - - :param model: model name - :param credentials: model credentials - :return: - """ - try: - self._invoke(model=model, credentials=credentials, texts=["ping"]) - except Exception as e: - raise CredentialsValidateFailedError(f"Credentials validation failed: {e}") - - @property - def _invoke_error_mapping(self) -> dict[type[InvokeError], list[type[Exception]]]: - return { - InvokeConnectionError: [InvokeConnectionError], - InvokeServerUnavailableError: [InvokeServerUnavailableError], - InvokeRateLimitError: [InvokeRateLimitError], - InvokeAuthorizationError: [InvokeAuthorizationError], - InvokeBadRequestError: [KeyError, InvokeBadRequestError], - } - - def _calc_response_usage(self, model: str, credentials: dict, tokens: int) -> EmbeddingUsage: - """ - Calculate response usage - - :param model: model name - :param credentials: model credentials - :param tokens: input tokens - :return: usage - """ - # get input price info - input_price_info = self.get_price( - model=model, credentials=credentials, price_type=PriceType.INPUT, tokens=tokens - ) - - # transform usage - usage = EmbeddingUsage( - tokens=tokens, - 
total_tokens=tokens, - unit_price=input_price_info.unit_price, - price_unit=input_price_info.unit, - total_price=input_price_info.total_amount, - currency=input_price_info.currency, - latency=time.perf_counter() - self.started_at, - ) - - return usage - - def get_customizable_model_schema(self, model: str, credentials: dict) -> AIModelEntity: - """ - generate custom model entities from credentials - """ - entity = AIModelEntity( - model=model, - label=I18nObject(en_US=model), - model_type=ModelType.TEXT_EMBEDDING, - fetch_from=FetchFrom.CUSTOMIZABLE_MODEL, - model_properties={ModelPropertyKey.CONTEXT_SIZE: int(credentials.get("context_size", "512"))}, - ) - - return entity diff --git a/api/core/model_runtime/model_providers/model_provider_factory.py b/api/core/model_runtime/model_providers/model_provider_factory.py index 1370676f0e..f2c6d6a650 100644 --- a/api/core/model_runtime/model_providers/model_provider_factory.py +++ b/api/core/model_runtime/model_providers/model_provider_factory.py @@ -13,7 +13,6 @@ from core.model_runtime.model_providers.__base.large_language_model import Large from core.model_runtime.model_providers.__base.moderation_model import ModerationModel from core.model_runtime.model_providers.__base.rerank_model import RerankModel from core.model_runtime.model_providers.__base.speech2text_model import Speech2TextModel -from core.model_runtime.model_providers.__base.text2img_model import Text2ImageModel from core.model_runtime.model_providers.__base.text_embedding_model import TextEmbeddingModel from core.model_runtime.model_providers.__base.tts_model import TTSModel from core.model_runtime.schema_validators.model_credential_schema_validator import ModelCredentialSchemaValidator @@ -284,8 +283,6 @@ class ModelProviderFactory: return ModerationModel(**init_params) elif model_type == ModelType.TTS: return TTSModel(**init_params) - elif model_type == ModelType.TEXT2IMG: - return Text2ImageModel(**init_params) def get_provider_icon(self, provider: str, icon_type: str, lang: str) -> bytes: """ diff --git a/api/core/model_runtime/model_providers/nomic/text_embedding/text_embedding.py b/api/core/model_runtime/model_providers/nomic/text_embedding/text_embedding.py deleted file mode 100644 index 857dfb5f41..0000000000 --- a/api/core/model_runtime/model_providers/nomic/text_embedding/text_embedding.py +++ /dev/null @@ -1,165 +0,0 @@ -import time -from functools import wraps -from typing import Optional - -from nomic import embed -from nomic import login as nomic_login - -from core.embedding.embedding_constant import EmbeddingInputType -from core.model_runtime.entities.model_entities import PriceType -from core.model_runtime.entities.text_embedding_entities import ( - EmbeddingUsage, - TextEmbeddingResult, -) -from core.model_runtime.errors.validate import CredentialsValidateFailedError -from core.model_runtime.model_providers.__base.text_embedding_model import ( - TextEmbeddingModel, -) -from core.model_runtime.model_providers.nomic._common import _CommonNomic - - -def nomic_login_required(func): - @wraps(func) - def wrapper(*args, **kwargs): - try: - if not kwargs.get("credentials"): - raise ValueError("missing credentials parameters") - credentials = kwargs.get("credentials") - if "nomic_api_key" not in credentials: - raise ValueError("missing nomic_api_key in credentials parameters") - # nomic login - nomic_login(credentials["nomic_api_key"]) - except Exception as ex: - raise CredentialsValidateFailedError(str(ex)) - return func(*args, **kwargs) - - return wrapper - - -class 
NomicTextEmbeddingModel(_CommonNomic, TextEmbeddingModel): - """ - Model class for nomic text embedding model. - """ - - def _invoke( - self, - model: str, - credentials: dict, - texts: list[str], - user: Optional[str] = None, - input_type: EmbeddingInputType = EmbeddingInputType.DOCUMENT, - ) -> TextEmbeddingResult: - """ - Invoke text embedding model - - :param model: model name - :param credentials: model credentials - :param texts: texts to embed - :param user: unique user id - :param input_type: input type - :return: embeddings result - """ - embeddings, prompt_tokens, total_tokens = self.embed_text( - model=model, - credentials=credentials, - texts=texts, - ) - - # calc usage - usage = self._calc_response_usage( - model=model, credentials=credentials, tokens=prompt_tokens, total_tokens=total_tokens - ) - return TextEmbeddingResult(embeddings=embeddings, usage=usage, model=model) - - def get_num_tokens(self, model: str, credentials: dict, texts: list[str]) -> int: - """ - Get number of tokens for given prompt messages - - :param model: model name - :param credentials: model credentials - :param texts: texts to embed - :return: - """ - return sum(self._get_num_tokens_by_gpt2(text) for text in texts) - - def validate_credentials(self, model: str, credentials: dict) -> None: - """ - Validate model credentials - - :param model: model name - :param credentials: model credentials - :return: - """ - try: - # call embedding model - self.embed_text(model=model, credentials=credentials, texts=["ping"]) - except Exception as ex: - raise CredentialsValidateFailedError(str(ex)) - - @nomic_login_required - def embed_text(self, model: str, credentials: dict, texts: list[str]) -> tuple[list[list[float]], int, int]: - """Call out to Nomic's embedding endpoint. - - Args: - model: The model to use for embedding. - texts: The list of texts to embed. - - Returns: - List of embeddings, one for each text, and tokens usage. 
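The nomic_login_required decorator defined above authenticates on every call by reading credentials out of the keyword arguments, so wrapped methods must receive credentials= by keyword. A brief usage sketch (the class and commented call are illustrative only):

```python
from nomic import embed

# nomic_login_required is the decorator from the removed module above


class ExampleNomicModel:  # hypothetical class for illustration
    @nomic_login_required
    def embed_text(self, *, model: str, credentials: dict, texts: list[str]):
        # reaching this point means nomic_login(credentials["nomic_api_key"]) succeeded
        return embed.text(model=model, texts=texts)


# the decorator inspects kwargs, so the keyword-only signature keeps calls honest:
# ExampleNomicModel().embed_text(model="...", credentials={"nomic_api_key": "..."}, texts=["ping"])
```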
- """ - embeddings: list[list[float]] = [] - prompt_tokens = 0 - total_tokens = 0 - - response = embed.text( - model=model, - texts=texts, - ) - - if not (response and "embeddings" in response): - raise ValueError("Embedding data is missing in the response.") - - if not (response and "usage" in response): - raise ValueError("Response usage is missing.") - - if "prompt_tokens" not in response["usage"]: - raise ValueError("Response usage does not contain prompt tokens.") - - if "total_tokens" not in response["usage"]: - raise ValueError("Response usage does not contain total tokens.") - - embeddings = [list(map(float, e)) for e in response["embeddings"]] - total_tokens = response["usage"]["total_tokens"] - prompt_tokens = response["usage"]["prompt_tokens"] - return embeddings, prompt_tokens, total_tokens - - def _calc_response_usage(self, model: str, credentials: dict, tokens: int, total_tokens: int) -> EmbeddingUsage: - """ - Calculate response usage - - :param model: model name - :param credentials: model credentials - :param tokens: prompt tokens - :param total_tokens: total tokens - :return: usage - """ - # get input price info - input_price_info = self.get_price( - model=model, - credentials=credentials, - price_type=PriceType.INPUT, - tokens=tokens, - ) - - # transform usage - usage = EmbeddingUsage( - tokens=tokens, - total_tokens=total_tokens, - unit_price=input_price_info.unit_price, - price_unit=input_price_info.unit, - total_price=input_price_info.total_amount, - currency=input_price_info.currency, - latency=time.perf_counter() - self.started_at, - ) - - return usage diff --git a/api/core/model_runtime/model_providers/nvidia/text_embedding/text_embedding.py b/api/core/model_runtime/model_providers/nvidia/text_embedding/text_embedding.py deleted file mode 100644 index 936ceb8dd2..0000000000 --- a/api/core/model_runtime/model_providers/nvidia/text_embedding/text_embedding.py +++ /dev/null @@ -1,158 +0,0 @@ -import time -from json import JSONDecodeError, dumps -from typing import Optional - -from requests import post - -from core.embedding.embedding_constant import EmbeddingInputType -from core.model_runtime.entities.model_entities import PriceType -from core.model_runtime.entities.text_embedding_entities import EmbeddingUsage, TextEmbeddingResult -from core.model_runtime.errors.invoke import ( - InvokeAuthorizationError, - InvokeBadRequestError, - InvokeConnectionError, - InvokeError, - InvokeRateLimitError, - InvokeServerUnavailableError, -) -from core.model_runtime.errors.validate import CredentialsValidateFailedError -from core.model_runtime.model_providers.__base.text_embedding_model import TextEmbeddingModel - - -class NvidiaTextEmbeddingModel(TextEmbeddingModel): - """ - Model class for Nvidia text embedding model. 
- """ - - api_base: str = "https://ai.api.nvidia.com/v1/retrieval/nvidia/embeddings" - models: list[str] = ["NV-Embed-QA"] - - def _invoke( - self, - model: str, - credentials: dict, - texts: list[str], - user: Optional[str] = None, - input_type: EmbeddingInputType = EmbeddingInputType.DOCUMENT, - ) -> TextEmbeddingResult: - """ - Invoke text embedding model - - :param model: model name - :param credentials: model credentials - :param texts: texts to embed - :param user: unique user id - :param input_type: input type - :return: embeddings result - """ - api_key = credentials["api_key"] - if model not in self.models: - raise InvokeBadRequestError("Invalid model name") - if not api_key: - raise CredentialsValidateFailedError("api_key is required") - url = self.api_base - headers = {"Authorization": "Bearer " + api_key, "Content-Type": "application/json"} - - data = {"model": model, "input": texts[0], "input_type": "query"} - - try: - response = post(url, headers=headers, data=dumps(data)) - except Exception as e: - raise InvokeConnectionError(str(e)) - - if response.status_code != 200: - try: - resp = response.json() - msg = resp["detail"] - if response.status_code == 401: - raise InvokeAuthorizationError(msg) - elif response.status_code == 429: - raise InvokeRateLimitError(msg) - elif response.status_code == 500: - raise InvokeServerUnavailableError(msg) - else: - raise InvokeError(msg) - except JSONDecodeError as e: - raise InvokeServerUnavailableError( - f"Failed to convert response to json: {e} with text: {response.text}" - ) - - try: - resp = response.json() - embeddings = resp["data"] - usage = resp["usage"] - except Exception as e: - raise InvokeServerUnavailableError(f"Failed to convert response to json: {e} with text: {response.text}") - - usage = self._calc_response_usage(model=model, credentials=credentials, tokens=usage["total_tokens"]) - - result = TextEmbeddingResult( - model=model, embeddings=[[float(data) for data in x["embedding"]] for x in embeddings], usage=usage - ) - - return result - - def get_num_tokens(self, model: str, credentials: dict, texts: list[str]) -> int: - """ - Get number of tokens for given prompt messages - - :param model: model name - :param credentials: model credentials - :param texts: texts to embed - :return: - """ - num_tokens = 0 - for text in texts: - # use JinaTokenizer to get num tokens - num_tokens += self._get_num_tokens_by_gpt2(text) - return num_tokens - - def validate_credentials(self, model: str, credentials: dict) -> None: - """ - Validate model credentials - - :param model: model name - :param credentials: model credentials - :return: - """ - try: - self._invoke(model=model, credentials=credentials, texts=["ping"]) - except InvokeAuthorizationError: - raise CredentialsValidateFailedError("Invalid api key") - - @property - def _invoke_error_mapping(self) -> dict[type[InvokeError], list[type[Exception]]]: - return { - InvokeConnectionError: [InvokeConnectionError], - InvokeServerUnavailableError: [InvokeServerUnavailableError], - InvokeRateLimitError: [InvokeRateLimitError], - InvokeAuthorizationError: [InvokeAuthorizationError], - InvokeBadRequestError: [KeyError], - } - - def _calc_response_usage(self, model: str, credentials: dict, tokens: int) -> EmbeddingUsage: - """ - Calculate response usage - - :param model: model name - :param credentials: model credentials - :param tokens: input tokens - :return: usage - """ - # get input price info - input_price_info = self.get_price( - model=model, credentials=credentials, 
price_type=PriceType.INPUT, tokens=tokens - ) - - # transform usage - usage = EmbeddingUsage( - tokens=tokens, - total_tokens=tokens, - unit_price=input_price_info.unit_price, - price_unit=input_price_info.unit, - total_price=input_price_info.total_amount, - currency=input_price_info.currency, - latency=time.perf_counter() - self.started_at, - ) - - return usage diff --git a/api/core/model_runtime/model_providers/oci/text_embedding/text_embedding.py b/api/core/model_runtime/model_providers/oci/text_embedding/text_embedding.py deleted file mode 100644 index 4de9296cca..0000000000 --- a/api/core/model_runtime/model_providers/oci/text_embedding/text_embedding.py +++ /dev/null @@ -1,224 +0,0 @@ -import base64 -import copy -import time -from typing import Optional - -import numpy as np -import oci - -from core.embedding.embedding_constant import EmbeddingInputType -from core.model_runtime.entities.model_entities import PriceType -from core.model_runtime.entities.text_embedding_entities import EmbeddingUsage, TextEmbeddingResult -from core.model_runtime.errors.invoke import ( - InvokeAuthorizationError, - InvokeBadRequestError, - InvokeConnectionError, - InvokeError, - InvokeRateLimitError, - InvokeServerUnavailableError, -) -from core.model_runtime.errors.validate import CredentialsValidateFailedError -from core.model_runtime.model_providers.__base.text_embedding_model import TextEmbeddingModel - -request_template = { - "compartmentId": "", - "servingMode": {"modelId": "cohere.embed-english-light-v3.0", "servingType": "ON_DEMAND"}, - "truncate": "NONE", - "inputs": [""], -} -oci_config_template = { - "user": "", - "fingerprint": "", - "tenancy": "", - "region": "", - "compartment_id": "", - "key_content": "", -} - - -class OCITextEmbeddingModel(TextEmbeddingModel): - """ - Model class for OCI text embedding model.
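The OCI module expects its whole client config packed into a single base64 credential of five '/'-separated fields, which _embedding_invoke below decodes back into the oci_config_template shape. A round-trip sketch of that packing scheme (function names are mine):

```python
import base64


def pack_oci_config(user: str, fingerprint: str, tenancy: str, region: str, compartment: str) -> str:
    # what a user is expected to store in the oci_config_content credential
    raw = "/".join([user, fingerprint, tenancy, region, compartment])
    return base64.b64encode(raw.encode("utf-8")).decode("utf-8")


def unpack_oci_config(oci_config_content: str) -> dict:
    # mirrors the parsing in the removed _embedding_invoke
    items = base64.b64decode(oci_config_content).decode("utf-8").split("/")
    if len(items) != 5:
        raise ValueError("expected user_ocid/fingerprint/tenancy_ocid/region/compartment_ocid")
    keys = ("user", "fingerprint", "tenancy", "region", "compartment_id")
    return dict(zip(keys, items))
```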
- """ - - def _invoke( - self, - model: str, - credentials: dict, - texts: list[str], - user: Optional[str] = None, - input_type: EmbeddingInputType = EmbeddingInputType.DOCUMENT, - ) -> TextEmbeddingResult: - """ - Invoke text embedding model - - :param model: model name - :param credentials: model credentials - :param texts: texts to embed - :param user: unique user id - :param input_type: input type - :return: embeddings result - """ - # get model properties - context_size = self._get_context_size(model, credentials) - max_chunks = self._get_max_chunks(model, credentials) - - inputs = [] - indices = [] - used_tokens = 0 - - for i, text in enumerate(texts): - # Here token count is only an approximation based on the GPT2 tokenizer - num_tokens = self._get_num_tokens_by_gpt2(text) - - if num_tokens >= context_size: - cutoff = int(len(text) * (np.floor(context_size / num_tokens))) - # if num tokens is larger than context length, only use the start - inputs.append(text[0:cutoff]) - else: - inputs.append(text) - indices += [i] - - batched_embeddings = [] - _iter = range(0, len(inputs), max_chunks) - - for i in _iter: - # call embedding model - embeddings_batch, embedding_used_tokens = self._embedding_invoke( - model=model, credentials=credentials, texts=inputs[i : i + max_chunks] - ) - - used_tokens += embedding_used_tokens - batched_embeddings += embeddings_batch - - # calc usage - usage = self._calc_response_usage(model=model, credentials=credentials, tokens=used_tokens) - - return TextEmbeddingResult(embeddings=batched_embeddings, usage=usage, model=model) - - def get_num_tokens(self, model: str, credentials: dict, texts: list[str]) -> int: - """ - Get number of tokens for given prompt messages - - :param model: model name - :param credentials: model credentials - :param texts: texts to embed - :return: - """ - return sum(self._get_num_tokens_by_gpt2(text) for text in texts) - - def get_num_characters(self, model: str, credentials: dict, texts: list[str]) -> int: - """ - Get number of tokens for given prompt messages - - :param model: model name - :param credentials: model credentials - :param texts: texts to embed - :return: - """ - characters = 0 - for text in texts: - characters += len(text) - return characters - - def validate_credentials(self, model: str, credentials: dict) -> None: - """ - Validate model credentials - - :param model: model name - :param credentials: model credentials - :return: - """ - try: - # call embedding model - self._embedding_invoke(model=model, credentials=credentials, texts=["ping"]) - except Exception as ex: - raise CredentialsValidateFailedError(str(ex)) - - def _embedding_invoke(self, model: str, credentials: dict, texts: list[str]) -> tuple[list[list[float]], int]: - """ - Invoke embedding model - - :param model: model name - :param credentials: model credentials - :param texts: texts to embed - :return: embeddings and used tokens - """ - - # oci - # initialize client - oci_config = copy.deepcopy(oci_config_template) - if "oci_config_content" in credentials: - oci_config_content = base64.b64decode(credentials.get("oci_config_content")).decode("utf-8") - config_items = oci_config_content.split("/") - if len(config_items) != 5: - raise CredentialsValidateFailedError( - "oci_config_content should be base64.b64encode(" - "'user_ocid/fingerprint/tenancy_ocid/region/compartment_ocid'.encode('utf-8'))" - ) - oci_config["user"] = config_items[0] - oci_config["fingerprint"] = config_items[1] - oci_config["tenancy"] = config_items[2] - oci_config["region"] = 
config_items[3] - oci_config["compartment_id"] = config_items[4] - else: - raise CredentialsValidateFailedError("need to set oci_config_content in credentials") - if "oci_key_content" in credentials: - oci_key_content = base64.b64decode(credentials.get("oci_key_content")).decode("utf-8") - oci_config["key_content"] = oci_key_content.encode(encoding="utf-8") - else: - raise CredentialsValidateFailedError("need to set oci_key_content in credentials") - # oci_config = oci.config.from_file('~/.oci/config', credentials.get('oci_api_profile')) - compartment_id = oci_config["compartment_id"] - client = oci.generative_ai_inference.GenerativeAiInferenceClient(config=oci_config) - # call embedding model - request_args = copy.deepcopy(request_template) - request_args["compartmentId"] = compartment_id - request_args["servingMode"]["modelId"] = model - request_args["inputs"] = texts - response = client.embed_text(request_args) - return response.data.embeddings, self.get_num_characters(model=model, credentials=credentials, texts=texts) - - def _calc_response_usage(self, model: str, credentials: dict, tokens: int) -> EmbeddingUsage: - """ - Calculate response usage - - :param model: model name - :param credentials: model credentials - :param tokens: input tokens - :return: usage - """ - # get input price info - input_price_info = self.get_price( - model=model, credentials=credentials, price_type=PriceType.INPUT, tokens=tokens - ) - - # transform usage - usage = EmbeddingUsage( - tokens=tokens, - total_tokens=tokens, - unit_price=input_price_info.unit_price, - price_unit=input_price_info.unit, - total_price=input_price_info.total_amount, - currency=input_price_info.currency, - latency=time.perf_counter() - self.started_at, - ) - - return usage - - @property - def _invoke_error_mapping(self) -> dict[type[InvokeError], list[type[Exception]]]: - """ - Map model invoke error to unified error - The key is the error type thrown to the caller - The value is the error type thrown by the model, - which needs to be converted into a unified error type for the caller.
- :return: Invoke error mapping - """ - return { - InvokeConnectionError: [InvokeConnectionError], - InvokeServerUnavailableError: [InvokeServerUnavailableError], - InvokeRateLimitError: [InvokeRateLimitError], - InvokeAuthorizationError: [InvokeAuthorizationError], - InvokeBadRequestError: [KeyError], - } diff --git a/api/core/model_runtime/model_providers/ollama/text_embedding/text_embedding.py b/api/core/model_runtime/model_providers/ollama/text_embedding/text_embedding.py deleted file mode 100644 index 5cf3f1c6fa..0000000000 --- a/api/core/model_runtime/model_providers/ollama/text_embedding/text_embedding.py +++ /dev/null @@ -1,211 +0,0 @@ -import json -import logging -import time -from decimal import Decimal -from typing import Optional -from urllib.parse import urljoin - -import numpy as np -import requests - -from core.embedding.embedding_constant import EmbeddingInputType -from core.model_runtime.entities.common_entities import I18nObject -from core.model_runtime.entities.model_entities import ( - AIModelEntity, - FetchFrom, - ModelPropertyKey, - ModelType, - PriceConfig, - PriceType, -) -from core.model_runtime.entities.text_embedding_entities import EmbeddingUsage, TextEmbeddingResult -from core.model_runtime.errors.invoke import ( - InvokeAuthorizationError, - InvokeBadRequestError, - InvokeConnectionError, - InvokeError, - InvokeRateLimitError, - InvokeServerUnavailableError, -) -from core.model_runtime.errors.validate import CredentialsValidateFailedError -from core.model_runtime.model_providers.__base.text_embedding_model import TextEmbeddingModel - -logger = logging.getLogger(__name__) - - -class OllamaEmbeddingModel(TextEmbeddingModel): - """ - Model class for an Ollama text embedding model. - """ - - def _invoke( - self, - model: str, - credentials: dict, - texts: list[str], - user: Optional[str] = None, - input_type: EmbeddingInputType = EmbeddingInputType.DOCUMENT, - ) -> TextEmbeddingResult: - """ - Invoke text embedding model - - :param model: model name - :param credentials: model credentials - :param texts: texts to embed - :param user: unique user id - :param input_type: input type - :return: embeddings result - """ - - # Prepare headers and payload for the request - headers = {"Content-Type": "application/json"} - - endpoint_url = credentials.get("base_url") - if not endpoint_url.endswith("/"): - endpoint_url += "/" - - endpoint_url = urljoin(endpoint_url, "api/embed") - - # get model properties - context_size = self._get_context_size(model, credentials) - - inputs = [] - used_tokens = 0 - - for text in texts: - # Here token count is only an approximation based on the GPT2 tokenizer - num_tokens = self._get_num_tokens_by_gpt2(text) - - if num_tokens >= context_size: - cutoff = int(np.floor(len(text) * (context_size / num_tokens))) - # if num tokens is larger than context length, only use the start - inputs.append(text[0:cutoff]) - else: - inputs.append(text) - - # Prepare the payload for the request - payload = {"input": inputs, "model": model, "options": {"use_mmap": True}} - - # Make the request to the Ollama API - response = requests.post(endpoint_url, headers=headers, data=json.dumps(payload), timeout=(10, 300)) - - response.raise_for_status() # Raise an exception for HTTP errors - response_data = response.json() - - # Extract embeddings and used tokens from the response - embeddings = response_data["embeddings"] - embedding_used_tokens = self.get_num_tokens(model, credentials, inputs) - - used_tokens += embedding_used_tokens - - # calc usage - usage = 
self._calc_response_usage(model=model, credentials=credentials, tokens=used_tokens) - - return TextEmbeddingResult(embeddings=embeddings, usage=usage, model=model) - - def get_num_tokens(self, model: str, credentials: dict, texts: list[str]) -> int: - """ - Approximate number of tokens for given messages using GPT2 tokenizer - - :param model: model name - :param credentials: model credentials - :param texts: texts to embed - :return: - """ - return sum(self._get_num_tokens_by_gpt2(text) for text in texts) - - def validate_credentials(self, model: str, credentials: dict) -> None: - """ - Validate model credentials - - :param model: model name - :param credentials: model credentials - :return: - """ - try: - self._invoke(model=model, credentials=credentials, texts=["ping"]) - except InvokeError as ex: - raise CredentialsValidateFailedError(f"An error occurred during credentials validation: {ex.description}") - except Exception as ex: - raise CredentialsValidateFailedError(f"An error occurred during credentials validation: {str(ex)}") - - def get_customizable_model_schema(self, model: str, credentials: dict) -> AIModelEntity: - """ - generate custom model entities from credentials - """ - entity = AIModelEntity( - model=model, - label=I18nObject(en_US=model), - model_type=ModelType.TEXT_EMBEDDING, - fetch_from=FetchFrom.CUSTOMIZABLE_MODEL, - model_properties={ - ModelPropertyKey.CONTEXT_SIZE: int(credentials.get("context_size")), - ModelPropertyKey.MAX_CHUNKS: 1, - }, - parameter_rules=[], - pricing=PriceConfig( - input=Decimal(credentials.get("input_price", 0)), - unit=Decimal(credentials.get("unit", 0)), - currency=credentials.get("currency", "USD"), - ), - ) - - return entity - - def _calc_response_usage(self, model: str, credentials: dict, tokens: int) -> EmbeddingUsage: - """ - Calculate response usage - - :param model: model name - :param credentials: model credentials - :param tokens: input tokens - :return: usage - """ - # get input price info - input_price_info = self.get_price( - model=model, credentials=credentials, price_type=PriceType.INPUT, tokens=tokens - ) - - # transform usage - usage = EmbeddingUsage( - tokens=tokens, - total_tokens=tokens, - unit_price=input_price_info.unit_price, - price_unit=input_price_info.unit, - total_price=input_price_info.total_amount, - currency=input_price_info.currency, - latency=time.perf_counter() - self.started_at, - ) - - return usage - - @property - def _invoke_error_mapping(self) -> dict[type[InvokeError], list[type[Exception]]]: - """ - Map model invoke error to unified error - The key is the error type thrown to the caller - The value is the error type thrown by the model, - which needs to be converted into a unified error type for the caller. 
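Several removed modules (OCI, Ollama above, and the OpenAI-compatible client further down) cap over-long inputs without re-tokenizing: they scale the character length by the ratio of context size to approximate token count and keep only that prefix. A standalone sketch of the heuristic:

```python
import numpy as np


def truncate_to_context(text: str, context_size: int, count_tokens) -> str:
    """Keep a proportional prefix when text exceeds the context window (sketch).

    count_tokens is an approximate counter such as _get_num_tokens_by_gpt2.
    """
    num_tokens = count_tokens(text)
    if num_tokens < context_size:
        return text
    # assume tokens are spread roughly evenly over the string
    cutoff = int(np.floor(len(text) * (context_size / num_tokens)))
    return text[:cutoff]
```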
- - :return: Invoke error mapping - """ - return { - InvokeAuthorizationError: [ - requests.exceptions.InvalidHeader, # Missing or Invalid API Key - ], - InvokeBadRequestError: [ - requests.exceptions.HTTPError, # Invalid Endpoint URL or model name - requests.exceptions.InvalidURL, # Misconfigured request or other API error - ], - InvokeRateLimitError: [ - requests.exceptions.RetryError # Too many requests sent in a short period of time - ], - InvokeServerUnavailableError: [ - requests.exceptions.ConnectionError, # Engine Overloaded - requests.exceptions.HTTPError, # Server Error - ], - InvokeConnectionError: [ - requests.exceptions.ConnectTimeout, # Timeout - requests.exceptions.ReadTimeout, # Timeout - ], - } diff --git a/api/core/model_runtime/model_providers/openai/text_embedding/text_embedding.py b/api/core/model_runtime/model_providers/openai/text_embedding/text_embedding.py deleted file mode 100644 index 16f1a0cfa1..0000000000 --- a/api/core/model_runtime/model_providers/openai/text_embedding/text_embedding.py +++ /dev/null @@ -1,203 +0,0 @@ -import base64 -import time -from typing import Optional, Union - -import numpy as np -import tiktoken -from openai import OpenAI - -from core.embedding.embedding_constant import EmbeddingInputType -from core.model_runtime.entities.model_entities import PriceType -from core.model_runtime.entities.text_embedding_entities import EmbeddingUsage, TextEmbeddingResult -from core.model_runtime.errors.validate import CredentialsValidateFailedError -from core.model_runtime.model_providers.__base.text_embedding_model import TextEmbeddingModel -from core.model_runtime.model_providers.openai._common import _CommonOpenAI - - -class OpenAITextEmbeddingModel(_CommonOpenAI, TextEmbeddingModel): - """ - Model class for OpenAI text embedding model. 
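The OpenAI _invoke that follows handles inputs longer than the context window differently: it splits the token sequence into context-size windows, embeds each window separately, then recombines the pieces with a token-count-weighted average re-normalized to unit length. A sketch of just the recombination step:

```python
import numpy as np


def combine_chunk_embeddings(chunks: list[list[float]], chunk_token_counts: list[int]) -> list[float]:
    # longer chunks contribute proportionally more to the mean
    average = np.average(chunks, axis=0, weights=chunk_token_counts)
    # re-normalize so the result is unit length, like a single-call embedding
    return (average / np.linalg.norm(average)).tolist()
```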
- """ - - def _invoke( - self, - model: str, - credentials: dict, - texts: list[str], - user: Optional[str] = None, - input_type: EmbeddingInputType = EmbeddingInputType.DOCUMENT, - ) -> TextEmbeddingResult: - """ - Invoke text embedding model - - :param model: model name - :param credentials: model credentials - :param texts: texts to embed - :param user: unique user id - :param input_type: input type - :return: embeddings result - """ - # transform credentials to kwargs for model instance - credentials_kwargs = self._to_credential_kwargs(credentials) - # init model client - client = OpenAI(**credentials_kwargs) - - extra_model_kwargs = {} - if user: - extra_model_kwargs["user"] = user - - extra_model_kwargs["encoding_format"] = "base64" - - # get model properties - context_size = self._get_context_size(model, credentials) - max_chunks = self._get_max_chunks(model, credentials) - - embeddings: list[list[float]] = [[] for _ in range(len(texts))] - tokens = [] - indices = [] - used_tokens = 0 - - try: - enc = tiktoken.encoding_for_model(model) - except KeyError: - enc = tiktoken.get_encoding("cl100k_base") - - for i, text in enumerate(texts): - token = enc.encode(text) - for j in range(0, len(token), context_size): - tokens += [token[j : j + context_size]] - indices += [i] - - batched_embeddings = [] - _iter = range(0, len(tokens), max_chunks) - - for i in _iter: - # call embedding model - embeddings_batch, embedding_used_tokens = self._embedding_invoke( - model=model, client=client, texts=tokens[i : i + max_chunks], extra_model_kwargs=extra_model_kwargs - ) - - used_tokens += embedding_used_tokens - batched_embeddings += embeddings_batch - - results: list[list[list[float]]] = [[] for _ in range(len(texts))] - num_tokens_in_batch: list[list[int]] = [[] for _ in range(len(texts))] - for i in range(len(indices)): - results[indices[i]].append(batched_embeddings[i]) - num_tokens_in_batch[indices[i]].append(len(tokens[i])) - - for i in range(len(texts)): - _result = results[i] - if len(_result) == 0: - embeddings_batch, embedding_used_tokens = self._embedding_invoke( - model=model, client=client, texts="", extra_model_kwargs=extra_model_kwargs - ) - - used_tokens += embedding_used_tokens - average = embeddings_batch[0] - else: - average = np.average(_result, axis=0, weights=num_tokens_in_batch[i]) - embeddings[i] = (average / np.linalg.norm(average)).tolist() - - # calc usage - usage = self._calc_response_usage(model=model, credentials=credentials, tokens=used_tokens) - - return TextEmbeddingResult(embeddings=embeddings, usage=usage, model=model) - - def get_num_tokens(self, model: str, credentials: dict, texts: list[str]) -> int: - """ - Get number of tokens for given prompt messages - - :param model: model name - :param credentials: model credentials - :param texts: texts to embed - :return: - """ - if len(texts) == 0: - return 0 - - try: - enc = tiktoken.encoding_for_model(model) - except KeyError: - enc = tiktoken.get_encoding("cl100k_base") - - total_num_tokens = 0 - for text in texts: - # calculate the number of tokens in the encoded text - tokenized_text = enc.encode(text) - total_num_tokens += len(tokenized_text) - - return total_num_tokens - - def validate_credentials(self, model: str, credentials: dict) -> None: - """ - Validate model credentials - - :param model: model name - :param credentials: model credentials - :return: - """ - try: - # transform credentials to kwargs for model instance - credentials_kwargs = self._to_credential_kwargs(credentials) - client = 
OpenAI(**credentials_kwargs) - - # call embedding model - self._embedding_invoke(model=model, client=client, texts=["ping"], extra_model_kwargs={}) - except Exception as ex: - raise CredentialsValidateFailedError(str(ex)) - - def _embedding_invoke( - self, model: str, client: OpenAI, texts: Union[list[str], str], extra_model_kwargs: dict - ) -> tuple[list[list[float]], int]: - """ - Invoke embedding model - - :param model: model name - :param client: model client - :param texts: texts to embed - :param extra_model_kwargs: extra model kwargs - :return: embeddings and used tokens - """ - # call embedding model - response = client.embeddings.create( - input=texts, - model=model, - **extra_model_kwargs, - ) - - if "encoding_format" in extra_model_kwargs and extra_model_kwargs["encoding_format"] == "base64": - # decode base64 embedding - return ( - [list(np.frombuffer(base64.b64decode(data.embedding), dtype="float32")) for data in response.data], - response.usage.total_tokens, - ) - - return [data.embedding for data in response.data], response.usage.total_tokens - - def _calc_response_usage(self, model: str, credentials: dict, tokens: int) -> EmbeddingUsage: - """ - Calculate response usage - - :param model: model name - :param credentials: model credentials - :param tokens: input tokens - :return: usage - """ - # get input price info - input_price_info = self.get_price( - model=model, credentials=credentials, price_type=PriceType.INPUT, tokens=tokens - ) - - # transform usage - usage = EmbeddingUsage( - tokens=tokens, - total_tokens=tokens, - unit_price=input_price_info.unit_price, - price_unit=input_price_info.unit, - total_price=input_price_info.total_amount, - currency=input_price_info.currency, - latency=time.perf_counter() - self.started_at, - ) - - return usage diff --git a/api/core/model_runtime/model_providers/openai_api_compatible/text_embedding/text_embedding.py b/api/core/model_runtime/model_providers/openai_api_compatible/text_embedding/text_embedding.py deleted file mode 100644 index 64fa6aaa3c..0000000000 --- a/api/core/model_runtime/model_providers/openai_api_compatible/text_embedding/text_embedding.py +++ /dev/null @@ -1,217 +0,0 @@ -import json -import time -from decimal import Decimal -from typing import Optional -from urllib.parse import urljoin - -import numpy as np -import requests - -from core.embedding.embedding_constant import EmbeddingInputType -from core.model_runtime.entities.common_entities import I18nObject -from core.model_runtime.entities.model_entities import ( - AIModelEntity, - FetchFrom, - ModelPropertyKey, - ModelType, - PriceConfig, - PriceType, -) -from core.model_runtime.entities.text_embedding_entities import EmbeddingUsage, TextEmbeddingResult -from core.model_runtime.errors.validate import CredentialsValidateFailedError -from core.model_runtime.model_providers.__base.text_embedding_model import TextEmbeddingModel -from core.model_runtime.model_providers.openai_api_compatible._common import _CommonOaiApiCompat - - -class OAICompatEmbeddingModel(_CommonOaiApiCompat, TextEmbeddingModel): - """ - Model class for an OpenAI API-compatible text embedding model. 
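Requesting encoding_format="base64", as the removed OpenAI module does, keeps the JSON response compact: each vector arrives as a base64 string of raw little-endian float32 values. A decoding sketch matching the np.frombuffer call above:

```python
import base64

import numpy as np


def decode_base64_embedding(b64_embedding: str) -> list[float]:
    # raw bytes -> float32 array -> plain Python floats
    return np.frombuffer(base64.b64decode(b64_embedding), dtype="float32").tolist()


# round-trip check with a known vector:
# vec = np.array([0.1, 0.2, 0.3], dtype="float32")
# assert decode_base64_embedding(base64.b64encode(vec.tobytes()).decode()) == vec.tolist()
```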
- """ - - def _invoke( - self, - model: str, - credentials: dict, - texts: list[str], - user: Optional[str] = None, - input_type: EmbeddingInputType = EmbeddingInputType.DOCUMENT, - ) -> TextEmbeddingResult: - """ - Invoke text embedding model - - :param model: model name - :param credentials: model credentials - :param texts: texts to embed - :param user: unique user id - :param input_type: input type - :return: embeddings result - """ - - # Prepare headers and payload for the request - headers = {"Content-Type": "application/json"} - - api_key = credentials.get("api_key") - if api_key: - headers["Authorization"] = f"Bearer {api_key}" - - endpoint_url = credentials.get("endpoint_url") - if not endpoint_url.endswith("/"): - endpoint_url += "/" - - endpoint_url = urljoin(endpoint_url, "embeddings") - - extra_model_kwargs = {} - if user: - extra_model_kwargs["user"] = user - - extra_model_kwargs["encoding_format"] = "float" - - # get model properties - context_size = self._get_context_size(model, credentials) - max_chunks = self._get_max_chunks(model, credentials) - - inputs = [] - indices = [] - used_tokens = 0 - - for i, text in enumerate(texts): - # Here token count is only an approximation based on the GPT2 tokenizer - # TODO: Optimize for better token estimation and chunking - num_tokens = self._get_num_tokens_by_gpt2(text) - - if num_tokens >= context_size: - cutoff = int(np.floor(len(text) * (context_size / num_tokens))) - # if num tokens is larger than context length, only use the start - inputs.append(text[0:cutoff]) - else: - inputs.append(text) - indices += [i] - - batched_embeddings = [] - _iter = range(0, len(inputs), max_chunks) - - for i in _iter: - # Prepare the payload for the request - payload = {"input": inputs[i : i + max_chunks], "model": model, **extra_model_kwargs} - - # Make the request to the OpenAI API - response = requests.post(endpoint_url, headers=headers, data=json.dumps(payload), timeout=(10, 300)) - - response.raise_for_status() # Raise an exception for HTTP errors - response_data = response.json() - - # Extract embeddings and used tokens from the response - embeddings_batch = [data["embedding"] for data in response_data["data"]] - embedding_used_tokens = response_data["usage"]["total_tokens"] - - used_tokens += embedding_used_tokens - batched_embeddings += embeddings_batch - - # calc usage - usage = self._calc_response_usage(model=model, credentials=credentials, tokens=used_tokens) - - return TextEmbeddingResult(embeddings=batched_embeddings, usage=usage, model=model) - - def get_num_tokens(self, model: str, credentials: dict, texts: list[str]) -> int: - """ - Approximate number of tokens for given messages using GPT2 tokenizer - - :param model: model name - :param credentials: model credentials - :param texts: texts to embed - :return: - """ - return sum(self._get_num_tokens_by_gpt2(text) for text in texts) - - def validate_credentials(self, model: str, credentials: dict) -> None: - """ - Validate model credentials - - :param model: model name - :param credentials: model credentials - :return: - """ - try: - headers = {"Content-Type": "application/json"} - - api_key = credentials.get("api_key") - - if api_key: - headers["Authorization"] = f"Bearer {api_key}" - - endpoint_url = credentials.get("endpoint_url") - if not endpoint_url.endswith("/"): - endpoint_url += "/" - - endpoint_url = urljoin(endpoint_url, "embeddings") - - payload = {"input": "ping", "model": model} - - response = requests.post(url=endpoint_url, headers=headers, data=json.dumps(payload), 
timeout=(10, 300)) - - if response.status_code != 200: - raise CredentialsValidateFailedError( - f"Credentials validation failed with status code {response.status_code}" - ) - - try: - json_result = response.json() - except json.JSONDecodeError as e: - raise CredentialsValidateFailedError("Credentials validation failed: JSON decode error") - - if "model" not in json_result: - raise CredentialsValidateFailedError("Credentials validation failed: invalid response") - except CredentialsValidateFailedError: - raise - except Exception as ex: - raise CredentialsValidateFailedError(str(ex)) - - def get_customizable_model_schema(self, model: str, credentials: dict) -> AIModelEntity: - """ - generate custom model entities from credentials - """ - entity = AIModelEntity( - model=model, - label=I18nObject(en_US=model), - model_type=ModelType.TEXT_EMBEDDING, - fetch_from=FetchFrom.CUSTOMIZABLE_MODEL, - model_properties={ - ModelPropertyKey.CONTEXT_SIZE: int(credentials.get("context_size")), - ModelPropertyKey.MAX_CHUNKS: 1, - }, - parameter_rules=[], - pricing=PriceConfig( - input=Decimal(credentials.get("input_price", 0)), - unit=Decimal(credentials.get("unit", 0)), - currency=credentials.get("currency", "USD"), - ), - ) - - return entity - - def _calc_response_usage(self, model: str, credentials: dict, tokens: int) -> EmbeddingUsage: - """ - Calculate response usage - - :param model: model name - :param credentials: model credentials - :param tokens: input tokens - :return: usage - """ - # get input price info - input_price_info = self.get_price( - model=model, credentials=credentials, price_type=PriceType.INPUT, tokens=tokens - ) - - # transform usage - usage = EmbeddingUsage( - tokens=tokens, - total_tokens=tokens, - unit_price=input_price_info.unit_price, - price_unit=input_price_info.unit, - total_price=input_price_info.total_amount, - currency=input_price_info.currency, - latency=time.perf_counter() - self.started_at, - ) - - return usage diff --git a/api/core/model_runtime/model_providers/openllm/text_embedding/text_embedding.py b/api/core/model_runtime/model_providers/openllm/text_embedding/text_embedding.py deleted file mode 100644 index c5d4330912..0000000000 --- a/api/core/model_runtime/model_providers/openllm/text_embedding/text_embedding.py +++ /dev/null @@ -1,155 +0,0 @@ -import time -from json import dumps -from typing import Optional - -from requests import post -from requests.exceptions import ConnectionError, InvalidSchema, MissingSchema - -from core.embedding.embedding_constant import EmbeddingInputType -from core.model_runtime.entities.model_entities import PriceType -from core.model_runtime.entities.text_embedding_entities import EmbeddingUsage, TextEmbeddingResult -from core.model_runtime.errors.invoke import ( - InvokeAuthorizationError, - InvokeBadRequestError, - InvokeConnectionError, - InvokeError, - InvokeRateLimitError, - InvokeServerUnavailableError, -) -from core.model_runtime.errors.validate import CredentialsValidateFailedError -from core.model_runtime.model_providers.__base.text_embedding_model import TextEmbeddingModel - - -class OpenLLMTextEmbeddingModel(TextEmbeddingModel): - """ - Model class for OpenLLM text embedding model. 
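One wrinkle in get_customizable_model_schema above: int(credentials.get("context_size")) raises TypeError when the key is missing, since int(None) is invalid. A slightly hardened sketch of the same credential-to-property conversion (the 4096 default is an assumption, not from the original):

```python
from decimal import Decimal


def parse_model_properties(credentials: dict) -> dict:
    # defaulting before int()/Decimal() avoids TypeError on absent keys (sketch)
    return {
        "context_size": int(credentials.get("context_size", "4096")),  # assumed default
        "input_price": Decimal(credentials.get("input_price", 0)),
        "unit": Decimal(credentials.get("unit", 0)),
        "currency": credentials.get("currency", "USD"),
    }
```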
- """ - - def _invoke( - self, - model: str, - credentials: dict, - texts: list[str], - user: Optional[str] = None, - input_type: EmbeddingInputType = EmbeddingInputType.DOCUMENT, - ) -> TextEmbeddingResult: - """ - Invoke text embedding model - - :param model: model name - :param credentials: model credentials - :param texts: texts to embed - :param user: unique user id - :param input_type: input type - :return: embeddings result - """ - server_url = credentials["server_url"] - if not server_url: - raise CredentialsValidateFailedError("server_url is required") - - headers = {"Content-Type": "application/json", "accept": "application/json"} - - url = f"{server_url}/v1/embeddings" - - data = texts - try: - response = post(url, headers=headers, data=dumps(data)) - except (ConnectionError, InvalidSchema, MissingSchema) as e: - # cloud not connect to the server - raise InvokeAuthorizationError(f"Invalid server URL: {e}") - except Exception as e: - raise InvokeConnectionError(str(e)) - - if response.status_code != 200: - if response.status_code == 400: - raise InvokeBadRequestError(response.text) - elif response.status_code == 404: - raise InvokeAuthorizationError(response.text) - elif response.status_code == 500: - raise InvokeServerUnavailableError(response.text) - - try: - resp = response.json()[0] - embeddings = resp["embeddings"] - total_tokens = resp["num_tokens"] - except KeyError as e: - raise InvokeServerUnavailableError(f"Failed to convert response to json: {e} with text: {response.text}") - - usage = self._calc_response_usage(model=model, credentials=credentials, tokens=total_tokens) - - result = TextEmbeddingResult(model=model, embeddings=embeddings, usage=usage) - - return result - - def get_num_tokens(self, model: str, credentials: dict, texts: list[str]) -> int: - """ - Get number of tokens for given prompt messages - - :param model: model name - :param credentials: model credentials - :param texts: texts to embed - :return: - """ - num_tokens = 0 - for text in texts: - # use GPT2Tokenizer to get num tokens - num_tokens += self._get_num_tokens_by_gpt2(text) - return num_tokens - - def validate_credentials(self, model: str, credentials: dict) -> None: - """ - Validate model credentials - - :param model: model name - :param credentials: model credentials - :return: - """ - try: - self._invoke(model=model, credentials=credentials, texts=["ping"]) - except InvokeAuthorizationError: - raise CredentialsValidateFailedError("Invalid server_url") - - @property - def _invoke_error_mapping(self) -> dict[type[InvokeError], list[type[Exception]]]: - """ - Map model invoke error to unified error - The key is the error type thrown to the caller - The value is the error type thrown by the model, - which needs to be converted into a unified error type for the caller. 
- - :return: Invoke error mapping - """ - return { - InvokeConnectionError: [InvokeConnectionError], - InvokeServerUnavailableError: [InvokeServerUnavailableError], - InvokeRateLimitError: [InvokeRateLimitError], - InvokeAuthorizationError: [InvokeAuthorizationError], - InvokeBadRequestError: [KeyError], - } - - def _calc_response_usage(self, model: str, credentials: dict, tokens: int) -> EmbeddingUsage: - """ - Calculate response usage - - :param model: model name - :param credentials: model credentials - :param tokens: input tokens - :return: usage - """ - # get input price info - input_price_info = self.get_price( - model=model, credentials=credentials, price_type=PriceType.INPUT, tokens=tokens - ) - - # transform usage - usage = EmbeddingUsage( - tokens=tokens, - total_tokens=tokens, - unit_price=input_price_info.unit_price, - price_unit=input_price_info.unit, - total_price=input_price_info.total_amount, - currency=input_price_info.currency, - latency=time.perf_counter() - self.started_at, - ) - - return usage diff --git a/api/core/model_runtime/model_providers/replicate/text_embedding/text_embedding.py b/api/core/model_runtime/model_providers/replicate/text_embedding/text_embedding.py deleted file mode 100644 index 9f724a77ac..0000000000 --- a/api/core/model_runtime/model_providers/replicate/text_embedding/text_embedding.py +++ /dev/null @@ -1,152 +0,0 @@ -import json -import time -from typing import Optional - -from replicate import Client as ReplicateClient - -from core.embedding.embedding_constant import EmbeddingInputType -from core.model_runtime.entities.common_entities import I18nObject -from core.model_runtime.entities.model_entities import AIModelEntity, FetchFrom, ModelType, PriceType -from core.model_runtime.entities.text_embedding_entities import EmbeddingUsage, TextEmbeddingResult -from core.model_runtime.errors.validate import CredentialsValidateFailedError -from core.model_runtime.model_providers.__base.text_embedding_model import TextEmbeddingModel -from core.model_runtime.model_providers.replicate._common import _CommonReplicate - - -class ReplicateEmbeddingModel(_CommonReplicate, TextEmbeddingModel): - def _invoke( - self, - model: str, - credentials: dict, - texts: list[str], - user: Optional[str] = None, - input_type: EmbeddingInputType = EmbeddingInputType.DOCUMENT, - ) -> TextEmbeddingResult: - """ - Invoke text embedding model - - :param model: model name - :param credentials: model credentials - :param texts: texts to embed - :param user: unique user id - :param input_type: input type - :return: embeddings result - """ - client = ReplicateClient(api_token=credentials["replicate_api_token"], timeout=30) - - if "model_version" in credentials: - model_version = credentials["model_version"] - else: - model_info = client.models.get(model) - model_version = model_info.latest_version.id - - replicate_model_version = f"{model}:{model_version}" - - text_input_key = self._get_text_input_key(model, model_version, client) - - embeddings = self._generate_embeddings_by_text_input_key(client, replicate_model_version, text_input_key, texts) - - tokens = self.get_num_tokens(model, credentials, texts) - usage = self._calc_response_usage(model, credentials, tokens) - - return TextEmbeddingResult(model=model, embeddings=embeddings, usage=usage) - - def get_num_tokens(self, model: str, credentials: dict, texts: list[str]) -> int: - num_tokens = 0 - for text in texts: - num_tokens += self._get_num_tokens_by_gpt2(text) - return num_tokens - - def validate_credentials(self, model: str, 
credentials: dict) -> None: - if "replicate_api_token" not in credentials: - raise CredentialsValidateFailedError("Replicate Access Token must be provided.") - - try: - client = ReplicateClient(api_token=credentials["replicate_api_token"], timeout=30) - - if "model_version" in credentials: - model_version = credentials["model_version"] - else: - model_info = client.models.get(model) - model_version = model_info.latest_version.id - - replicate_model_version = f"{model}:{model_version}" - - text_input_key = self._get_text_input_key(model, model_version, client) - - self._generate_embeddings_by_text_input_key( - client, replicate_model_version, text_input_key, ["Hello worlds!"] - ) - except Exception as e: - raise CredentialsValidateFailedError(str(e)) - - def get_customizable_model_schema(self, model: str, credentials: dict) -> Optional[AIModelEntity]: - entity = AIModelEntity( - model=model, - label=I18nObject(en_US=model), - fetch_from=FetchFrom.CUSTOMIZABLE_MODEL, - model_type=ModelType.TEXT_EMBEDDING, - model_properties={"context_size": 4096, "max_chunks": 1}, - ) - return entity - - @staticmethod - def _get_text_input_key(model: str, model_version: str, client: ReplicateClient) -> str: - model_info = client.models.get(model) - model_info_version = model_info.versions.get(model_version) - - # sort through the openapi schema to get the name of text, texts or inputs - input_properties = sorted( - model_info_version.openapi_schema["components"]["schemas"]["Input"]["properties"].items(), - key=lambda item: item[1].get("x-order", 0), - ) - - for input_property in input_properties: - if input_property[0] in {"text", "texts", "inputs"}: - text_input_key = input_property[0] - return text_input_key - - return "" - - @staticmethod - def _generate_embeddings_by_text_input_key( - client: ReplicateClient, replicate_model_version: str, text_input_key: str, texts: list[str] - ) -> list[list[float]]: - if text_input_key in {"text", "inputs"}: - embeddings = [] - for text in texts: - result = client.run(replicate_model_version, input={text_input_key: text}) - embeddings.append(result[0].get("embedding")) - - return [list(map(float, e)) for e in embeddings] - elif "texts" == text_input_key: - result = client.run( - replicate_model_version, - input={ - "texts": json.dumps(texts), - "batch_size": 4, - "convert_to_numpy": False, - "normalize_embeddings": True, - }, - ) - return result - else: - raise ValueError(f"embeddings input key is invalid: {text_input_key}") - - def _calc_response_usage(self, model: str, credentials: dict, tokens: int) -> EmbeddingUsage: - input_price_info = self.get_price( - model=model, credentials=credentials, price_type=PriceType.INPUT, tokens=tokens - ) - - # transform usage - usage = EmbeddingUsage( - tokens=tokens, - total_tokens=tokens, - unit_price=input_price_info.unit_price, - price_unit=input_price_info.unit, - total_price=input_price_info.total_amount, - currency=input_price_info.currency, - latency=time.perf_counter() - self.started_at, - ) - - return usage diff --git a/api/core/model_runtime/model_providers/sagemaker/llm/llm.py b/api/core/model_runtime/model_providers/sagemaker/llm/llm.py deleted file mode 100644 index 97b7692044..0000000000 --- a/api/core/model_runtime/model_providers/sagemaker/llm/llm.py +++ /dev/null @@ -1,463 +0,0 @@ -import json -import logging -import re -from collections.abc import Generator, Iterator -from typing import Any, Optional, Union, cast - -# from openai.types.chat import ChatCompletion, ChatCompletionChunk -import boto3 -from sagemaker 
import Predictor, serializers -from sagemaker.session import Session - -from core.model_runtime.entities.llm_entities import LLMMode, LLMResult, LLMResultChunk, LLMResultChunkDelta -from core.model_runtime.entities.message_entities import ( - AssistantPromptMessage, - ImagePromptMessageContent, - PromptMessage, - PromptMessageContent, - PromptMessageContentType, - PromptMessageTool, - SystemPromptMessage, - ToolPromptMessage, - UserPromptMessage, -) -from core.model_runtime.entities.model_entities import ( - AIModelEntity, - FetchFrom, - I18nObject, - ModelFeature, - ModelPropertyKey, - ModelType, - ParameterRule, - ParameterType, -) -from core.model_runtime.errors.invoke import ( - InvokeAuthorizationError, - InvokeBadRequestError, - InvokeConnectionError, - InvokeError, - InvokeRateLimitError, - InvokeServerUnavailableError, -) -from core.model_runtime.errors.validate import CredentialsValidateFailedError -from core.model_runtime.model_providers.__base.large_language_model import LargeLanguageModel - -logger = logging.getLogger(__name__) - - -def inference(predictor, messages: list[dict[str, Any]], params: dict[str, Any], stop: list, stream=False): - """ - params: - predictor: SageMaker Predictor - messages (List[Dict[str,Any]]): message list. - messages = [ - {"role": "system", "content":"please answer in Chinese"}, - {"role": "user", "content": "who are you? what are you doing?"}, - ] - params (Dict[str,Any]): model parameters for the LLM. - stream (bool): False by default. - - response: - result of inference if stream is False - Iterator of Chunks if stream is True - """ - payload = { - "model": params.get("model_name"), - "stop": stop, - "messages": messages, - "stream": stream, - "max_tokens": params.get("max_new_tokens", params.get("max_tokens", 2048)), - "temperature": params.get("temperature", 0.1), - "top_p": params.get("top_p", 0.9), - } - - if not stream: - response = predictor.predict(payload) - return response - else: - response_stream = predictor.predict_stream(payload) - return response_stream - - -class SageMakerLargeLanguageModel(LargeLanguageModel): - """ - Model class for SageMaker large language model.
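_handle_chat_stream_response below reassembles SageMaker's SSE-style output from arbitrary byte chunks: it accumulates a string buffer, extracts complete `data: ...` frames with a regex, and keeps any trailing partial frame for the next chunk. A standalone sketch of that buffering loop (the pattern here is unanchored so several complete frames per chunk are all consumed, a small deviation from the original's `^`-anchored regex):

```python
import json
import re


def iter_sse_json(chunks):
    """Yield parsed JSON payloads from an iterator of byte chunks (sketch)."""
    buffer = ""
    for chunk_bytes in chunks:
        buffer += chunk_bytes.decode("utf-8")
        last_idx = 0
        for match in re.finditer(r"data:\s*(.+?)\n\n", buffer):
            last_idx = match.end()
            try:
                yield json.loads(match.group(1).strip())
            except json.JSONDecodeError:
                continue  # the original logs malformed frames and moves on
        buffer = buffer[last_idx:]  # retain any incomplete trailing frame
```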
- """ - - sagemaker_session: Any = None - predictor: Any = None - sagemaker_endpoint: str = None - - def _handle_chat_generate_response( - self, - model: str, - credentials: dict, - prompt_messages: list[PromptMessage], - tools: list[PromptMessageTool], - resp: bytes, - ) -> LLMResult: - """ - handle normal chat generate response - """ - resp_obj = json.loads(resp.decode("utf-8")) - resp_str = resp_obj.get("choices")[0].get("message").get("content") - - if len(resp_str) == 0: - raise InvokeServerUnavailableError("Empty response") - - assistant_prompt_message = AssistantPromptMessage(content=resp_str, tool_calls=[]) - - prompt_tokens = self._num_tokens_from_messages(messages=prompt_messages, tools=tools) - completion_tokens = self._num_tokens_from_messages(messages=[assistant_prompt_message], tools=tools) - - usage = self._calc_response_usage( - model=model, credentials=credentials, prompt_tokens=prompt_tokens, completion_tokens=completion_tokens - ) - - response = LLMResult( - model=model, - prompt_messages=prompt_messages, - system_fingerprint=None, - usage=usage, - message=assistant_prompt_message, - ) - - return response - - def _handle_chat_stream_response( - self, - model: str, - credentials: dict, - prompt_messages: list[PromptMessage], - tools: list[PromptMessageTool], - resp: Iterator[bytes], - ) -> Generator: - """ - handle stream chat generate response - """ - full_response = "" - buffer = "" - for chunk_bytes in resp: - buffer += chunk_bytes.decode("utf-8") - last_idx = 0 - for match in re.finditer(r"^data:\s*(.+?)(\n\n)", buffer): - try: - data = json.loads(match.group(1).strip()) - last_idx = match.span()[1] - - if "content" in data["choices"][0]["delta"]: - chunk_content = data["choices"][0]["delta"]["content"] - assistant_prompt_message = AssistantPromptMessage(content=chunk_content, tool_calls=[]) - - if data["choices"][0]["finish_reason"] is not None: - temp_assistant_prompt_message = AssistantPromptMessage(content=full_response, tool_calls=[]) - prompt_tokens = self._num_tokens_from_messages(messages=prompt_messages, tools=tools) - completion_tokens = self._num_tokens_from_messages( - messages=[temp_assistant_prompt_message], tools=[] - ) - usage = self._calc_response_usage( - model=model, - credentials=credentials, - prompt_tokens=prompt_tokens, - completion_tokens=completion_tokens, - ) - - yield LLMResultChunk( - model=model, - prompt_messages=prompt_messages, - system_fingerprint=None, - delta=LLMResultChunkDelta( - index=0, - message=assistant_prompt_message, - finish_reason=data["choices"][0]["finish_reason"], - usage=usage, - ), - ) - else: - yield LLMResultChunk( - model=model, - prompt_messages=prompt_messages, - system_fingerprint=None, - delta=LLMResultChunkDelta(index=0, message=assistant_prompt_message), - ) - - full_response += chunk_content - except (json.JSONDecodeError, KeyError, IndexError) as e: - logger.info("json parse exception, content: {}".format(match.group(1).strip())) - pass - - buffer = buffer[last_idx:] - - def _invoke( - self, - model: str, - credentials: dict, - prompt_messages: list[PromptMessage], - model_parameters: dict, - tools: Optional[list[PromptMessageTool]] = None, - stop: Optional[list[str]] = None, - stream: bool = True, - user: Optional[str] = None, - ) -> Union[LLMResult, Generator]: - """ - Invoke large language model - - :param model: model name - :param credentials: model credentials - :param prompt_messages: prompt messages - :param model_parameters: model parameters - :param tools: tools for tool calling - :param stop: 
stop words - :param stream: is stream response - :param user: unique user id - :return: full response or stream response chunk generator result - """ - if not self.sagemaker_session: - access_key = credentials.get("aws_access_key_id") - secret_key = credentials.get("aws_secret_access_key") - aws_region = credentials.get("aws_region") - boto_session = None - if aws_region: - if access_key and secret_key: - boto_session = boto3.Session( - aws_access_key_id=access_key, aws_secret_access_key=secret_key, region_name=aws_region - ) - else: - boto_session = boto3.Session(region_name=aws_region) - else: - boto_session = boto3.Session() - - sagemaker_client = boto_session.client("sagemaker") - self.sagemaker_session = Session(boto_session=boto_session, sagemaker_client=sagemaker_client) - - if self.sagemaker_endpoint != credentials.get("sagemaker_endpoint"): - self.sagemaker_endpoint = credentials.get("sagemaker_endpoint") - self.predictor = Predictor( - endpoint_name=self.sagemaker_endpoint, - sagemaker_session=self.sagemaker_session, - serializer=serializers.JSONSerializer(), - ) - - messages: list[dict[str, Any]] = [{"role": p.role.value, "content": p.content} for p in prompt_messages] - response = inference( - predictor=self.predictor, messages=messages, params=model_parameters, stop=stop, stream=stream - ) - - if stream: - if tools and len(tools) > 0: - raise InvokeBadRequestError(f"{model}'s tool calls does not support stream mode") - - return self._handle_chat_stream_response( - model=model, credentials=credentials, prompt_messages=prompt_messages, tools=tools, resp=response - ) - return self._handle_chat_generate_response( - model=model, credentials=credentials, prompt_messages=prompt_messages, tools=tools, resp=response - ) - - def _convert_prompt_message_to_dict(self, message: PromptMessage) -> dict: - """ - Convert PromptMessage to dict for OpenAI Compatibility API - """ - if isinstance(message, UserPromptMessage): - message = cast(UserPromptMessage, message) - if isinstance(message.content, str): - message_dict = {"role": "user", "content": message.content} - else: - sub_messages = [] - for message_content in message.content: - if message_content.type == PromptMessageContentType.TEXT: - message_content = cast(PromptMessageContent, message_content) - sub_message_dict = {"type": "text", "text": message_content.data} - sub_messages.append(sub_message_dict) - elif message_content.type == PromptMessageContentType.IMAGE: - message_content = cast(ImagePromptMessageContent, message_content) - sub_message_dict = { - "type": "image_url", - "image_url": {"url": message_content.data, "detail": message_content.detail.value}, - } - sub_messages.append(sub_message_dict) - message_dict = {"role": "user", "content": sub_messages} - elif isinstance(message, AssistantPromptMessage): - message = cast(AssistantPromptMessage, message) - message_dict = {"role": "assistant", "content": message.content} - if message.tool_calls and len(message.tool_calls) > 0: - message_dict["function_call"] = { - "name": message.tool_calls[0].function.name, - "arguments": message.tool_calls[0].function.arguments, - } - elif isinstance(message, SystemPromptMessage): - message = cast(SystemPromptMessage, message) - message_dict = {"role": "system", "content": message.content} - elif isinstance(message, ToolPromptMessage): - message = cast(ToolPromptMessage, message) - message_dict = {"tool_call_id": message.tool_call_id, "role": "tool", "content": message.content} - else: - raise ValueError(f"Unknown message type 
{type(message)}") - - return message_dict - - def _num_tokens_from_messages( - self, messages: list[PromptMessage], tools: list[PromptMessageTool], is_completion_model: bool = False - ) -> int: - def tokens(text: str): - return self._get_num_tokens_by_gpt2(text) - - if is_completion_model: - return sum(tokens(str(message.content)) for message in messages) - - tokens_per_message = 3 - tokens_per_name = 1 - - num_tokens = 0 - messages_dict = [self._convert_prompt_message_to_dict(m) for m in messages] - for message in messages_dict: - num_tokens += tokens_per_message - for key, value in message.items(): - if isinstance(value, list): - text = "" - for item in value: - if isinstance(item, dict) and item["type"] == "text": - text += item["text"] - - value = text - - if key == "tool_calls": - for tool_call in value: - for t_key, t_value in tool_call.items(): - num_tokens += tokens(t_key) - if t_key == "function": - for f_key, f_value in t_value.items(): - num_tokens += tokens(f_key) - num_tokens += tokens(f_value) - else: - num_tokens += tokens(t_key) - num_tokens += tokens(t_value) - if key == "function_call": - for t_key, t_value in value.items(): - num_tokens += tokens(t_key) - if t_key == "function": - for f_key, f_value in t_value.items(): - num_tokens += tokens(f_key) - num_tokens += tokens(f_value) - else: - num_tokens += tokens(t_key) - num_tokens += tokens(t_value) - else: - num_tokens += tokens(str(value)) - - if key == "name": - num_tokens += tokens_per_name - num_tokens += 3 - - if tools: - num_tokens += self._num_tokens_for_tools(tools) - - return num_tokens - - def get_num_tokens( - self, - model: str, - credentials: dict, - prompt_messages: list[PromptMessage], - tools: Optional[list[PromptMessageTool]] = None, - ) -> int: - """ - Get number of tokens for given prompt messages - - :param model: model name - :param credentials: model credentials - :param prompt_messages: prompt messages - :param tools: tools for tool calling - :return: - """ - # get model mode - try: - return self._num_tokens_from_messages(prompt_messages, tools) - except Exception as e: - raise self._transform_invoke_error(e) - - def validate_credentials(self, model: str, credentials: dict) -> None: - """ - Validate model credentials - - :param model: model name - :param credentials: model credentials - :return: - """ - try: - # get model mode - pass - except Exception as ex: - raise CredentialsValidateFailedError(str(ex)) - - @property - def _invoke_error_mapping(self) -> dict[type[InvokeError], list[type[Exception]]]: - """ - Map model invoke error to unified error - The key is the error type thrown to the caller - The value is the error type thrown by the model, - which needs to be converted into a unified error type for the caller. 
- - :return: Invoke error mapping - """ - return { - InvokeConnectionError: [InvokeConnectionError], - InvokeServerUnavailableError: [InvokeServerUnavailableError], - InvokeRateLimitError: [InvokeRateLimitError], - InvokeAuthorizationError: [InvokeAuthorizationError], - InvokeBadRequestError: [InvokeBadRequestError, KeyError, ValueError], - } - - def get_customizable_model_schema(self, model: str, credentials: dict) -> AIModelEntity | None: - """ - used to define customizable model schema - """ - rules = [ - ParameterRule( - name="temperature", - type=ParameterType.FLOAT, - use_template="temperature", - label=I18nObject(zh_Hans="温度", en_US="Temperature"), - ), - ParameterRule( - name="top_p", - type=ParameterType.FLOAT, - use_template="top_p", - label=I18nObject(zh_Hans="Top P", en_US="Top P"), - ), - ParameterRule( - name="max_tokens", - type=ParameterType.INT, - use_template="max_tokens", - min=1, - max=credentials.get("context_length", 2048), - default=512, - label=I18nObject(zh_Hans="最大生成长度", en_US="Max Tokens"), - ), - ] - - completion_type = LLMMode.value_of(credentials["mode"]).value - - features = [] - - support_function_call = credentials.get("support_function_call", False) - if support_function_call: - features.append(ModelFeature.TOOL_CALL) - - support_vision = credentials.get("support_vision", False) - if support_vision: - features.append(ModelFeature.VISION) - - context_length = credentials.get("context_length", 2048) - - entity = AIModelEntity( - model=model, - label=I18nObject(en_US=model), - fetch_from=FetchFrom.CUSTOMIZABLE_MODEL, - model_type=ModelType.LLM, - features=features, - model_properties={ModelPropertyKey.MODE: completion_type, ModelPropertyKey.CONTEXT_SIZE: context_length}, - parameter_rules=rules, - ) - - return entity diff --git a/api/core/model_runtime/model_providers/sagemaker/text_embedding/text_embedding.py b/api/core/model_runtime/model_providers/sagemaker/text_embedding/text_embedding.py deleted file mode 100644 index 8f993ce672..0000000000 --- a/api/core/model_runtime/model_providers/sagemaker/text_embedding/text_embedding.py +++ /dev/null @@ -1,200 +0,0 @@ -import itertools -import json -import logging -import time -from typing import Any, Optional - -import boto3 - -from core.embedding.embedding_constant import EmbeddingInputType -from core.model_runtime.entities.common_entities import I18nObject -from core.model_runtime.entities.model_entities import AIModelEntity, FetchFrom, ModelPropertyKey, ModelType, PriceType -from core.model_runtime.entities.text_embedding_entities import EmbeddingUsage, TextEmbeddingResult -from core.model_runtime.errors.invoke import ( - InvokeAuthorizationError, - InvokeBadRequestError, - InvokeConnectionError, - InvokeError, - InvokeRateLimitError, - InvokeServerUnavailableError, -) -from core.model_runtime.errors.validate import CredentialsValidateFailedError -from core.model_runtime.model_providers.__base.text_embedding_model import TextEmbeddingModel - -BATCH_SIZE = 20 -CONTEXT_SIZE = 8192 - -logger = logging.getLogger(__name__) - - -def batch_generator(generator, batch_size): - while True: - batch = list(itertools.islice(generator, batch_size)) - if not batch: - break - yield batch - - -class SageMakerEmbeddingModel(TextEmbeddingModel): - """ - Model class for SageMaker text embedding model. 
- """ - - sagemaker_client: Any = None - - def _sagemaker_embedding(self, sm_client, endpoint_name, content_list: list[str]): - response_model = sm_client.invoke_endpoint( - EndpointName=endpoint_name, - Body=json.dumps({"inputs": content_list, "parameters": {}, "is_query": False, "instruction": ""}), - ContentType="application/json", - ) - json_str = response_model["Body"].read().decode("utf8") - json_obj = json.loads(json_str) - embeddings = json_obj["embeddings"] - return embeddings - - def _invoke( - self, - model: str, - credentials: dict, - texts: list[str], - user: Optional[str] = None, - input_type: EmbeddingInputType = EmbeddingInputType.DOCUMENT, - ) -> TextEmbeddingResult: - """ - Invoke text embedding model - - :param model: model name - :param credentials: model credentials - :param texts: texts to embed - :param user: unique user id - :param input_type: input type - :return: embeddings result - """ - # get model properties - try: - line = 1 - if not self.sagemaker_client: - access_key = credentials.get("aws_access_key_id") - secret_key = credentials.get("aws_secret_access_key") - aws_region = credentials.get("aws_region") - if aws_region: - if access_key and secret_key: - self.sagemaker_client = boto3.client( - "sagemaker-runtime", - aws_access_key_id=access_key, - aws_secret_access_key=secret_key, - region_name=aws_region, - ) - else: - self.sagemaker_client = boto3.client("sagemaker-runtime", region_name=aws_region) - else: - self.sagemaker_client = boto3.client("sagemaker-runtime") - - line = 2 - sagemaker_endpoint = credentials.get("sagemaker_endpoint") - - line = 3 - truncated_texts = [item[:CONTEXT_SIZE] for item in texts] - - batches = batch_generator((text for text in truncated_texts), batch_size=BATCH_SIZE) - all_embeddings = [] - - line = 4 - for batch in batches: - embeddings = self._sagemaker_embedding(self.sagemaker_client, sagemaker_endpoint, batch) - all_embeddings.extend(embeddings) - - line = 5 - # calc usage - usage = self._calc_response_usage( - model=model, - credentials=credentials, - tokens=0, # It's not SAAS API, usage is meaningless - ) - line = 6 - - return TextEmbeddingResult(embeddings=all_embeddings, usage=usage, model=model) - - except Exception as e: - logger.exception(f"Exception {e}, line : {line}") - - def get_num_tokens(self, model: str, credentials: dict, texts: list[str]) -> int: - """ - Get number of tokens for given prompt messages - - :param model: model name - :param credentials: model credentials - :param texts: texts to embed - :return: - """ - return 0 - - def validate_credentials(self, model: str, credentials: dict) -> None: - """ - Validate model credentials - - :param model: model name - :param credentials: model credentials - :return: - """ - try: - print("validate_credentials ok....") - except Exception as ex: - raise CredentialsValidateFailedError(str(ex)) - - def _calc_response_usage(self, model: str, credentials: dict, tokens: int) -> EmbeddingUsage: - """ - Calculate response usage - - :param model: model name - :param credentials: model credentials - :param tokens: input tokens - :return: usage - """ - # get input price info - input_price_info = self.get_price( - model=model, credentials=credentials, price_type=PriceType.INPUT, tokens=tokens - ) - - # transform usage - usage = EmbeddingUsage( - tokens=tokens, - total_tokens=tokens, - unit_price=input_price_info.unit_price, - price_unit=input_price_info.unit, - total_price=input_price_info.total_amount, - currency=input_price_info.currency, - latency=time.perf_counter() - 
self.started_at, - ) - - return usage - - @property - def _invoke_error_mapping(self) -> dict[type[InvokeError], list[type[Exception]]]: - return { - InvokeConnectionError: [InvokeConnectionError], - InvokeServerUnavailableError: [InvokeServerUnavailableError], - InvokeRateLimitError: [InvokeRateLimitError], - InvokeAuthorizationError: [InvokeAuthorizationError], - InvokeBadRequestError: [KeyError], - } - - def get_customizable_model_schema(self, model: str, credentials: dict) -> AIModelEntity | None: - """ - used to define customizable model schema - """ - - entity = AIModelEntity( - model=model, - label=I18nObject(en_US=model), - fetch_from=FetchFrom.CUSTOMIZABLE_MODEL, - model_type=ModelType.TEXT_EMBEDDING, - model_properties={ - ModelPropertyKey.CONTEXT_SIZE: CONTEXT_SIZE, - ModelPropertyKey.MAX_CHUNKS: BATCH_SIZE, - }, - parameter_rules=[], - ) - - return entity diff --git a/api/core/model_runtime/model_providers/siliconflow/llm/_position.yaml b/api/core/model_runtime/model_providers/siliconflow/llm/_position.yaml deleted file mode 100644 index 8d1df82140..0000000000 --- a/api/core/model_runtime/model_providers/siliconflow/llm/_position.yaml +++ /dev/null @@ -1,28 +0,0 @@ -- Qwen/Qwen2.5-72B-Instruct -- Qwen/Qwen2.5-32B-Instruct -- Qwen/Qwen2.5-14B-Instruct -- Qwen/Qwen2.5-7B-Instruct -- Qwen/Qwen2.5-Coder-7B-Instruct -- Qwen/Qwen2.5-Math-72B-Instruct -- Qwen/Qwen2-72B-Instruct -- Qwen/Qwen2-57B-A14B-Instruct -- Qwen/Qwen2-7B-Instruct -- Qwen/Qwen2-1.5B-Instruct -- deepseek-ai/DeepSeek-V2.5 -- deepseek-ai/DeepSeek-V2-Chat -- deepseek-ai/DeepSeek-Coder-V2-Instruct -- THUDM/glm-4-9b-chat -- 01-ai/Yi-1.5-34B-Chat-16K -- 01-ai/Yi-1.5-9B-Chat-16K -- 01-ai/Yi-1.5-6B-Chat -- internlm/internlm2_5-20b-chat -- internlm/internlm2_5-7b-chat -- meta-llama/Meta-Llama-3.1-405B-Instruct -- meta-llama/Meta-Llama-3.1-70B-Instruct -- meta-llama/Meta-Llama-3.1-8B-Instruct -- meta-llama/Meta-Llama-3-70B-Instruct -- meta-llama/Meta-Llama-3-8B-Instruct -- google/gemma-2-27b-it -- google/gemma-2-9b-it -- mistralai/Mistral-7B-Instruct-v0.2 -- mistralai/Mixtral-8x7B-Instruct-v0.1 diff --git a/api/core/model_runtime/model_providers/siliconflow/llm/internlm2_5-20b-chat.yaml b/api/core/model_runtime/model_providers/siliconflow/llm/internlm2_5-20b-chat.yaml deleted file mode 100644 index d9663582e5..0000000000 --- a/api/core/model_runtime/model_providers/siliconflow/llm/internlm2_5-20b-chat.yaml +++ /dev/null @@ -1,30 +0,0 @@ -model: internlm/internlm2_5-20b-chat -label: - en_US: internlm/internlm2_5-20b-chat -model_type: llm -features: - - agent-thought -model_properties: - mode: chat - context_size: 32768 -parameter_rules: - - name: temperature - use_template: temperature - - name: max_tokens - use_template: max_tokens - type: int - default: 512 - min: 1 - max: 4096 - help: - zh_Hans: 指定生成结果长度的上限。如果生成结果截断,可以调大该参数。 - en_US: Specifies the upper limit on the length of generated results. If the generated results are truncated, you can increase this parameter. 
- - name: top_p - use_template: top_p - - name: frequency_penalty - use_template: frequency_penalty -pricing: - input: '1' - output: '1' - unit: '0.000001' - currency: RMB diff --git a/api/core/model_runtime/model_providers/siliconflow/llm/qwen2.5-coder-7b-instruct.yaml b/api/core/model_runtime/model_providers/siliconflow/llm/qwen2.5-coder-7b-instruct.yaml deleted file mode 100644 index 76526200cc..0000000000 --- a/api/core/model_runtime/model_providers/siliconflow/llm/qwen2.5-coder-7b-instruct.yaml +++ /dev/null @@ -1,74 +0,0 @@ -model: Qwen/Qwen2.5-Coder-7B-Instruct -label: - en_US: Qwen/Qwen2.5-Coder-7B-Instruct -model_type: llm -features: - - agent-thought -model_properties: - mode: chat - context_size: 131072 -parameter_rules: - - name: temperature - use_template: temperature - type: float - default: 0.3 - min: 0.0 - max: 2.0 - help: - zh_Hans: 用于控制随机性和多样性的程度。具体来说,temperature值控制了生成文本时对每个候选词的概率分布进行平滑的程度。较高的temperature值会降低概率分布的峰值,使得更多的低概率词被选择,生成结果更加多样化;而较低的temperature值则会增强概率分布的峰值,使得高概率词更容易被选择,生成结果更加确定。 - en_US: Used to control the degree of randomness and diversity. Specifically, the temperature value controls the degree to which the probability distribution of each candidate word is smoothed when generating text. A higher temperature value will reduce the peak value of the probability distribution, allowing more low-probability words to be selected, and the generated results will be more diverse; while a lower temperature value will enhance the peak value of the probability distribution, making it easier for high-probability words to be selected. , the generated results are more certain. - - name: max_tokens - use_template: max_tokens - type: int - default: 8192 - min: 1 - max: 8192 - help: - zh_Hans: 用于指定模型在生成内容时token的最大数量,它定义了生成的上限,但不保证每次都会生成到这个数量。 - en_US: It is used to specify the maximum number of tokens when the model generates content. It defines the upper limit of generation, but does not guarantee that this number will be generated every time. - - name: top_p - use_template: top_p - type: float - default: 0.8 - min: 0.1 - max: 0.9 - help: - zh_Hans: 生成过程中核采样方法概率阈值,例如,取值为0.8时,仅保留概率加起来大于等于0.8的最可能token的最小集合作为候选集。取值范围为(0,1.0),取值越大,生成的随机性越高;取值越低,生成的确定性越高。 - en_US: The probability threshold of the kernel sampling method during the generation process. For example, when the value is 0.8, only the smallest set of the most likely tokens with a sum of probabilities greater than or equal to 0.8 is retained as the candidate set. The value range is (0,1.0). The larger the value, the higher the randomness generated; the lower the value, the higher the certainty generated. - - name: top_k - type: int - min: 0 - max: 99 - label: - zh_Hans: 取样数量 - en_US: Top k - help: - zh_Hans: 生成时,采样候选集的大小。例如,取值为50时,仅将单次生成中得分最高的50个token组成随机采样的候选集。取值越大,生成的随机性越高;取值越小,生成的确定性越高。 - en_US: The size of the sample candidate set when generated. For example, when the value is 50, only the 50 highest-scoring tokens in a single generation form a randomly sampled candidate set. The larger the value, the higher the randomness generated; the smaller the value, the higher the certainty generated. - - name: seed - required: false - type: int - default: 1234 - label: - zh_Hans: 随机种子 - en_US: Random seed - help: - zh_Hans: 生成时使用的随机数种子,用户控制模型生成内容的随机性。支持无符号64位整数,默认值为 1234。在使用seed时,模型将尽可能生成相同或相似的结果,但目前不保证每次生成的结果完全相同。 - en_US: The random number seed used when generating, the user controls the randomness of the content generated by the model. Supports unsigned 64-bit integers, default value is 1234. 
When using seed, the model will try its best to generate the same or similar results, but there is currently no guarantee that the results will be exactly the same every time. - - name: repetition_penalty - required: false - type: float - default: 1.1 - label: - zh_Hans: 重复惩罚 - en_US: Repetition penalty - help: - zh_Hans: 用于控制模型生成时的重复度。提高repetition_penalty时可以降低模型生成的重复度。1.0表示不做惩罚。 - en_US: Used to control the repeatability when generating models. Increasing repetition_penalty can reduce the duplication of model generation. 1.0 means no punishment. - - name: response_format - use_template: response_format -pricing: - input: '0' - output: '0' - unit: '0.000001' - currency: RMB diff --git a/api/core/model_runtime/model_providers/siliconflow/llm/qwen2.5-math-72b-instruct.yaml b/api/core/model_runtime/model_providers/siliconflow/llm/qwen2.5-math-72b-instruct.yaml deleted file mode 100644 index 90afa0cfd5..0000000000 --- a/api/core/model_runtime/model_providers/siliconflow/llm/qwen2.5-math-72b-instruct.yaml +++ /dev/null @@ -1,74 +0,0 @@ -model: Qwen/Qwen2.5-Math-72B-Instruct -label: - en_US: Qwen/Qwen2.5-Math-72B-Instruct -model_type: llm -features: - - agent-thought -model_properties: - mode: chat - context_size: 4096 -parameter_rules: - - name: temperature - use_template: temperature - type: float - default: 0.3 - min: 0.0 - max: 2.0 - help: - zh_Hans: 用于控制随机性和多样性的程度。具体来说,temperature值控制了生成文本时对每个候选词的概率分布进行平滑的程度。较高的temperature值会降低概率分布的峰值,使得更多的低概率词被选择,生成结果更加多样化;而较低的temperature值则会增强概率分布的峰值,使得高概率词更容易被选择,生成结果更加确定。 - en_US: Used to control the degree of randomness and diversity. Specifically, the temperature value controls the degree to which the probability distribution of each candidate word is smoothed when generating text. A higher temperature value will reduce the peak value of the probability distribution, allowing more low-probability words to be selected, and the generated results will be more diverse; while a lower temperature value will enhance the peak value of the probability distribution, making it easier for high-probability words to be selected. , the generated results are more certain. - - name: max_tokens - use_template: max_tokens - type: int - default: 2000 - min: 1 - max: 2000 - help: - zh_Hans: 用于指定模型在生成内容时token的最大数量,它定义了生成的上限,但不保证每次都会生成到这个数量。 - en_US: It is used to specify the maximum number of tokens when the model generates content. It defines the upper limit of generation, but does not guarantee that this number will be generated every time. - - name: top_p - use_template: top_p - type: float - default: 0.8 - min: 0.1 - max: 0.9 - help: - zh_Hans: 生成过程中核采样方法概率阈值,例如,取值为0.8时,仅保留概率加起来大于等于0.8的最可能token的最小集合作为候选集。取值范围为(0,1.0),取值越大,生成的随机性越高;取值越低,生成的确定性越高。 - en_US: The probability threshold of the kernel sampling method during the generation process. For example, when the value is 0.8, only the smallest set of the most likely tokens with a sum of probabilities greater than or equal to 0.8 is retained as the candidate set. The value range is (0,1.0). The larger the value, the higher the randomness generated; the lower the value, the higher the certainty generated. - - name: top_k - type: int - min: 0 - max: 99 - label: - zh_Hans: 取样数量 - en_US: Top k - help: - zh_Hans: 生成时,采样候选集的大小。例如,取值为50时,仅将单次生成中得分最高的50个token组成随机采样的候选集。取值越大,生成的随机性越高;取值越小,生成的确定性越高。 - en_US: The size of the sample candidate set when generated. For example, when the value is 50, only the 50 highest-scoring tokens in a single generation form a randomly sampled candidate set. 
The larger the value, the higher the randomness generated; the smaller the value, the higher the certainty generated. - - name: seed - required: false - type: int - default: 1234 - label: - zh_Hans: 随机种子 - en_US: Random seed - help: - zh_Hans: 生成时使用的随机数种子,用户控制模型生成内容的随机性。支持无符号64位整数,默认值为 1234。在使用seed时,模型将尽可能生成相同或相似的结果,但目前不保证每次生成的结果完全相同。 - en_US: The random number seed used when generating, the user controls the randomness of the content generated by the model. Supports unsigned 64-bit integers, default value is 1234. When using seed, the model will try its best to generate the same or similar results, but there is currently no guarantee that the results will be exactly the same every time. - - name: repetition_penalty - required: false - type: float - default: 1.1 - label: - zh_Hans: 重复惩罚 - en_US: Repetition penalty - help: - zh_Hans: 用于控制模型生成时的重复度。提高repetition_penalty时可以降低模型生成的重复度。1.0表示不做惩罚。 - en_US: Used to control the repeatability when generating models. Increasing repetition_penalty can reduce the duplication of model generation. 1.0 means no punishment. - - name: response_format - use_template: response_format -pricing: - input: '4.13' - output: '4.13' - unit: '0.000001' - currency: RMB diff --git a/api/core/model_runtime/model_providers/siliconflow/text_embedding/text_embedding.py b/api/core/model_runtime/model_providers/siliconflow/text_embedding/text_embedding.py deleted file mode 100644 index c5dcc12610..0000000000 --- a/api/core/model_runtime/model_providers/siliconflow/text_embedding/text_embedding.py +++ /dev/null @@ -1,46 +0,0 @@ -from typing import Optional - -from core.embedding.embedding_constant import EmbeddingInputType -from core.model_runtime.entities.text_embedding_entities import TextEmbeddingResult -from core.model_runtime.model_providers.openai_api_compatible.text_embedding.text_embedding import ( - OAICompatEmbeddingModel, -) - - -class SiliconflowTextEmbeddingModel(OAICompatEmbeddingModel): - """ - Model class for Siliconflow text embedding model. 
- """ - - def validate_credentials(self, model: str, credentials: dict) -> None: - self._add_custom_parameters(credentials) - super().validate_credentials(model, credentials) - - def _invoke( - self, - model: str, - credentials: dict, - texts: list[str], - user: Optional[str] = None, - input_type: EmbeddingInputType = EmbeddingInputType.DOCUMENT, - ) -> TextEmbeddingResult: - """ - Invoke text embedding model - - :param model: model name - :param credentials: model credentials - :param texts: texts to embed - :param user: unique user id - :param input_type: input type - :return: embeddings result - """ - self._add_custom_parameters(credentials) - return super()._invoke(model, credentials, texts, user) - - def get_num_tokens(self, model: str, credentials: dict, texts: list[str]) -> int: - self._add_custom_parameters(credentials) - return super().get_num_tokens(model, credentials, texts) - - @classmethod - def _add_custom_parameters(cls, credentials: dict) -> None: - credentials["endpoint_url"] = "https://api.siliconflow.cn/v1" diff --git a/api/core/model_runtime/model_providers/spark/llm/llm.py b/api/core/model_runtime/model_providers/spark/llm/llm.py deleted file mode 100644 index 1181ba699a..0000000000 --- a/api/core/model_runtime/model_providers/spark/llm/llm.py +++ /dev/null @@ -1,309 +0,0 @@ -import threading -from collections.abc import Generator -from typing import Optional, Union - -from core.model_runtime.entities.llm_entities import LLMResult, LLMResultChunk, LLMResultChunkDelta -from core.model_runtime.entities.message_entities import ( - AssistantPromptMessage, - PromptMessage, - PromptMessageTool, - SystemPromptMessage, - UserPromptMessage, -) -from core.model_runtime.errors.invoke import ( - InvokeAuthorizationError, - InvokeBadRequestError, - InvokeConnectionError, - InvokeError, - InvokeRateLimitError, - InvokeServerUnavailableError, -) -from core.model_runtime.errors.validate import CredentialsValidateFailedError -from core.model_runtime.model_providers.__base.large_language_model import LargeLanguageModel - -from ._client import SparkLLMClient - - -class SparkLargeLanguageModel(LargeLanguageModel): - def _invoke( - self, - model: str, - credentials: dict, - prompt_messages: list[PromptMessage], - model_parameters: dict, - tools: Optional[list[PromptMessageTool]] = None, - stop: Optional[list[str]] = None, - stream: bool = True, - user: Optional[str] = None, - ) -> Union[LLMResult, Generator]: - """ - Invoke large language model - - :param model: model name - :param credentials: model credentials - :param prompt_messages: prompt messages - :param model_parameters: model parameters - :param tools: tools for tool calling - :param stop: stop words - :param stream: is stream response - :param user: unique user id - :return: full response or stream response chunk generator result - """ - # invoke model - return self._generate(model, credentials, prompt_messages, model_parameters, stop, stream, user) - - def get_num_tokens( - self, - model: str, - credentials: dict, - prompt_messages: list[PromptMessage], - tools: Optional[list[PromptMessageTool]] = None, - ) -> int: - """ - Get number of tokens for given prompt messages - - :param model: model name - :param credentials: model credentials - :param prompt_messages: prompt messages - :param tools: tools for tool calling - :return: - """ - prompt = self._convert_messages_to_prompt(prompt_messages) - - return self._get_num_tokens_by_gpt2(prompt) - - def validate_credentials(self, model: str, credentials: dict) -> None: - """ - 
Validate model credentials - - :param model: model name - :param credentials: model credentials - :return: - """ - try: - self._generate( - model=model, - credentials=credentials, - prompt_messages=[ - UserPromptMessage(content="ping"), - ], - model_parameters={ - "temperature": 0.5, - }, - stream=False, - ) - except Exception as ex: - raise CredentialsValidateFailedError(str(ex)) - - def _generate( - self, - model: str, - credentials: dict, - prompt_messages: list[PromptMessage], - model_parameters: dict, - stop: Optional[list[str]] = None, - stream: bool = True, - user: Optional[str] = None, - ) -> Union[LLMResult, Generator]: - """ - Invoke large language model - - :param model: model name - :param credentials: credentials - :param prompt_messages: prompt messages - :param model_parameters: model parameters - :param stop: stop words - :param stream: is stream response - :param user: unique user id - :return: full response or stream response chunk generator result - """ - extra_model_kwargs = {} - if stop: - extra_model_kwargs["stop_sequences"] = stop - - # transform credentials to kwargs for model instance - credentials_kwargs = self._to_credential_kwargs(credentials) - - client = SparkLLMClient( - model=model, - **credentials_kwargs, - ) - - thread = threading.Thread( - target=client.run, - args=( - [ - {"role": prompt_message.role.value, "content": prompt_message.content} - for prompt_message in prompt_messages - ], - user, - model_parameters, - stream, - ), - ) - thread.start() - - if stream: - return self._handle_generate_stream_response(thread, model, credentials, client, prompt_messages) - - return self._handle_generate_response(thread, model, credentials, client, prompt_messages) - - def _handle_generate_response( - self, - thread: threading.Thread, - model: str, - credentials: dict, - client: SparkLLMClient, - prompt_messages: list[PromptMessage], - ) -> LLMResult: - """ - Handle llm response - - :param model: model name - :param response: response - :param prompt_messages: prompt messages - :return: llm response - """ - completion = "" - - for content in client.subscribe(): - if isinstance(content, dict): - delta = content["data"] - else: - delta = content - - completion += delta - - thread.join() - # transform assistant message to prompt message - assistant_prompt_message = AssistantPromptMessage(content=completion) - - # calculate num tokens - prompt_tokens = self.get_num_tokens(model, credentials, prompt_messages) - completion_tokens = self.get_num_tokens(model, credentials, [assistant_prompt_message]) - - # transform usage - usage = self._calc_response_usage(model, credentials, prompt_tokens, completion_tokens) - - # transform response - result = LLMResult( - model=model, - prompt_messages=prompt_messages, - message=assistant_prompt_message, - usage=usage, - ) - - return result - - def _handle_generate_stream_response( - self, - thread: threading.Thread, - model: str, - credentials: dict, - client: SparkLLMClient, - prompt_messages: list[PromptMessage], - ) -> Generator: - """ - Handle llm stream response - - :param thread: thread - :param model: model name - :param credentials: credentials - :param response: response - :param prompt_messages: prompt messages - :return: llm response chunk generator result - """ - completion = "" - for index, content in enumerate(client.subscribe()): - if isinstance(content, dict): - delta = content["data"] - else: - delta = content - completion += delta - assistant_prompt_message = AssistantPromptMessage( - content=delta or "", - ) - 
temp_assistant_prompt_message = AssistantPromptMessage( - content=completion, - ) - prompt_tokens = self.get_num_tokens(model, credentials, prompt_messages) - completion_tokens = self.get_num_tokens(model, credentials, [temp_assistant_prompt_message]) - - # transform usage - usage = self._calc_response_usage(model, credentials, prompt_tokens, completion_tokens) - yield LLMResultChunk( - model=model, - prompt_messages=prompt_messages, - delta=LLMResultChunkDelta(index=index, message=assistant_prompt_message, usage=usage), - ) - - thread.join() - - def _to_credential_kwargs(self, credentials: dict) -> dict: - """ - Transform credentials to kwargs for model instance - - :param credentials: - :return: - """ - credentials_kwargs = { - "app_id": credentials["app_id"], - "api_secret": credentials["api_secret"], - "api_key": credentials["api_key"], - } - - return credentials_kwargs - - def _convert_one_message_to_text(self, message: PromptMessage) -> str: - """ - Convert a single message to a string. - - :param message: PromptMessage to convert. - :return: String representation of the message. - """ - human_prompt = "\n\nHuman:" - ai_prompt = "\n\nAssistant:" - content = message.content - - if isinstance(message, UserPromptMessage): - message_text = f"{human_prompt} {content}" - elif isinstance(message, AssistantPromptMessage): - message_text = f"{ai_prompt} {content}" - elif isinstance(message, SystemPromptMessage): - message_text = content - else: - raise ValueError(f"Got unknown type {message}") - - return message_text - - def _convert_messages_to_prompt(self, messages: list[PromptMessage]) -> str: - """ - Format a list of messages into a full prompt for the Spark model - - :param messages: List of PromptMessage to combine. - :return: Combined string with necessary human_prompt and ai_prompt tags. - """ - messages = messages.copy()  # don't mutate the original list - - text = "".join(self._convert_one_message_to_text(message) for message in messages) - - # trim off the trailing ' ' that might come from the "Assistant: " - return text.rstrip() - - @property - def _invoke_error_mapping(self) -> dict[type[InvokeError], list[type[Exception]]]: - """ - Map model invoke error to unified error - The key is the error type thrown to the caller - The value is the error type thrown by the model, - which needs to be converted into a unified error type for the caller. 
- - :return: Invoke error mapping - """ - return { - InvokeConnectionError: [], - InvokeServerUnavailableError: [], - InvokeRateLimitError: [], - InvokeAuthorizationError: [], - InvokeBadRequestError: [], - } diff --git a/api/core/model_runtime/model_providers/tongyi/text_embedding/text_embedding.py b/api/core/model_runtime/model_providers/tongyi/text_embedding/text_embedding.py deleted file mode 100644 index 736cd44df8..0000000000 --- a/api/core/model_runtime/model_providers/tongyi/text_embedding/text_embedding.py +++ /dev/null @@ -1,177 +0,0 @@ -import time -from typing import Optional - -import dashscope -import numpy as np - -from core.embedding.embedding_constant import EmbeddingInputType -from core.model_runtime.entities.model_entities import PriceType -from core.model_runtime.entities.text_embedding_entities import ( - EmbeddingUsage, - TextEmbeddingResult, -) -from core.model_runtime.errors.validate import CredentialsValidateFailedError -from core.model_runtime.model_providers.__base.text_embedding_model import ( - TextEmbeddingModel, -) -from core.model_runtime.model_providers.tongyi._common import _CommonTongyi - - -class TongyiTextEmbeddingModel(_CommonTongyi, TextEmbeddingModel): - """ - Model class for Tongyi text embedding model. - """ - - def _invoke( - self, - model: str, - credentials: dict, - texts: list[str], - user: Optional[str] = None, - input_type: EmbeddingInputType = EmbeddingInputType.DOCUMENT, - ) -> TextEmbeddingResult: - """ - Invoke text embedding model - - :param model: model name - :param credentials: model credentials - :param texts: texts to embed - :param user: unique user id - :param input_type: input type - :return: embeddings result - """ - credentials_kwargs = self._to_credential_kwargs(credentials) - - context_size = self._get_context_size(model, credentials) - max_chunks = self._get_max_chunks(model, credentials) - inputs = [] - indices = [] - used_tokens = 0 - - for i, text in enumerate(texts): - # Here token count is only an approximation based on the GPT2 tokenizer - num_tokens = self._get_num_tokens_by_gpt2(text) - - if num_tokens >= context_size: - cutoff = int(np.floor(len(text) * (context_size / num_tokens))) - # if num tokens is larger than context length, only use the start - inputs.append(text[0:cutoff]) - else: - inputs.append(text) - indices += [i] - - batched_embeddings = [] - _iter = range(0, len(inputs), max_chunks) - - for i in _iter: - embeddings_batch, embedding_used_tokens = self.embed_documents( - credentials_kwargs=credentials_kwargs, - model=model, - texts=inputs[i : i + max_chunks], - ) - used_tokens += embedding_used_tokens - batched_embeddings += embeddings_batch - - # calc usage - usage = self._calc_response_usage(model=model, credentials=credentials, tokens=used_tokens) - return TextEmbeddingResult(embeddings=batched_embeddings, usage=usage, model=model) - - def get_num_tokens(self, model: str, credentials: dict, texts: list[str]) -> int: - """ - Get number of tokens for given prompt messages - - :param model: model name - :param credentials: model credentials - :param texts: texts to embed - :return: - """ - if len(texts) == 0: - return 0 - total_num_tokens = 0 - for text in texts: - total_num_tokens += self._get_num_tokens_by_gpt2(text) - - return total_num_tokens - - def validate_credentials(self, model: str, credentials: dict) -> None: - """ - Validate model credentials - - :param model: model name - :param credentials: model credentials - :return: - """ - try: - # transform credentials to kwargs for model instance - 
credentials_kwargs = self._to_credential_kwargs(credentials) - - # call embedding model - self.embed_documents(credentials_kwargs=credentials_kwargs, model=model, texts=["ping"]) - except Exception as ex: - raise CredentialsValidateFailedError(str(ex)) - - @staticmethod - def embed_documents(credentials_kwargs: dict, model: str, texts: list[str]) -> tuple[list[list[float]], int]: - """Call out to Tongyi's embedding endpoint. - - Args: - credentials_kwargs: The credentials to use for the call. - model: The model to use for embedding. - texts: The list of texts to embed. - - Returns: - List of embeddings, one for each text, and tokens usage. - """ - embeddings = [] - embedding_used_tokens = 0 - for text in texts: - response = dashscope.TextEmbedding.call( - api_key=credentials_kwargs["dashscope_api_key"], - model=model, - input=text, - text_type="document", - ) - if response.output and "embeddings" in response.output and response.output["embeddings"]: - data = response.output["embeddings"][0] - if "embedding" in data: - embeddings.append(data["embedding"]) - else: - raise ValueError("Embedding data is missing in the response.") - else: - raise ValueError("Response output is missing or does not contain embeddings.") - - if response.usage and "total_tokens" in response.usage: - embedding_used_tokens += response.usage["total_tokens"] - else: - raise ValueError("Response usage is missing or does not contain total tokens.") - - return [list(map(float, e)) for e in embeddings], embedding_used_tokens - - def _calc_response_usage(self, model: str, credentials: dict, tokens: int) -> EmbeddingUsage: - """ - Calculate response usage - - :param model: model name - :param tokens: input tokens - :return: usage - """ - # get input price info - input_price_info = self.get_price( - model=model, - credentials=credentials, - price_type=PriceType.INPUT, - tokens=tokens, - ) - - # transform usage - usage = EmbeddingUsage( - tokens=tokens, - total_tokens=tokens, - unit_price=input_price_info.unit_price, - price_unit=input_price_info.unit, - total_price=input_price_info.total_amount, - currency=input_price_info.currency, - latency=time.perf_counter() - self.started_at, - ) - - return usage diff --git a/api/core/model_runtime/model_providers/upstage/text_embedding/text_embedding.py b/api/core/model_runtime/model_providers/upstage/text_embedding/text_embedding.py deleted file mode 100644 index b6509cd26c..0000000000 --- a/api/core/model_runtime/model_providers/upstage/text_embedding/text_embedding.py +++ /dev/null @@ -1,197 +0,0 @@ -import base64 -import time -from collections.abc import Mapping -from typing import Union - -import numpy as np -from openai import OpenAI -from tokenizers import Tokenizer - -from core.embedding.embedding_constant import EmbeddingInputType -from core.model_runtime.entities.model_entities import PriceType -from core.model_runtime.entities.text_embedding_entities import EmbeddingUsage, TextEmbeddingResult -from core.model_runtime.errors.validate import CredentialsValidateFailedError -from core.model_runtime.model_providers.__base.text_embedding_model import TextEmbeddingModel -from core.model_runtime.model_providers.upstage._common import _CommonUpstage - - -class UpstageTextEmbeddingModel(_CommonUpstage, TextEmbeddingModel): - """ - Model class for Upstage text embedding model. 
- """ - - def _get_tokenizer(self) -> Tokenizer: - return Tokenizer.from_pretrained("upstage/solar-1-mini-tokenizer") - - def _invoke( - self, - model: str, - credentials: dict, - texts: list[str], - user: str | None = None, - input_type: EmbeddingInputType = EmbeddingInputType.DOCUMENT, - ) -> TextEmbeddingResult: - """ - Invoke text embedding model - - :param model: model name - :param credentials: model credentials - :param texts: texts to embed - :param user: unique user id - :param input_type: input type - :return: embeddings result - """ - - credentials_kwargs = self._to_credential_kwargs(credentials) - client = OpenAI(**credentials_kwargs) - - extra_model_kwargs = {} - if user: - extra_model_kwargs["user"] = user - extra_model_kwargs["encoding_format"] = "base64" - - context_size = self._get_context_size(model, credentials) - max_chunks = self._get_max_chunks(model, credentials) - - embeddings: list[list[float]] = [[] for _ in range(len(texts))] - tokens = [] - indices = [] - used_tokens = 0 - - tokenizer = self._get_tokenizer() - - for i, text in enumerate(texts): - token = tokenizer.encode(text, add_special_tokens=False).tokens - for j in range(0, len(token), context_size): - tokens += [token[j : j + context_size]] - indices += [i] - - batched_embeddings = [] - _iter = range(0, len(tokens), max_chunks) - - for i in _iter: - embeddings_batch, embedding_used_tokens = self._embedding_invoke( - model=model, - client=client, - texts=tokens[i : i + max_chunks], - extra_model_kwargs=extra_model_kwargs, - ) - - used_tokens += embedding_used_tokens - batched_embeddings += embeddings_batch - - results: list[list[list[float]]] = [[] for _ in range(len(texts))] - num_tokens_in_batch: list[list[int]] = [[] for _ in range(len(texts))] - - for i in range(len(indices)): - results[indices[i]].append(batched_embeddings[i]) - num_tokens_in_batch[indices[i]].append(len(tokens[i])) - - for i in range(len(texts)): - _result = results[i] - if len(_result) == 0: - embeddings_batch, embedding_used_tokens = self._embedding_invoke( - model=model, - client=client, - texts=[texts[i]], - extra_model_kwargs=extra_model_kwargs, - ) - used_tokens += embedding_used_tokens - average = embeddings_batch[0] - else: - average = np.average(_result, axis=0, weights=num_tokens_in_batch[i]) - embeddings[i] = (average / np.linalg.norm(average)).tolist() - - usage = self._calc_response_usage(model=model, credentials=credentials, tokens=used_tokens) - - return TextEmbeddingResult(embeddings=embeddings, usage=usage, model=model) - - def get_num_tokens(self, model: str, credentials: dict, texts: list[str]) -> int: - tokenizer = self._get_tokenizer() - """ - Get number of tokens for given prompt messages - - :param model: model name - :param credentials: model credentials - :param texts: texts to embed - :return: - """ - if len(texts) == 0: - return 0 - - tokenizer = self._get_tokenizer() - - total_num_tokens = 0 - for text in texts: - # calculate the number of tokens in the encoded text - tokenized_text = tokenizer.encode(text) - total_num_tokens += len(tokenized_text) - - return total_num_tokens - - def validate_credentials(self, model: str, credentials: Mapping) -> None: - """ - Validate model credentials - - :param model: model name - :param credentials: model credentials - :return: - """ - try: - # transform credentials to kwargs for model instance - credentials_kwargs = self._to_credential_kwargs(credentials) - client = OpenAI(**credentials_kwargs) - - # call embedding model - self._embedding_invoke(model=model, 
client=client, texts=["ping"], extra_model_kwargs={}) - except Exception as ex: - raise CredentialsValidateFailedError(str(ex)) - - def _embedding_invoke( - self, model: str, client: OpenAI, texts: Union[list[str], str], extra_model_kwargs: dict - ) -> tuple[list[list[float]], int]: - """ - Invoke embedding model - :param model: model name - :param client: model client - :param texts: texts to embed - :param extra_model_kwargs: extra model kwargs - :return: embeddings and used tokens - """ - response = client.embeddings.create(model=model, input=texts, **extra_model_kwargs) - - if "encoding_format" in extra_model_kwargs and extra_model_kwargs["encoding_format"] == "base64": - return ( - [ - list(np.frombuffer(base64.b64decode(embedding.embedding), dtype=np.float32)) - for embedding in response.data - ], - response.usage.total_tokens, - ) - - return [data.embedding for data in response.data], response.usage.total_tokens - - def _calc_response_usage(self, model: str, credentials: dict, tokens: int) -> EmbeddingUsage: - """ - Calculate response usage - - :param model: model name - :param credentials: model credentials - :param tokens: input tokens - :return: usage - """ - input_price_info = self.get_price( - model=model, credentials=credentials, tokens=tokens, price_type=PriceType.INPUT - ) - - usage = EmbeddingUsage( - tokens=tokens, - total_tokens=tokens, - unit_price=input_price_info.unit_price, - price_unit=input_price_info.unit, - total_price=input_price_info.total_amount, - currency=input_price_info.currency, - latency=time.perf_counter() - self.started_at, - ) - - return usage diff --git a/api/core/model_runtime/model_providers/vertex_ai/llm/gemini-1.5-flash-001.yaml b/api/core/model_runtime/model_providers/vertex_ai/llm/gemini-1.5-flash-001.yaml deleted file mode 100644 index f5386be06d..0000000000 --- a/api/core/model_runtime/model_providers/vertex_ai/llm/gemini-1.5-flash-001.yaml +++ /dev/null @@ -1,37 +0,0 @@ -model: gemini-1.5-flash-001 -label: - en_US: Gemini 1.5 Flash 001 -model_type: llm -features: - - agent-thought - - vision -model_properties: - mode: chat - context_size: 1048576 -parameter_rules: - - name: temperature - use_template: temperature - - name: top_p - use_template: top_p - - name: top_k - label: - en_US: Top k - type: int - help: - en_US: Only sample from the top K options for each subsequent token. - required: false - - name: presence_penalty - use_template: presence_penalty - - name: frequency_penalty - use_template: frequency_penalty - - name: max_output_tokens - use_template: max_tokens - required: true - default: 8192 - min: 1 - max: 8192 -pricing: - input: '0.00' - output: '0.00' - unit: '0.000001' - currency: USD diff --git a/api/core/model_runtime/model_providers/vertex_ai/llm/gemini-1.5-flash-002.yaml b/api/core/model_runtime/model_providers/vertex_ai/llm/gemini-1.5-flash-002.yaml deleted file mode 100644 index 97bd44f06b..0000000000 --- a/api/core/model_runtime/model_providers/vertex_ai/llm/gemini-1.5-flash-002.yaml +++ /dev/null @@ -1,37 +0,0 @@ -model: gemini-1.5-flash-002 -label: - en_US: Gemini 1.5 Flash 002 -model_type: llm -features: - - agent-thought - - vision -model_properties: - mode: chat - context_size: 1048576 -parameter_rules: - - name: temperature - use_template: temperature - - name: top_p - use_template: top_p - - name: top_k - label: - en_US: Top k - type: int - help: - en_US: Only sample from the top K options for each subsequent token. 
- required: false - - name: presence_penalty - use_template: presence_penalty - - name: frequency_penalty - use_template: frequency_penalty - - name: max_output_tokens - use_template: max_tokens - required: true - default: 8192 - min: 1 - max: 8192 -pricing: - input: '0.00' - output: '0.00' - unit: '0.000001' - currency: USD diff --git a/api/core/model_runtime/model_providers/vertex_ai/llm/gemini-1.5-pro-001.yaml b/api/core/model_runtime/model_providers/vertex_ai/llm/gemini-1.5-pro-001.yaml deleted file mode 100644 index 5e08f2294e..0000000000 --- a/api/core/model_runtime/model_providers/vertex_ai/llm/gemini-1.5-pro-001.yaml +++ /dev/null @@ -1,37 +0,0 @@ -model: gemini-1.5-pro-001 -label: - en_US: Gemini 1.5 Pro 001 -model_type: llm -features: - - agent-thought - - vision -model_properties: - mode: chat - context_size: 1048576 -parameter_rules: - - name: temperature - use_template: temperature - - name: top_p - use_template: top_p - - name: top_k - label: - en_US: Top k - type: int - help: - en_US: Only sample from the top K options for each subsequent token. - required: false - - name: presence_penalty - use_template: presence_penalty - - name: frequency_penalty - use_template: frequency_penalty - - name: max_output_tokens - use_template: max_tokens - required: true - default: 8192 - min: 1 - max: 8192 -pricing: - input: '0.00' - output: '0.00' - unit: '0.000001' - currency: USD diff --git a/api/core/model_runtime/model_providers/vertex_ai/llm/gemini-1.5-pro-002.yaml b/api/core/model_runtime/model_providers/vertex_ai/llm/gemini-1.5-pro-002.yaml deleted file mode 100644 index 8f327ea2f3..0000000000 --- a/api/core/model_runtime/model_providers/vertex_ai/llm/gemini-1.5-pro-002.yaml +++ /dev/null @@ -1,37 +0,0 @@ -model: gemini-1.5-pro-002 -label: - en_US: Gemini 1.5 Pro 002 -model_type: llm -features: - - agent-thought - - vision -model_properties: - mode: chat - context_size: 1048576 -parameter_rules: - - name: temperature - use_template: temperature - - name: top_p - use_template: top_p - - name: top_k - label: - en_US: Top k - type: int - help: - en_US: Only sample from the top K options for each subsequent token. - required: false - - name: presence_penalty - use_template: presence_penalty - - name: frequency_penalty - use_template: frequency_penalty - - name: max_output_tokens - use_template: max_tokens - required: true - default: 8192 - min: 1 - max: 8192 -pricing: - input: '0.00' - output: '0.00' - unit: '0.000001' - currency: USD diff --git a/api/core/model_runtime/model_providers/vertex_ai/llm/gemini-flash-experimental.yaml b/api/core/model_runtime/model_providers/vertex_ai/llm/gemini-flash-experimental.yaml deleted file mode 100644 index 0f5eb34c0c..0000000000 --- a/api/core/model_runtime/model_providers/vertex_ai/llm/gemini-flash-experimental.yaml +++ /dev/null @@ -1,37 +0,0 @@ -model: gemini-flash-experimental -label: - en_US: Gemini Flash Experimental -model_type: llm -features: - - agent-thought - - vision -model_properties: - mode: chat - context_size: 1048576 -parameter_rules: - - name: temperature - use_template: temperature - - name: top_p - use_template: top_p - - name: top_k - label: - en_US: Top k - type: int - help: - en_US: Only sample from the top K options for each subsequent token. 
- required: false - - name: presence_penalty - use_template: presence_penalty - - name: frequency_penalty - use_template: frequency_penalty - - name: max_output_tokens - use_template: max_tokens - required: true - default: 8192 - min: 1 - max: 8192 -pricing: - input: '0.00' - output: '0.00' - unit: '0.000001' - currency: USD diff --git a/api/core/model_runtime/model_providers/vertex_ai/llm/gemini-pro-experimental.yaml b/api/core/model_runtime/model_providers/vertex_ai/llm/gemini-pro-experimental.yaml deleted file mode 100644 index fa31cabb85..0000000000 --- a/api/core/model_runtime/model_providers/vertex_ai/llm/gemini-pro-experimental.yaml +++ /dev/null @@ -1,37 +0,0 @@ -model: gemini-pro-experimental -label: - en_US: Gemini Pro Experimental -model_type: llm -features: - - agent-thought - - vision -model_properties: - mode: chat - context_size: 1048576 -parameter_rules: - - name: temperature - use_template: temperature - - name: top_p - use_template: top_p - - name: top_k - label: - en_US: Top k - type: int - help: - en_US: Only sample from the top K options for each subsequent token. - required: false - - name: presence_penalty - use_template: presence_penalty - - name: frequency_penalty - use_template: frequency_penalty - - name: max_output_tokens - use_template: max_tokens - required: true - default: 8192 - min: 1 - max: 8192 -pricing: - input: '0.00' - output: '0.00' - unit: '0.000001' - currency: USD diff --git a/api/core/model_runtime/model_providers/vertex_ai/llm/llm.py b/api/core/model_runtime/model_providers/vertex_ai/llm/llm.py deleted file mode 100644 index 1dd785d545..0000000000 --- a/api/core/model_runtime/model_providers/vertex_ai/llm/llm.py +++ /dev/null @@ -1,733 +0,0 @@ -import base64 -import io -import json -import logging -import time -from collections.abc import Generator -from typing import Optional, Union, cast - -import google.auth.transport.requests -import vertexai.generative_models as glm -from anthropic import AnthropicVertex, Stream -from anthropic.types import ( - ContentBlockDeltaEvent, - Message, - MessageDeltaEvent, - MessageStartEvent, - MessageStopEvent, - MessageStreamEvent, -) -from google.api_core import exceptions -from google.cloud import aiplatform -from google.oauth2 import service_account -from PIL import Image - -from core.model_runtime.entities.llm_entities import LLMResult, LLMResultChunk, LLMResultChunkDelta, LLMUsage -from core.model_runtime.entities.message_entities import ( - AssistantPromptMessage, - ImagePromptMessageContent, - PromptMessage, - PromptMessageContentType, - PromptMessageTool, - SystemPromptMessage, - TextPromptMessageContent, - ToolPromptMessage, - UserPromptMessage, -) -from core.model_runtime.entities.model_entities import PriceType -from core.model_runtime.errors.invoke import ( - InvokeAuthorizationError, - InvokeBadRequestError, - InvokeConnectionError, - InvokeError, - InvokeRateLimitError, - InvokeServerUnavailableError, -) -from core.model_runtime.errors.validate import CredentialsValidateFailedError -from core.model_runtime.model_providers.__base.large_language_model import LargeLanguageModel - -logger = logging.getLogger(__name__) - - -class VertexAiLargeLanguageModel(LargeLanguageModel): - def _invoke( - self, - model: str, - credentials: dict, - prompt_messages: list[PromptMessage], - model_parameters: dict, - tools: Optional[list[PromptMessageTool]] = None, - stop: Optional[list[str]] = None, - stream: bool = True, - user: Optional[str] = None, - ) -> Union[LLMResult, Generator]: - """ - Invoke large language 
model - - :param model: model name - :param credentials: model credentials - :param prompt_messages: prompt messages - :param model_parameters: model parameters - :param tools: tools for tool calling - :param stop: stop words - :param stream: is stream response - :param user: unique user id - :return: full response or stream response chunk generator result - """ - # invoke anthropic models via anthropic official SDK - if "claude" in model: - return self._generate_anthropic(model, credentials, prompt_messages, model_parameters, stop, stream, user) - # invoke Gemini model - return self._generate(model, credentials, prompt_messages, model_parameters, tools, stop, stream, user) - - def _generate_anthropic( - self, - model: str, - credentials: dict, - prompt_messages: list[PromptMessage], - model_parameters: dict, - stop: Optional[list[str]] = None, - stream: bool = True, - user: Optional[str] = None, - ) -> Union[LLMResult, Generator]: - """ - Invoke Anthropic large language model - - :param model: model name - :param credentials: model credentials - :param prompt_messages: prompt messages - :param model_parameters: model parameters - :param stop: stop words - :param stream: is stream response - :return: full response or stream response chunk generator result - """ - # use Anthropic official SDK references - # - https://github.com/anthropics/anthropic-sdk-python - service_account_info = json.loads(base64.b64decode(credentials["vertex_service_account_key"])) - project_id = credentials["vertex_project_id"] - SCOPES = ["https://www.googleapis.com/auth/cloud-platform"] - token = "" - - # get access token from service account credential - if service_account_info: - credentials = service_account.Credentials.from_service_account_info(service_account_info, scopes=SCOPES) - request = google.auth.transport.requests.Request() - credentials.refresh(request) - token = credentials.token - - # Vertex AI Anthropic Claude3 Opus model available in us-east5 region, Sonnet and Haiku available - # in us-central1 region - if "opus" in model or "claude-3-5-sonnet" in model: - location = "us-east5" - else: - location = "us-central1" - - # use access token to authenticate - if token: - client = AnthropicVertex(region=location, project_id=project_id, access_token=token) - # When access token is empty, try to use the Google Cloud VM's built-in service account - # or the GOOGLE_APPLICATION_CREDENTIALS environment variable - else: - client = AnthropicVertex( - region=location, - project_id=project_id, - ) - - extra_model_kwargs = {} - if stop: - extra_model_kwargs["stop_sequences"] = stop - - system, prompt_message_dicts = self._convert_claude_prompt_messages(prompt_messages) - - if system: - extra_model_kwargs["system"] = system - - response = client.messages.create( - model=model, messages=prompt_message_dicts, stream=stream, **model_parameters, **extra_model_kwargs - ) - - if stream: - return self._handle_claude_stream_response(model, credentials, response, prompt_messages) - - return self._handle_claude_response(model, credentials, response, prompt_messages) - - def _handle_claude_response( - self, model: str, credentials: dict, response: Message, prompt_messages: list[PromptMessage] - ) -> LLMResult: - """ - Handle llm chat response - - :param model: model name - :param credentials: credentials - :param response: response - :param prompt_messages: prompt messages - :return: full response chunk generator result - """ - - # transform assistant message to prompt message - assistant_prompt_message = 
AssistantPromptMessage(content=response.content[0].text) - - # calculate num tokens - if response.usage: - # transform usage - prompt_tokens = response.usage.input_tokens - completion_tokens = response.usage.output_tokens - else: - # calculate num tokens - prompt_tokens = self.get_num_tokens(model, credentials, prompt_messages) - completion_tokens = self.get_num_tokens(model, credentials, [assistant_prompt_message]) - - # transform usage - usage = self._calc_response_usage(model, credentials, prompt_tokens, completion_tokens) - - # transform response - response = LLMResult( - model=response.model, prompt_messages=prompt_messages, message=assistant_prompt_message, usage=usage - ) - - return response - - def _handle_claude_stream_response( - self, - model: str, - credentials: dict, - response: Stream[MessageStreamEvent], - prompt_messages: list[PromptMessage], - ) -> Generator: - """ - Handle llm chat stream response - - :param model: model name - :param credentials: credentials - :param response: response - :param prompt_messages: prompt messages - :return: full response or stream response chunk generator result - """ - - try: - full_assistant_content = "" - return_model = None - input_tokens = 0 - output_tokens = 0 - finish_reason = None - index = 0 - - for chunk in response: - if isinstance(chunk, MessageStartEvent): - return_model = chunk.message.model - input_tokens = chunk.message.usage.input_tokens - elif isinstance(chunk, MessageDeltaEvent): - output_tokens = chunk.usage.output_tokens - finish_reason = chunk.delta.stop_reason - elif isinstance(chunk, MessageStopEvent): - usage = self._calc_response_usage(model, credentials, input_tokens, output_tokens) - yield LLMResultChunk( - model=return_model, - prompt_messages=prompt_messages, - delta=LLMResultChunkDelta( - index=index + 1, - message=AssistantPromptMessage(content=""), - finish_reason=finish_reason, - usage=usage, - ), - ) - elif isinstance(chunk, ContentBlockDeltaEvent): - chunk_text = chunk.delta.text or "" - full_assistant_content += chunk_text - assistant_prompt_message = AssistantPromptMessage( - content=chunk_text or "", - ) - index = chunk.index - yield LLMResultChunk( - model=model, - prompt_messages=prompt_messages, - delta=LLMResultChunkDelta( - index=index, - message=assistant_prompt_message, - ), - ) - except Exception as ex: - raise InvokeError(str(ex)) - - def _calc_claude_response_usage( - self, model: str, credentials: dict, prompt_tokens: int, completion_tokens: int - ) -> LLMUsage: - """ - Calculate response usage - - :param model: model name - :param credentials: model credentials - :param prompt_tokens: prompt tokens - :param completion_tokens: completion tokens - :return: usage - """ - # get prompt price info - prompt_price_info = self.get_price( - model=model, - credentials=credentials, - price_type=PriceType.INPUT, - tokens=prompt_tokens, - ) - - # get completion price info - completion_price_info = self.get_price( - model=model, credentials=credentials, price_type=PriceType.OUTPUT, tokens=completion_tokens - ) - - # transform usage - usage = LLMUsage( - prompt_tokens=prompt_tokens, - prompt_unit_price=prompt_price_info.unit_price, - prompt_price_unit=prompt_price_info.unit, - prompt_price=prompt_price_info.total_amount, - completion_tokens=completion_tokens, - completion_unit_price=completion_price_info.unit_price, - completion_price_unit=completion_price_info.unit, - completion_price=completion_price_info.total_amount, - total_tokens=prompt_tokens + completion_tokens, - 
total_price=prompt_price_info.total_amount + completion_price_info.total_amount, - currency=prompt_price_info.currency, - latency=time.perf_counter() - self.started_at, - ) - - return usage - - def _convert_claude_prompt_messages(self, prompt_messages: list[PromptMessage]) -> tuple[str, list[dict]]: - """ - Convert prompt messages to dict list and system - """ - - system = "" - first_loop = True - for message in prompt_messages: - if isinstance(message, SystemPromptMessage): - message.content = message.content.strip() - if first_loop: - system = message.content - first_loop = False - else: - system += "\n" - system += message.content - - prompt_message_dicts = [] - for message in prompt_messages: - if not isinstance(message, SystemPromptMessage): - prompt_message_dicts.append(self._convert_claude_prompt_message_to_dict(message)) - - return system, prompt_message_dicts - - def _convert_claude_prompt_message_to_dict(self, message: PromptMessage) -> dict: - """ - Convert PromptMessage to dict - """ - if isinstance(message, UserPromptMessage): - message = cast(UserPromptMessage, message) - if isinstance(message.content, str): - message_dict = {"role": "user", "content": message.content} - else: - sub_messages = [] - for message_content in message.content: - if message_content.type == PromptMessageContentType.TEXT: - message_content = cast(TextPromptMessageContent, message_content) - sub_message_dict = {"type": "text", "text": message_content.data} - sub_messages.append(sub_message_dict) - elif message_content.type == PromptMessageContentType.IMAGE: - message_content = cast(ImagePromptMessageContent, message_content) - if not message_content.data.startswith("data:"): - # fetch image data from url - try: - image_content = requests.get(message_content.data).content - with Image.open(io.BytesIO(image_content)) as img: - mime_type = f"image/{img.format.lower()}" - base64_data = base64.b64encode(image_content).decode("utf-8") - except Exception as ex: - raise ValueError(f"Failed to fetch image data from url {message_content.data}, {ex}") - else: - data_split = message_content.data.split(";base64,") - mime_type = data_split[0].replace("data:", "") - base64_data = data_split[1] - - if mime_type not in {"image/jpeg", "image/png", "image/gif", "image/webp"}: - raise ValueError( - f"Unsupported image type {mime_type}, " - f"only support image/jpeg, image/png, image/gif, and image/webp" - ) - - sub_message_dict = { - "type": "image", - "source": {"type": "base64", "media_type": mime_type, "data": base64_data}, - } - sub_messages.append(sub_message_dict) - - message_dict = {"role": "user", "content": sub_messages} - elif isinstance(message, AssistantPromptMessage): - message = cast(AssistantPromptMessage, message) - message_dict = {"role": "assistant", "content": message.content} - elif isinstance(message, SystemPromptMessage): - message = cast(SystemPromptMessage, message) - message_dict = {"role": "system", "content": message.content} - else: - raise ValueError(f"Got unknown type {message}") - - return message_dict - - def get_num_tokens( - self, - model: str, - credentials: dict, - prompt_messages: list[PromptMessage], - tools: Optional[list[PromptMessageTool]] = None, - ) -> int: - """ - Get number of tokens for given prompt messages - - :param model: model name - :param credentials: model credentials - :param prompt_messages: prompt messages - :param tools: tools for tool calling - :return: number of tokens - """ - prompt = self._convert_messages_to_prompt(prompt_messages) - - return
self._get_num_tokens_by_gpt2(prompt) - - def _convert_messages_to_prompt(self, messages: list[PromptMessage]) -> str: - """ - Format a list of messages into a full prompt for the Google model - - :param messages: List of PromptMessage to combine. - :return: Combined string with necessary human_prompt and ai_prompt tags. - """ - messages = messages.copy() # don't mutate the original list - - text = "".join(self._convert_one_message_to_text(message) for message in messages) - - return text.rstrip() - - def _convert_tools_to_glm_tool(self, tools: list[PromptMessageTool]) -> glm.Tool: - """ - Convert tool messages to glm tools - - :param tools: tool messages - :return: glm tools - """ - return glm.Tool( - function_declarations=[ - glm.FunctionDeclaration( - name=tool.name, - parameters=glm.Schema( - type=glm.Type.OBJECT, - properties={ - key: { - "type_": value.get("type", "string").upper(), - "description": value.get("description", ""), - "enum": value.get("enum", []), - } - for key, value in tool.parameters.get("properties", {}).items() - }, - required=tool.parameters.get("required", []), - ), - ) - for tool in tools - ] - ) - - def validate_credentials(self, model: str, credentials: dict) -> None: - """ - Validate model credentials - - :param model: model name - :param credentials: model credentials - :return: - """ - - try: - ping_message = SystemPromptMessage(content="ping") - self._generate(model, credentials, [ping_message], {"max_tokens_to_sample": 5}) - - except Exception as ex: - raise CredentialsValidateFailedError(str(ex)) - - def _generate( - self, - model: str, - credentials: dict, - prompt_messages: list[PromptMessage], - model_parameters: dict, - tools: Optional[list[PromptMessageTool]] = None, - stop: Optional[list[str]] = None, - stream: bool = True, - user: Optional[str] = None, - ) -> Union[LLMResult, Generator]: - """ - Invoke large language model - - :param model: model name - :param credentials: credentials kwargs - :param prompt_messages: prompt messages - :param model_parameters: model parameters - :param stop: stop words - :param stream: is stream response - :param user: unique user id - :return: full response or stream response chunk generator result - """ - config_kwargs = model_parameters.copy() - config_kwargs["max_output_tokens"] = config_kwargs.pop("max_tokens_to_sample", None) - - if stop: - config_kwargs["stop_sequences"] = stop - - service_account_info = json.loads(base64.b64decode(credentials["vertex_service_account_key"])) - project_id = credentials["vertex_project_id"] - location = credentials["vertex_location"] - if service_account_info: - service_accountSA = service_account.Credentials.from_service_account_info(service_account_info) - aiplatform.init(credentials=service_accountSA, project=project_id, location=location) - else: - aiplatform.init(project=project_id, location=location) - - history = [] - system_instruction = "" - # hack for gemini-pro-vision, which currently does not support multi-turn chat - if model == "gemini-1.0-pro-vision-001": - last_msg = prompt_messages[-1] - content = self._format_message_to_glm_content(last_msg) - history.append(content) - else: - for msg in prompt_messages: - if isinstance(msg, SystemPromptMessage): - system_instruction = msg.content - else: - content = self._format_message_to_glm_content(msg) - if history and history[-1].role == content.role: - history[-1].parts.extend(content.parts) - else: - history.append(content) - - google_model = glm.GenerativeModel(model_name=model, system_instruction=system_instruction) 
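A note on the history-building loop above: consecutive messages from the same role are folded into a single glm.Content entry, since the Gemini chat API expects alternating turns. A minimal standalone sketch of that merge rule, with plain dicts standing in for glm.Content (the types here are illustrative, not the SDK's):

```python
# Sketch of the consecutive-role merge used when building Gemini history.
# Plain dicts stand in for glm.Content; purely illustrative.
def merge_history(messages: list[dict]) -> list[dict]:
    history: list[dict] = []
    for msg in messages:
        if history and history[-1]["role"] == msg["role"]:
            # Same role as the previous turn: extend its parts
            # instead of appending a new turn.
            history[-1]["parts"].extend(msg["parts"])
        else:
            history.append({"role": msg["role"], "parts": list(msg["parts"])})
    return history

assert merge_history(
    [{"role": "user", "parts": ["a"]}, {"role": "user", "parts": ["b"]}]
) == [{"role": "user", "parts": ["a", "b"]}]
```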
- - response = google_model.generate_content( - contents=history, - generation_config=glm.GenerationConfig(**config_kwargs), - stream=stream, - tools=self._convert_tools_to_glm_tool(tools) if tools else None, - ) - - if stream: - return self._handle_generate_stream_response(model, credentials, response, prompt_messages) - - return self._handle_generate_response(model, credentials, response, prompt_messages) - - def _handle_generate_response( - self, model: str, credentials: dict, response: glm.GenerationResponse, prompt_messages: list[PromptMessage] - ) -> LLMResult: - """ - Handle llm response - - :param model: model name - :param credentials: credentials - :param response: response - :param prompt_messages: prompt messages - :return: llm response - """ - # transform assistant message to prompt message - assistant_prompt_message = AssistantPromptMessage(content=response.candidates[0].content.parts[0].text) - - # calculate num tokens - prompt_tokens = self.get_num_tokens(model, credentials, prompt_messages) - completion_tokens = self.get_num_tokens(model, credentials, [assistant_prompt_message]) - - # transform usage - usage = self._calc_response_usage(model, credentials, prompt_tokens, completion_tokens) - - # transform response - result = LLMResult( - model=model, - prompt_messages=prompt_messages, - message=assistant_prompt_message, - usage=usage, - ) - - return result - - def _handle_generate_stream_response( - self, model: str, credentials: dict, response: glm.GenerationResponse, prompt_messages: list[PromptMessage] - ) -> Generator: - """ - Handle llm stream response - - :param model: model name - :param credentials: credentials - :param response: response - :param prompt_messages: prompt messages - :return: llm response chunk generator result - """ - index = -1 - for chunk in response: - for part in chunk.candidates[0].content.parts: - assistant_prompt_message = AssistantPromptMessage(content="") - - if part.text: - assistant_prompt_message.content += part.text - - if part.function_call: - assistant_prompt_message.tool_calls = [ - AssistantPromptMessage.ToolCall( - id=part.function_call.name, - type="function", - function=AssistantPromptMessage.ToolCall.ToolCallFunction( - name=part.function_call.name, - arguments=json.dumps(dict(part.function_call.args.items())), - ), - ) - ] - - index += 1 - - if not hasattr(chunk, "finish_reason") or not chunk.finish_reason: - # transform assistant message to prompt message - yield LLMResultChunk( - model=model, - prompt_messages=prompt_messages, - delta=LLMResultChunkDelta(index=index, message=assistant_prompt_message), - ) - else: - # calculate num tokens - prompt_tokens = self.get_num_tokens(model, credentials, prompt_messages) - completion_tokens = self.get_num_tokens(model, credentials, [assistant_prompt_message]) - - # transform usage - usage = self._calc_response_usage(model, credentials, prompt_tokens, completion_tokens) - - yield LLMResultChunk( - model=model, - prompt_messages=prompt_messages, - delta=LLMResultChunkDelta( - index=index, - message=assistant_prompt_message, - finish_reason=chunk.candidates[0].finish_reason, - usage=usage, - ), - ) - - def _convert_one_message_to_text(self, message: PromptMessage) -> str: - """ - Convert a single message to a string. - - :param message: PromptMessage to convert. - :return: String representation of the message. 
- """ - human_prompt = "\n\nuser:" - ai_prompt = "\n\nmodel:" - - content = message.content - if isinstance(content, list): - content = "".join(c.data for c in content if c.type != PromptMessageContentType.IMAGE) - - if isinstance(message, UserPromptMessage): - message_text = f"{human_prompt} {content}" - elif isinstance(message, AssistantPromptMessage): - message_text = f"{ai_prompt} {content}" - elif isinstance(message, SystemPromptMessage | ToolPromptMessage): - message_text = f"{human_prompt} {content}" - else: - raise ValueError(f"Got unknown type {message}") - - return message_text - - def _format_message_to_glm_content(self, message: PromptMessage) -> glm.Content: - """ - Format a single message into glm.Content for Google API - - :param message: one PromptMessage - :return: glm Content representation of message - """ - if isinstance(message, UserPromptMessage): - glm_content = glm.Content(role="user", parts=[]) - - if isinstance(message.content, str): - glm_content = glm.Content(role="user", parts=[glm.Part.from_text(message.content)]) - else: - parts = [] - for c in message.content: - if c.type == PromptMessageContentType.TEXT: - parts.append(glm.Part.from_text(c.data)) - else: - metadata, data = c.data.split(",", 1) - mime_type = metadata.split(";", 1)[0].split(":")[1] - parts.append(glm.Part.from_data(mime_type=mime_type, data=data)) - glm_content = glm.Content(role="user", parts=parts) - return glm_content - elif isinstance(message, AssistantPromptMessage): - if message.content: - glm_content = glm.Content(role="model", parts=[glm.Part.from_text(message.content)]) - if message.tool_calls: - glm_content = glm.Content( - role="model", - parts=[ - glm.Part.from_function_response( - glm.FunctionCall( - name=message.tool_calls[0].function.name, - args=json.loads(message.tool_calls[0].function.arguments), - ) - ) - ], - ) - return glm_content - elif isinstance(message, ToolPromptMessage): - glm_content = glm.Content( - role="function", - parts=[ - glm.Part( - function_response=glm.FunctionResponse( - name=message.name, response={"response": message.content} - ) - ) - ], - ) - return glm_content - else: - raise ValueError(f"Got unknown type {message}") - - @property - def _invoke_error_mapping(self) -> dict[type[InvokeError], list[type[Exception]]]: - """ - Map model invoke error to unified error - The key is the ermd = gml.GenerativeModel(model) error type thrown to the caller - The value is the md = gml.GenerativeModel(model) error type thrown by the model, - which needs to be converted into a unified error type for the caller. 
- - @property - def _invoke_error_mapping(self) -> dict[type[InvokeError], list[type[Exception]]]: - """ - Map model invoke error to unified error - The key is the error type thrown to the caller - The value is the error type thrown by the model, - which needs to be converted into a unified error type for the caller. - - :return: Invoke error mapping - """ - return { - InvokeConnectionError: [exceptions.RetryError], - InvokeServerUnavailableError: [ - exceptions.ServiceUnavailable, - exceptions.InternalServerError, - exceptions.BadGateway, - exceptions.GatewayTimeout, - exceptions.DeadlineExceeded, - ], - InvokeRateLimitError: [exceptions.ResourceExhausted, exceptions.TooManyRequests], - InvokeAuthorizationError: [ - exceptions.Unauthenticated, - exceptions.PermissionDenied, - exceptions.Forbidden, - ], - InvokeBadRequestError: [ - exceptions.BadRequest, - exceptions.InvalidArgument, - exceptions.FailedPrecondition, - exceptions.OutOfRange, - exceptions.NotFound, - exceptions.MethodNotAllowed, - exceptions.Conflict, - exceptions.AlreadyExists, - exceptions.Aborted, - exceptions.LengthRequired, - exceptions.PreconditionFailed, - exceptions.RequestRangeNotSatisfiable, - exceptions.Cancelled, - ], - } diff --git a/api/core/model_runtime/model_providers/vertex_ai/text_embedding/text_embedding.py b/api/core/model_runtime/model_providers/vertex_ai/text_embedding/text_embedding.py deleted file mode 100644 index fce9544df0..0000000000 --- a/api/core/model_runtime/model_providers/vertex_ai/text_embedding/text_embedding.py +++ /dev/null @@ -1,187 +0,0 @@ -import base64 -import json -import time -from decimal import Decimal -from typing import Optional - -import tiktoken -from google.cloud import aiplatform -from google.oauth2 import service_account -from vertexai.language_models import TextEmbeddingModel as VertexTextEmbeddingModel - -from core.embedding.embedding_constant import EmbeddingInputType -from core.model_runtime.entities.common_entities import I18nObject -from core.model_runtime.entities.model_entities import ( - AIModelEntity, - FetchFrom, - ModelPropertyKey, - ModelType, - PriceConfig, - PriceType, -) -from core.model_runtime.entities.text_embedding_entities import EmbeddingUsage, TextEmbeddingResult -from core.model_runtime.errors.validate import CredentialsValidateFailedError -from core.model_runtime.model_providers.__base.text_embedding_model import TextEmbeddingModel -from core.model_runtime.model_providers.vertex_ai._common import _CommonVertexAi - - -class VertexAiTextEmbeddingModel(_CommonVertexAi, TextEmbeddingModel): - """ - Model class for Vertex AI text embedding model. - """
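The embedding class below estimates token counts with tiktoken, falling back to the cl100k_base encoding when tiktoken does not recognize the model name (the usual case for Vertex AI models). A standalone sketch of that pattern; only the tiktoken package is assumed, and the model name is illustrative:

```python
import tiktoken

def count_tokens(model: str, texts: list[str]) -> int:
    # encoding_for_model raises KeyError for names tiktoken does not know,
    # e.g. Vertex AI embedding models, so fall back to cl100k_base.
    try:
        enc = tiktoken.encoding_for_model(model)
    except KeyError:
        enc = tiktoken.get_encoding("cl100k_base")
    return sum(len(enc.encode(text)) for text in texts)

print(count_tokens("text-embedding-004", ["hello world"]))  # approximate count
```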
- """ - - def _invoke( - self, - model: str, - credentials: dict, - texts: list[str], - user: Optional[str] = None, - input_type: EmbeddingInputType = EmbeddingInputType.DOCUMENT, - ) -> TextEmbeddingResult: - """ - Invoke text embedding model - - :param model: model name - :param credentials: model credentials - :param texts: texts to embed - :param user: unique user id - :param input_type: input type - :return: embeddings result - """ - service_account_info = json.loads(base64.b64decode(credentials["vertex_service_account_key"])) - project_id = credentials["vertex_project_id"] - location = credentials["vertex_location"] - if service_account_info: - service_accountSA = service_account.Credentials.from_service_account_info(service_account_info) - aiplatform.init(credentials=service_accountSA, project=project_id, location=location) - else: - aiplatform.init(project=project_id, location=location) - - client = VertexTextEmbeddingModel.from_pretrained(model) - - embeddings_batch, embedding_used_tokens = self._embedding_invoke(client=client, texts=texts) - - # calc usage - usage = self._calc_response_usage(model=model, credentials=credentials, tokens=embedding_used_tokens) - - return TextEmbeddingResult(embeddings=embeddings_batch, usage=usage, model=model) - - def get_num_tokens(self, model: str, credentials: dict, texts: list[str]) -> int: - """ - Get number of tokens for given prompt messages - - :param model: model name - :param credentials: model credentials - :param texts: texts to embed - :return: - """ - if len(texts) == 0: - return 0 - - try: - enc = tiktoken.encoding_for_model(model) - except KeyError: - enc = tiktoken.get_encoding("cl100k_base") - - total_num_tokens = 0 - for text in texts: - # calculate the number of tokens in the encoded text - tokenized_text = enc.encode(text) - total_num_tokens += len(tokenized_text) - - return total_num_tokens - - def validate_credentials(self, model: str, credentials: dict) -> None: - """ - Validate model credentials - - :param model: model name - :param credentials: model credentials - :return: - """ - try: - service_account_info = json.loads(base64.b64decode(credentials["vertex_service_account_key"])) - project_id = credentials["vertex_project_id"] - location = credentials["vertex_location"] - if service_account_info: - service_accountSA = service_account.Credentials.from_service_account_info(service_account_info) - aiplatform.init(credentials=service_accountSA, project=project_id, location=location) - else: - aiplatform.init(project=project_id, location=location) - - client = VertexTextEmbeddingModel.from_pretrained(model) - - # call embedding model - self._embedding_invoke(model=model, client=client, texts=["ping"]) - except Exception as ex: - raise CredentialsValidateFailedError(str(ex)) - - def _embedding_invoke(self, client: VertexTextEmbeddingModel, texts: list[str]) -> [list[float], int]: # type: ignore - """ - Invoke embedding model - - :param model: model name - :param client: model client - :param texts: texts to embed - :return: embeddings and used tokens - """ - response = client.get_embeddings(texts) - - embeddings = [] - token_usage = 0 - - for i in range(len(response)): - embeddings.append(response[i].values) - token_usage += int(response[i].statistics.token_count) - - return embeddings, token_usage - - def _calc_response_usage(self, model: str, credentials: dict, tokens: int) -> EmbeddingUsage: - """ - Calculate response usage - - :param model: model name - :param credentials: model credentials - :param tokens: input tokens - 
- - def _calc_response_usage(self, model: str, credentials: dict, tokens: int) -> EmbeddingUsage: - """ - Calculate response usage - - :param model: model name - :param credentials: model credentials - :param tokens: input tokens - :return: usage - """ - # get input price info - input_price_info = self.get_price( - model=model, credentials=credentials, price_type=PriceType.INPUT, tokens=tokens - ) - - # transform usage - usage = EmbeddingUsage( - tokens=tokens, - total_tokens=tokens, - unit_price=input_price_info.unit_price, - price_unit=input_price_info.unit, - total_price=input_price_info.total_amount, - currency=input_price_info.currency, - latency=time.perf_counter() - self.started_at, - ) - - return usage - - def get_customizable_model_schema(self, model: str, credentials: dict) -> AIModelEntity: - """ - generate custom model entities from credentials - """ - entity = AIModelEntity( - model=model, - label=I18nObject(en_US=model), - model_type=ModelType.TEXT_EMBEDDING, - fetch_from=FetchFrom.CUSTOMIZABLE_MODEL, - model_properties={ - ModelPropertyKey.CONTEXT_SIZE: int(credentials.get("context_size")), - ModelPropertyKey.MAX_CHUNKS: 1, - }, - parameter_rules=[], - pricing=PriceConfig( - input=Decimal(credentials.get("input_price", 0)), - unit=Decimal(credentials.get("unit", 0)), - currency=credentials.get("currency", "USD"), - ), - ) - - return entity diff --git a/api/core/model_runtime/model_providers/volcengine_maas/text_embedding/text_embedding.py b/api/core/model_runtime/model_providers/volcengine_maas/text_embedding/text_embedding.py deleted file mode 100644 index 0dd4037c95..0000000000 --- a/api/core/model_runtime/model_providers/volcengine_maas/text_embedding/text_embedding.py +++ /dev/null @@ -1,198 +0,0 @@ -import time -from decimal import Decimal -from typing import Optional - -from core.embedding.embedding_constant import EmbeddingInputType -from core.model_runtime.entities.common_entities import I18nObject -from core.model_runtime.entities.model_entities import ( - AIModelEntity, - FetchFrom, - ModelPropertyKey, - ModelType, - PriceConfig, - PriceType, -) -from core.model_runtime.entities.text_embedding_entities import EmbeddingUsage, TextEmbeddingResult -from core.model_runtime.errors.invoke import ( - InvokeAuthorizationError, - InvokeBadRequestError, - InvokeConnectionError, - InvokeError, - InvokeRateLimitError, - InvokeServerUnavailableError, -) -from core.model_runtime.errors.validate import CredentialsValidateFailedError -from core.model_runtime.model_providers.__base.text_embedding_model import TextEmbeddingModel -from core.model_runtime.model_providers.volcengine_maas.client import ArkClientV3 -from core.model_runtime.model_providers.volcengine_maas.legacy.client import MaaSClient -from core.model_runtime.model_providers.volcengine_maas.legacy.errors import ( - AuthErrors, - BadRequestErrors, - ConnectionErrors, - MaasError, - RateLimitErrors, - ServerUnavailableErrors, -) -from core.model_runtime.model_providers.volcengine_maas.text_embedding.models import get_model_config - - -class VolcengineMaaSTextEmbeddingModel(TextEmbeddingModel): - """ - Model class for VolcengineMaaS text embedding model.
- """ - - def _invoke( - self, - model: str, - credentials: dict, - texts: list[str], - user: Optional[str] = None, - input_type: EmbeddingInputType = EmbeddingInputType.DOCUMENT, - ) -> TextEmbeddingResult: - """ - Invoke text embedding model - - :param model: model name - :param credentials: model credentials - :param texts: texts to embed - :param user: unique user id - :param input_type: input type - :return: embeddings result - """ - if ArkClientV3.is_legacy(credentials): - return self._generate_v2(model, credentials, texts, user) - - return self._generate_v3(model, credentials, texts, user) - - def _generate_v2( - self, model: str, credentials: dict, texts: list[str], user: Optional[str] = None - ) -> TextEmbeddingResult: - client = MaaSClient.from_credential(credentials) - resp = MaaSClient.wrap_exception(lambda: client.embeddings(texts)) - - usage = self._calc_response_usage(model=model, credentials=credentials, tokens=resp["usage"]["total_tokens"]) - - result = TextEmbeddingResult(model=model, embeddings=[v["embedding"] for v in resp["data"]], usage=usage) - - return result - - def _generate_v3( - self, model: str, credentials: dict, texts: list[str], user: Optional[str] = None - ) -> TextEmbeddingResult: - client = ArkClientV3.from_credentials(credentials) - resp = client.embeddings(texts) - - usage = self._calc_response_usage(model=model, credentials=credentials, tokens=resp.usage.total_tokens) - - result = TextEmbeddingResult(model=model, embeddings=[v.embedding for v in resp.data], usage=usage) - - return result - - def get_num_tokens(self, model: str, credentials: dict, texts: list[str]) -> int: - """ - Get number of tokens for given prompt messages - - :param model: model name - :param credentials: model credentials - :param texts: texts to embed - :return: - """ - num_tokens = 0 - for text in texts: - # use GPT2Tokenizer to get num tokens - num_tokens += self._get_num_tokens_by_gpt2(text) - return num_tokens - - def validate_credentials(self, model: str, credentials: dict) -> None: - """ - Validate model credentials - - :param model: model name - :param credentials: model credentials - :return: - """ - if ArkClientV3.is_legacy(credentials): - return self._validate_credentials_v2(model, credentials) - return self._validate_credentials_v3(model, credentials) - - def _validate_credentials_v2(self, model: str, credentials: dict) -> None: - try: - self._invoke(model=model, credentials=credentials, texts=["ping"]) - except MaasError as e: - raise CredentialsValidateFailedError(e.message) - - def _validate_credentials_v3(self, model: str, credentials: dict) -> None: - try: - self._invoke(model=model, credentials=credentials, texts=["ping"]) - except Exception as e: - raise CredentialsValidateFailedError(e) - - @property - def _invoke_error_mapping(self) -> dict[type[InvokeError], list[type[Exception]]]: - """ - Map model invoke error to unified error - The key is the error type thrown to the caller - The value is the error type thrown by the model, - which needs to be converted into a unified error type for the caller. 
- - :return: Invoke error mapping - """ - return { - InvokeConnectionError: ConnectionErrors.values(), - InvokeServerUnavailableError: ServerUnavailableErrors.values(), - InvokeRateLimitError: RateLimitErrors.values(), - InvokeAuthorizationError: AuthErrors.values(), - InvokeBadRequestError: BadRequestErrors.values(), - } - - def get_customizable_model_schema(self, model: str, credentials: dict) -> AIModelEntity: - """ - generate custom model entities from credentials - """ - model_config = get_model_config(credentials) - model_properties = { - ModelPropertyKey.CONTEXT_SIZE: model_config.properties.context_size, - ModelPropertyKey.MAX_CHUNKS: model_config.properties.max_chunks, - } - entity = AIModelEntity( - model=model, - label=I18nObject(en_US=model), - model_type=ModelType.TEXT_EMBEDDING, - fetch_from=FetchFrom.CUSTOMIZABLE_MODEL, - model_properties=model_properties, - parameter_rules=[], - pricing=PriceConfig( - input=Decimal(credentials.get("input_price", 0)), - unit=Decimal(credentials.get("unit", 0)), - currency=credentials.get("currency", "USD"), - ), - ) - - return entity - - def _calc_response_usage(self, model: str, credentials: dict, tokens: int) -> EmbeddingUsage: - """ - Calculate response usage - - :param model: model name - :param credentials: model credentials - :param tokens: input tokens - :return: usage - """ - # get input price info - input_price_info = self.get_price( - model=model, credentials=credentials, price_type=PriceType.INPUT, tokens=tokens - ) - - # transform usage - usage = EmbeddingUsage( - tokens=tokens, - total_tokens=tokens, - unit_price=input_price_info.unit_price, - price_unit=input_price_info.unit, - total_price=input_price_info.total_amount, - currency=input_price_info.currency, - latency=time.perf_counter() - self.started_at, - ) - - return usage diff --git a/api/core/model_runtime/model_providers/wenxin/text_embedding/text_embedding.py b/api/core/model_runtime/model_providers/wenxin/text_embedding/text_embedding.py deleted file mode 100644 index c21d0c0552..0000000000 --- a/api/core/model_runtime/model_providers/wenxin/text_embedding/text_embedding.py +++ /dev/null @@ -1,187 +0,0 @@ -import time -from abc import abstractmethod -from collections.abc import Mapping -from json import dumps -from typing import Any, Optional - -import numpy as np -from requests import Response, post - -from core.embedding.embedding_constant import EmbeddingInputType -from core.model_runtime.entities.model_entities import PriceType -from core.model_runtime.entities.text_embedding_entities import EmbeddingUsage, TextEmbeddingResult -from core.model_runtime.errors.invoke import InvokeError -from core.model_runtime.errors.validate import CredentialsValidateFailedError -from core.model_runtime.model_providers.__base.text_embedding_model import TextEmbeddingModel -from core.model_runtime.model_providers.wenxin._common import BaiduAccessToken, _CommonWenxin -from core.model_runtime.model_providers.wenxin.wenxin_errors import ( - BadRequestError, - InternalServerError, - invoke_error_mapping, -) - - -class TextEmbedding: - @abstractmethod - def embed_documents(self, model: str, texts: list[str], user: str) -> (list[list[float]], int, int): - raise NotImplementedError - - -class WenxinTextEmbedding(_CommonWenxin, TextEmbedding): - def embed_documents(self, model: str, texts: list[str], user: str) -> (list[list[float]], int, int): - access_token = self._get_access_token() - url = f"{self.api_bases[model]}?access_token={access_token}" - body = 
self._build_embed_request_body(model, texts, user) - headers = { - "Content-Type": "application/json", - } - - resp = post(url, data=dumps(body), headers=headers) - if resp.status_code != 200: - raise InternalServerError(f"Failed to invoke ernie bot: {resp.text}") - return self._handle_embed_response(model, resp) - - def _build_embed_request_body(self, model: str, texts: list[str], user: str) -> dict[str, Any]: - if len(texts) == 0: - raise BadRequestError("The number of texts should not be zero.") - body = { - "input": texts, - "user_id": user, - } - return body - - def _handle_embed_response(self, model: str, response: Response) -> (list[list[float]], int, int): - data = response.json() - if "error_code" in data: - code = data["error_code"] - msg = data["error_msg"] - # raise error - self._handle_error(code, msg) - - embeddings = [v["embedding"] for v in data["data"]] - _usage = data["usage"] - tokens = _usage["prompt_tokens"] - total_tokens = _usage["total_tokens"] - - return embeddings, tokens, total_tokens - - -class WenxinTextEmbeddingModel(TextEmbeddingModel): - def _create_text_embedding(self, api_key: str, secret_key: str) -> TextEmbedding: - return WenxinTextEmbedding(api_key, secret_key) - - def _invoke( - self, - model: str, - credentials: dict, - texts: list[str], - user: Optional[str] = None, - input_type: EmbeddingInputType = EmbeddingInputType.DOCUMENT, - ) -> TextEmbeddingResult: - """ - Invoke text embedding model - - :param model: model name - :param credentials: model credentials - :param texts: texts to embed - :param user: unique user id - :param input_type: input type - :return: embeddings result - """ - - api_key = credentials["api_key"] - secret_key = credentials["secret_key"] - embedding: TextEmbedding = self._create_text_embedding(api_key, secret_key) - user = user or "ErnieBotDefault" - - context_size = self._get_context_size(model, credentials) - max_chunks = self._get_max_chunks(model, credentials) - inputs = [] - indices = [] - used_tokens = 0 - used_total_tokens = 0 - - for i, text in enumerate(texts): - # Here token count is only an approximation based on the GPT2 tokenizer - num_tokens = self._get_num_tokens_by_gpt2(text) - - if num_tokens >= context_size: - cutoff = int(np.floor(len(text) * (context_size / num_tokens))) - # if num tokens is larger than context length, only use the start - inputs.append(text[0:cutoff]) - else: - inputs.append(text) - indices += [i] - - batched_embeddings = [] - _iter = range(0, len(inputs), max_chunks) - for i in _iter: - embeddings_batch, _used_tokens, _total_used_tokens = embedding.embed_documents( - model, inputs[i : i + max_chunks], user - ) - used_tokens += _used_tokens - used_total_tokens += _total_used_tokens - batched_embeddings += embeddings_batch - - usage = self._calc_response_usage(model, credentials, used_tokens, used_total_tokens) - return TextEmbeddingResult( - model=model, - embeddings=batched_embeddings, - usage=usage, - ) - - def get_num_tokens(self, model: str, credentials: dict, texts: list[str]) -> int: - """ - Get number of tokens for given prompt messages - - :param model: model name - :param credentials: model credentials - :param texts: texts to embed - :return: - """ - if len(texts) == 0: - return 0 - total_num_tokens = 0 - for text in texts: - total_num_tokens += self._get_num_tokens_by_gpt2(text) - - return total_num_tokens - - def validate_credentials(self, model: str, credentials: Mapping) -> None: - api_key = credentials["api_key"] - secret_key = credentials["secret_key"] - try: - 
BaiduAccessToken.get_access_token(api_key, secret_key) - except Exception as e: - raise CredentialsValidateFailedError(f"Credentials validation failed: {e}") - - @property - def _invoke_error_mapping(self) -> dict[type[InvokeError], list[type[Exception]]]: - return invoke_error_mapping() - - def _calc_response_usage(self, model: str, credentials: dict, tokens: int, total_tokens: int) -> EmbeddingUsage: - """ - Calculate response usage - - :param model: model name - :param credentials: model credentials - :param tokens: input tokens - :return: usage - """ - # get input price info - input_price_info = self.get_price( - model=model, credentials=credentials, price_type=PriceType.INPUT, tokens=tokens - ) - - # transform usage - usage = EmbeddingUsage( - tokens=tokens, - total_tokens=total_tokens, - unit_price=input_price_info.unit_price, - price_unit=input_price_info.unit, - total_price=input_price_info.total_amount, - currency=input_price_info.currency, - latency=time.perf_counter() - self.started_at, - ) - - return usage diff --git a/api/core/model_runtime/model_providers/xinference/text_embedding/text_embedding.py b/api/core/model_runtime/model_providers/xinference/text_embedding/text_embedding.py deleted file mode 100644 index 1627239132..0000000000 --- a/api/core/model_runtime/model_providers/xinference/text_embedding/text_embedding.py +++ /dev/null @@ -1,204 +0,0 @@ -import time -from typing import Optional - -from xinference_client.client.restful.restful_client import Client, RESTfulEmbeddingModelHandle - -from core.embedding.embedding_constant import EmbeddingInputType -from core.model_runtime.entities.common_entities import I18nObject -from core.model_runtime.entities.model_entities import AIModelEntity, FetchFrom, ModelPropertyKey, ModelType, PriceType -from core.model_runtime.entities.text_embedding_entities import EmbeddingUsage, TextEmbeddingResult -from core.model_runtime.errors.invoke import ( - InvokeAuthorizationError, - InvokeBadRequestError, - InvokeConnectionError, - InvokeError, - InvokeRateLimitError, - InvokeServerUnavailableError, -) -from core.model_runtime.errors.validate import CredentialsValidateFailedError -from core.model_runtime.model_providers.__base.text_embedding_model import TextEmbeddingModel -from core.model_runtime.model_providers.xinference.xinference_helper import XinferenceHelper - - -class XinferenceTextEmbeddingModel(TextEmbeddingModel): - """ - Model class for Xinference text embedding model. 
- """ - - def _invoke( - self, - model: str, - credentials: dict, - texts: list[str], - user: Optional[str] = None, - input_type: EmbeddingInputType = EmbeddingInputType.DOCUMENT, - ) -> TextEmbeddingResult: - """ - Invoke text embedding model - - credentials should be like: - { - 'server_url': 'server url', - 'model_uid': 'model uid', - } - - :param model: model name - :param credentials: model credentials - :param texts: texts to embed - :param user: unique user id - :param input_type: input type - :return: embeddings result - """ - server_url = credentials["server_url"] - model_uid = credentials["model_uid"] - api_key = credentials.get("api_key") - server_url = server_url.removesuffix("/") - auth_headers = {"Authorization": f"Bearer {api_key}"} if api_key else {} - - try: - handle = RESTfulEmbeddingModelHandle(model_uid, server_url, auth_headers) - embeddings = handle.create_embedding(input=texts) - except RuntimeError as e: - raise InvokeServerUnavailableError(str(e)) - - """ - for convenience, the response json is like: - class Embedding(TypedDict): - object: Literal["list"] - model: str - data: List[EmbeddingData] - usage: EmbeddingUsage - class EmbeddingUsage(TypedDict): - prompt_tokens: int - total_tokens: int - class EmbeddingData(TypedDict): - index: int - object: str - embedding: List[float] - """ - - usage = embeddings["usage"] - usage = self._calc_response_usage(model=model, credentials=credentials, tokens=usage["total_tokens"]) - - result = TextEmbeddingResult( - model=model, embeddings=[embedding["embedding"] for embedding in embeddings["data"]], usage=usage - ) - - return result - - def get_num_tokens(self, model: str, credentials: dict, texts: list[str]) -> int: - """ - Get number of tokens for given prompt messages - - :param model: model name - :param credentials: model credentials - :param texts: texts to embed - :return: - """ - num_tokens = 0 - for text in texts: - # use GPT2Tokenizer to get num tokens - num_tokens += self._get_num_tokens_by_gpt2(text) - return num_tokens - - def validate_credentials(self, model: str, credentials: dict) -> None: - """ - Validate model credentials - - :param model: model name - :param credentials: model credentials - :return: - """ - try: - if "/" in credentials["model_uid"] or "?" 
in credentials["model_uid"] or "#" in credentials["model_uid"]: - raise CredentialsValidateFailedError("model_uid should not contain /, ?, or #") - - server_url = credentials["server_url"] - model_uid = credentials["model_uid"] - api_key = credentials.get("api_key") - extra_args = XinferenceHelper.get_xinference_extra_parameter( - server_url=server_url, - model_uid=model_uid, - api_key=api_key, - ) - - if extra_args.max_tokens: - credentials["max_tokens"] = extra_args.max_tokens - server_url = server_url.removesuffix("/") - - client = Client( - base_url=server_url, - api_key=api_key, - ) - - try: - handle = client.get_model(model_uid=model_uid) - except RuntimeError as e: - raise InvokeAuthorizationError(e) - - if not isinstance(handle, RESTfulEmbeddingModelHandle): - raise InvokeBadRequestError( - "please check model type, the model you want to invoke is not a text embedding model" - ) - - self._invoke(model=model, credentials=credentials, texts=["ping"]) - except InvokeAuthorizationError as e: - raise CredentialsValidateFailedError(f"Failed to validate credentials for model {model}: {e}") - except RuntimeError as e: - raise CredentialsValidateFailedError(e) - - @property - def _invoke_error_mapping(self) -> dict[type[InvokeError], list[type[Exception]]]: - return { - InvokeConnectionError: [InvokeConnectionError], - InvokeServerUnavailableError: [InvokeServerUnavailableError], - InvokeRateLimitError: [InvokeRateLimitError], - InvokeAuthorizationError: [InvokeAuthorizationError], - InvokeBadRequestError: [KeyError], - } - - def _calc_response_usage(self, model: str, credentials: dict, tokens: int) -> EmbeddingUsage: - """ - Calculate response usage - - :param model: model name - :param credentials: model credentials - :param tokens: input tokens - :return: usage - """ - # get input price info - input_price_info = self.get_price( - model=model, credentials=credentials, price_type=PriceType.INPUT, tokens=tokens - ) - - # transform usage - usage = EmbeddingUsage( - tokens=tokens, - total_tokens=tokens, - unit_price=input_price_info.unit_price, - price_unit=input_price_info.unit, - total_price=input_price_info.total_amount, - currency=input_price_info.currency, - latency=time.perf_counter() - self.started_at, - ) - - return usage - - def get_customizable_model_schema(self, model: str, credentials: dict) -> AIModelEntity | None: - """ - used to define customizable model schema - """ - - entity = AIModelEntity( - model=model, - label=I18nObject(en_US=model), - fetch_from=FetchFrom.CUSTOMIZABLE_MODEL, - model_type=ModelType.TEXT_EMBEDDING, - model_properties={ - ModelPropertyKey.MAX_CHUNKS: 1, - ModelPropertyKey.CONTEXT_SIZE: "max_tokens" in credentials and credentials["max_tokens"] or 512, - }, - parameter_rules=[], - ) - - return entity diff --git a/api/core/plugin/manager/model.py b/api/core/plugin/manager/model.py index 30bd9bc0a8..fb58c4bb8d 100644 --- a/api/core/plugin/manager/model.py +++ b/api/core/plugin/manager/model.py @@ -235,6 +235,7 @@ class PluginModelManager(BasePluginManager): model: str, credentials: dict, texts: list[str], + input_type: str, ) -> TextEmbeddingResult: """ Invoke text embedding @@ -252,6 +253,7 @@ class PluginModelManager(BasePluginManager): "model": model, "credentials": credentials, "texts": texts, + "input_type": input_type, }, } ), @@ -272,7 +274,6 @@ class PluginModelManager(BasePluginManager): user_id: str, plugin_id: str, provider: str, - model_type: str, model: str, credentials: dict, texts: list[str], @@ -289,7 +290,7 @@ class 
PluginModelManager(BasePluginManager): "user_id": user_id, "data": { "provider": provider, - "model_type": model_type, + "model_type": "text-embedding", "model": model, "credentials": credentials, "texts": texts, @@ -313,7 +314,6 @@ class PluginModelManager(BasePluginManager): user_id: str, plugin_id: str, provider: str, - model_type: str, model: str, credentials: dict, query: str, @@ -333,7 +333,7 @@ class PluginModelManager(BasePluginManager): "user_id": user_id, "data": { "provider": provider, - "model_type": model_type, + "model_type": "rerank", "model": model, "credentials": credentials, "query": query, @@ -360,7 +360,6 @@ class PluginModelManager(BasePluginManager): user_id: str, plugin_id: str, provider: str, - model_type: str, model: str, credentials: dict, content_text: str, @@ -378,7 +377,7 @@ class PluginModelManager(BasePluginManager): "user_id": user_id, "data": { "provider": provider, - "model_type": model_type, + "model_type": "tts", "model": model, "credentials": credentials, "content_text": content_text, @@ -405,7 +404,6 @@ class PluginModelManager(BasePluginManager): user_id: str, plugin_id: str, provider: str, - model_type: str, model: str, credentials: dict, language: Optional[str] = None, @@ -422,7 +420,7 @@ class PluginModelManager(BasePluginManager): "user_id": user_id, "data": { "provider": provider, - "model_type": model_type, + "model_type": "tts", "model": model, "credentials": credentials, "language": language, @@ -447,7 +445,6 @@ class PluginModelManager(BasePluginManager): user_id: str, plugin_id: str, provider: str, - model_type: str, model: str, credentials: dict, file: IO[bytes], @@ -464,7 +461,7 @@ class PluginModelManager(BasePluginManager): "user_id": user_id, "data": { "provider": provider, - "model_type": model_type, + "model_type": "speech2text", "model": model, "credentials": credentials, "file": binascii.hexlify(file.read()).decode(), @@ -488,7 +485,6 @@ class PluginModelManager(BasePluginManager): user_id: str, plugin_id: str, provider: str, - model_type: str, model: str, credentials: dict, text: str, @@ -505,7 +501,7 @@ class PluginModelManager(BasePluginManager): "user_id": user_id, "data": { "provider": provider, - "model_type": model_type, + "model_type": "moderation", "model": model, "credentials": credentials, "text": text, diff --git a/api/core/provider_manager.py b/api/core/provider_manager.py index cb49a6cf56..ae3934327e 100644 --- a/api/core/provider_manager.py +++ b/api/core/provider_manager.py @@ -244,12 +244,11 @@ class ProviderManager: (model for model in available_models if model.model == "gpt-4"), available_models[0] ) - default_model = TenantDefaultModel( - tenant_id=tenant_id, - model_type=model_type.to_origin_model_type(), - provider_name=available_model.provider.provider, - model_name=available_model.model, - ) + default_model = TenantDefaultModel() + default_model.tenant_id = tenant_id + default_model.model_type = model_type.to_origin_model_type() + default_model.provider_name = available_model.provider.provider + default_model.model_name = available_model.model db.session.add(default_model) db.session.commit() @@ -489,15 +488,14 @@ class ProviderManager: # Init trial provider records if not exists if ProviderQuotaType.TRIAL not in provider_quota_to_provider_record_dict: try: - provider_record = Provider( - tenant_id=tenant_id, - provider_name=provider_name, - provider_type=ProviderType.SYSTEM.value, - quota_type=ProviderQuotaType.TRIAL.value, - quota_limit=quota.quota_limit, - quota_used=0, - is_valid=True, - ) + 
provider_record = Provider() + provider_record.tenant_id = tenant_id + provider_record.provider_name = provider_name + provider_record.provider_type = ProviderType.SYSTEM.value + provider_record.quota_type = ProviderQuotaType.TRIAL.value + provider_record.quota_limit = quota.quota_limit + provider_record.quota_used = 0 + provider_record.is_valid = True db.session.add(provider_record) db.session.commit() except IntegrityError: diff --git a/api/core/tools/builtin_tool/tool.py b/api/core/tools/builtin_tool/tool.py index fe77f9ac77..abba542b8e 100644 --- a/api/core/tools/builtin_tool/tool.py +++ b/api/core/tools/builtin_tool/tool.py @@ -3,7 +3,6 @@ from core.model_runtime.entities.message_entities import PromptMessage, SystemPr from core.tools.__base.tool import Tool from core.tools.entities.tool_entities import ToolProviderType from core.tools.utils.model_invocation_utils import ModelInvocationUtils -from core.tools.utils.web_reader_tool import get_url _SUMMARY_PROMPT = """You are a professional language researcher, you are interested in the language and you can quickly aimed at the main point of an webpage and reproduce it in your own words but @@ -124,9 +123,3 @@ class BuiltinTool(Tool): return self.summary(user_id=user_id, content=result) return result - - def get_url(self, url: str, user_agent: str | None = None) -> str: - """ - get url - """ - return get_url(url, user_agent=user_agent) diff --git a/api/core/tools/utils/web_reader_tool.py b/api/core/tools/utils/web_reader_tool.py deleted file mode 100644 index dcbae9f5aa..0000000000 --- a/api/core/tools/utils/web_reader_tool.py +++ /dev/null @@ -1,357 +0,0 @@ -import hashlib -import json -import mimetypes -import os -import re -import site -import subprocess -import tempfile -import unicodedata -from contextlib import contextmanager -from pathlib import Path -from urllib.parse import unquote - -import chardet -import cloudscraper -from bs4 import BeautifulSoup, CData, Comment, NavigableString -from regex import regex - -from core.helper import ssrf_proxy -from core.rag.extractor import extract_processor -from core.rag.extractor.extract_processor import ExtractProcessor - -FULL_TEMPLATE = """ -TITLE: {title} -AUTHORS: {authors} -PUBLISH DATE: {publish_date} -TOP_IMAGE_URL: {top_image} -TEXT: - -{text} -""" - - -def page_result(text: str, cursor: int, max_length: int) -> str: - """Page through `text` and return a substring of `max_length` characters starting from `cursor`.""" - return text[cursor : cursor + max_length] - - -def get_url(url: str, user_agent: str | None = None) -> str: - """Fetch URL and return the contents as a string.""" - headers = { - "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko)" - " Chrome/91.0.4472.124 Safari/537.36" - } - if user_agent: - headers["User-Agent"] = user_agent - - main_content_type = None - supported_content_types = extract_processor.SUPPORT_URL_CONTENT_TYPES + ["text/html"] - response = ssrf_proxy.head(url, headers=headers, follow_redirects=True, timeout=(5, 10)) - - if response.status_code == 200: - # check content-type - content_type = response.headers.get("Content-Type") - if content_type: - main_content_type = response.headers.get("Content-Type").split(";")[0].strip() - else: - content_disposition = response.headers.get("Content-Disposition", "") - filename_match = re.search(r'filename="([^"]+)"', content_disposition) - if filename_match: - filename = unquote(filename_match.group(1)) - extension = re.search(r"\.(\w+)$", filename) - if extension: - 
main_content_type = mimetypes.guess_type(filename)[0] - - if main_content_type not in supported_content_types: - return "Unsupported content-type [{}] of URL.".format(main_content_type) - - if main_content_type in extract_processor.SUPPORT_URL_CONTENT_TYPES: - return ExtractProcessor.load_from_url(url, return_text=True) - - response = ssrf_proxy.get(url, headers=headers, follow_redirects=True, timeout=(120, 300)) - elif response.status_code == 403: - scraper = cloudscraper.create_scraper() - scraper.perform_request = ssrf_proxy.make_request - response = scraper.get(url, headers=headers, follow_redirects=True, timeout=(120, 300)) - - if response.status_code != 200: - return "URL returned status code {}.".format(response.status_code) - - # Detect encoding using chardet - detected_encoding = chardet.detect(response.content) - encoding = detected_encoding["encoding"] - if encoding: - try: - content = response.content.decode(encoding) - except (UnicodeDecodeError, TypeError): - content = response.text - else: - content = response.text - - a = extract_using_readabilipy(content) - - if not a["plain_text"] or not a["plain_text"].strip(): - return "" - - res = FULL_TEMPLATE.format( - title=a["title"], - authors=a["byline"], - publish_date=a["date"], - top_image="", - text=a["plain_text"] or "", - ) - - return res - - -def extract_using_readabilipy(html): - with tempfile.NamedTemporaryFile(delete=False, mode="w+") as f_html: - f_html.write(html) - f_html.close() - html_path = f_html.name - - # Call Mozilla's Readability.js Readability.parse() function via node, writing output to a temporary file - article_json_path = html_path + ".json" - jsdir = os.path.join(find_module_path("readabilipy"), "javascript") - with chdir(jsdir): - subprocess.check_call(["node", "ExtractArticle.js", "-i", html_path, "-o", article_json_path]) - - # Read output of call to Readability.parse() from JSON file and return as Python dictionary - input_json = json.loads(Path(article_json_path).read_text(encoding="utf-8")) - - # Deleting files after processing - os.unlink(article_json_path) - os.unlink(html_path) - - article_json = { - "title": None, - "byline": None, - "date": None, - "content": None, - "plain_content": None, - "plain_text": None, - } - # Populate article fields from readability fields where present - if input_json: - if input_json.get("title"): - article_json["title"] = input_json["title"] - if input_json.get("byline"): - article_json["byline"] = input_json["byline"] - if input_json.get("date"): - article_json["date"] = input_json["date"] - if input_json.get("content"): - article_json["content"] = input_json["content"] - article_json["plain_content"] = plain_content(article_json["content"], False, False) - article_json["plain_text"] = extract_text_blocks_as_plain_text(article_json["plain_content"]) - if input_json.get("textContent"): - article_json["plain_text"] = input_json["textContent"] - article_json["plain_text"] = re.sub(r"\n\s*\n", "\n", article_json["plain_text"]) - - return article_json - - -def find_module_path(module_name): - for package_path in site.getsitepackages(): - potential_path = os.path.join(package_path, module_name) - if os.path.exists(potential_path): - return potential_path - - return None - - -@contextmanager -def chdir(path): - """Change directory in context and return to original on exit""" - # From https://stackoverflow.com/a/37996581, couldn't find a built-in - original_path = os.getcwd() - os.chdir(path) - try: - yield - finally: - os.chdir(original_path) - - -def 
extract_text_blocks_as_plain_text(paragraph_html): - # Load article as DOM - soup = BeautifulSoup(paragraph_html, "html.parser") - # Select all lists - list_elements = soup.find_all(["ul", "ol"]) - # Prefix text in all list items with "* " and make lists paragraphs - for list_element in list_elements: - plain_items = "".join( - list(filter(None, [plain_text_leaf_node(li)["text"] for li in list_element.find_all("li")])) - ) - list_element.string = plain_items - list_element.name = "p" - # Select all text blocks - text_blocks = [s.parent for s in soup.find_all(string=True)] - text_blocks = [plain_text_leaf_node(block) for block in text_blocks] - # Drop empty paragraphs - text_blocks = list(filter(lambda p: p["text"] is not None, text_blocks)) - return text_blocks - - -def plain_text_leaf_node(element): - # Extract all text, stripped of any child HTML elements and normalize it - plain_text = normalize_text(element.get_text()) - if plain_text != "" and element.name == "li": - plain_text = "* {}, ".format(plain_text) - if plain_text == "": - plain_text = None - if "data-node-index" in element.attrs: - plain = {"node_index": element["data-node-index"], "text": plain_text} - else: - plain = {"text": plain_text} - return plain - - -def plain_content(readability_content, content_digests, node_indexes): - # Load article as DOM - soup = BeautifulSoup(readability_content, "html.parser") - # Make all elements plain - elements = plain_elements(soup.contents, content_digests, node_indexes) - if node_indexes: - # Add node index attributes to nodes - elements = [add_node_indexes(element) for element in elements] - # Replace article contents with plain elements - soup.contents = elements - return str(soup) - - -def plain_elements(elements, content_digests, node_indexes): - # Get plain content versions of all elements - elements = [plain_element(element, content_digests, node_indexes) for element in elements] - if content_digests: - # Add content digest attribute to nodes - elements = [add_content_digest(element) for element in elements] - return elements - - -def plain_element(element, content_digests, node_indexes): - # For lists, we make each item plain text - if is_leaf(element): - # For leaf node elements, extract the text content, discarding any HTML tags - # 1. Get element contents as text - plain_text = element.get_text() - # 2. Normalize the extracted text string to a canonical representation - plain_text = normalize_text(plain_text) - # 3. Update element content to be plain text - element.string = plain_text - elif is_text(element): - if is_non_printing(element): - # The simplified HTML may have come from Readability.js so might - # have non-printing text (e.g. Comment or CData). In this case, we - # keep the structure, but ensure that the string is empty. 
- element = type(element)("") - else: - plain_text = element.string - plain_text = normalize_text(plain_text) - element = type(element)(plain_text) - else: - # If not a leaf node or text node, call recursively on the child nodes, replacing them with their plain versions - element.contents = plain_elements(element.contents, content_digests, node_indexes) - return element - - -def add_node_indexes(element, node_index="0"): - # Can't add attributes to string types - if is_text(element): - return element - # Add index to current element - element["data-node-index"] = node_index - # Add index to child elements - for local_idx, child in enumerate([c for c in element.contents if not is_text(c)], start=1): - # Can't add attributes to leaf string types - child_index = "{stem}.{local}".format(stem=node_index, local=local_idx) - add_node_indexes(child, node_index=child_index) - return element - - -def normalize_text(text): - """Normalize unicode and whitespace.""" - # Normalize unicode first to try and standardize whitespace characters as much as possible before normalizing them - text = strip_control_characters(text) - text = normalize_unicode(text) - text = normalize_whitespace(text) - return text - - -def strip_control_characters(text): - """Strip out unicode control characters which might break the parsing.""" - # Unicode control characters - # [Cc]: Other, Control [includes new lines] - # [Cf]: Other, Format - # [Cn]: Other, Not Assigned - # [Co]: Other, Private Use - # [Cs]: Other, Surrogate - control_chars = {"Cc", "Cf", "Cn", "Co", "Cs"} - retained_chars = ["\t", "\n", "\r", "\f"] - - # Remove non-printing control characters - return "".join( - [ - "" if (unicodedata.category(char) in control_chars) and (char not in retained_chars) else char - for char in text - ] - ) - - -def normalize_unicode(text): - """Normalize unicode such that things that are visually equivalent map to the same unicode string where possible.""" - normal_form = "NFKC" - text = unicodedata.normalize(normal_form, text) - return text - - -def normalize_whitespace(text): - """Replace runs of whitespace characters with a single space as this is what happens when HTML text is displayed.""" - text = regex.sub(r"\s+", " ", text) - # Remove leading and trailing whitespace - text = text.strip() - return text - - -def is_leaf(element): - return element.name in {"p", "li"} - - -def is_text(element): - return isinstance(element, NavigableString) - - -def is_non_printing(element): - return any(isinstance(element, _e) for _e in [Comment, CData]) - - -def add_content_digest(element): - if not is_text(element): - element["data-content-digest"] = content_digest(element) - return element - - -def content_digest(element): - if is_text(element): - # Hash - trimmed_string = element.string.strip() - if trimmed_string == "": - digest = "" - else: - digest = hashlib.sha256(trimmed_string.encode("utf-8")).hexdigest() - else: - contents = element.contents - num_contents = len(contents) - if num_contents == 0: - # No hash when no child elements exist - digest = "" - elif num_contents == 1: - # If single child, use digest of child - digest = content_digest(contents[0]) - else: - # Build content digest from the "non-empty" digests of child nodes - digest = hashlib.sha256() - child_digests = list(filter(lambda x: x != "", [content_digest(content) for content in contents])) - for child in child_digests: - digest.update(child.encode("utf-8")) - digest = digest.hexdigest() - return digest
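Note: with BuiltinTool.get_url removed and web_reader_tool.py deleted above, built-in tools no longer ship a URL reader, presumably in favor of plugin-provided equivalents. For reference, a minimal sketch of what a caller could still do directly against the surviving core.helper.ssrf_proxy helper, mirroring the head-then-get flow of the deleted get_url(); the function name fetch_page and the HTML-only restriction are assumptions, not part of this diff:

from core.helper import ssrf_proxy

DEFAULT_UA = (
    "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko)"
    " Chrome/91.0.4472.124 Safari/537.36"
)


def fetch_page(url: str, user_agent: str | None = None) -> str:
    """Hypothetical replacement sketch: fetch a web page through the SSRF-safe proxy."""
    headers = {"User-Agent": user_agent or DEFAULT_UA}
    # Probe with HEAD first, as the deleted get_url() did, so non-HTML payloads are rejected cheaply.
    head = ssrf_proxy.head(url, headers=headers, follow_redirects=True, timeout=(5, 10))
    content_type = (head.headers.get("Content-Type") or "").split(";")[0].strip()
    if content_type and content_type != "text/html":
        return "Unsupported content-type [{}] of URL.".format(content_type)
    response = ssrf_proxy.get(url, headers=headers, follow_redirects=True, timeout=(120, 300))
    if response.status_code != 200:
        return "URL returned status code {}.".format(response.status_code)
    return response.text

diff --git a/api/poetry.lock b/api/poetry.lock index 85c68cd75f..5c8485990c 100644 ---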
a/api/poetry.lock +++ b/api/poetry.lock @@ -2,13 +2,13 @@ [[package]] name = "aiohappyeyeballs" -version = "2.4.0" +version = "2.4.2" description = "Happy Eyeballs for asyncio" optional = false python-versions = ">=3.8" files = [ - {file = "aiohappyeyeballs-2.4.0-py3-none-any.whl", hash = "sha256:7ce92076e249169a13c2f49320d1967425eaf1f407522d707d59cac7628d62bd"}, - {file = "aiohappyeyeballs-2.4.0.tar.gz", hash = "sha256:55a1714f084e63d49639800f95716da97a1f173d46a16dfcfda0016abb93b6b2"}, + {file = "aiohappyeyeballs-2.4.2-py3-none-any.whl", hash = "sha256:8522691d9a154ba1145b157d6d5c15e5c692527ce6a53c5e5f9876977f6dab2f"}, + {file = "aiohappyeyeballs-2.4.2.tar.gz", hash = "sha256:4ca893e6c5c1f5bf3888b04cb5a3bee24995398efef6e0b9f747b5e89d84fd74"}, ] [[package]] @@ -123,20 +123,6 @@ yarl = ">=1.0,<2.0" [package.extras] speedups = ["Brotli", "aiodns (>=3.2.0)", "brotlicffi"] -[[package]] -name = "aiohttp-retry" -version = "2.8.3" -description = "Simple retry client for aiohttp" -optional = false -python-versions = ">=3.7" -files = [ - {file = "aiohttp_retry-2.8.3-py3-none-any.whl", hash = "sha256:3aeeead8f6afe48272db93ced9440cf4eda8b6fd7ee2abb25357b7eb28525b45"}, - {file = "aiohttp_retry-2.8.3.tar.gz", hash = "sha256:9a8e637e31682ad36e1ff9f8bcba912fcfc7d7041722bc901a4b948da4d71ea9"}, -] - -[package.dependencies] -aiohttp = "*" - [[package]] name = "aiosignal" version = "1.3.1" @@ -153,13 +139,13 @@ frozenlist = ">=1.1.0" [[package]] name = "alembic" -version = "1.13.2" +version = "1.13.3" description = "A database migration tool for SQLAlchemy." optional = false python-versions = ">=3.8" files = [ - {file = "alembic-1.13.2-py3-none-any.whl", hash = "sha256:6b8733129a6224a9a711e17c99b08462dbf7cc9670ba8f2e2ae9af860ceb1953"}, - {file = "alembic-1.13.2.tar.gz", hash = "sha256:1ff0ae32975f4fd96028c39ed9bb3c867fe3af956bd7bb37343b54c9fe7445ef"}, + {file = "alembic-1.13.3-py3-none-any.whl", hash = "sha256:908e905976d15235fae59c9ac42c4c5b75cfcefe3d27c0fbf7ae15a37715d80e"}, + {file = "alembic-1.13.3.tar.gz", hash = "sha256:203503117415561e203aa14541740643a611f641517f0209fcae63e9fa09f1a2"}, ] [package.dependencies] @@ -293,13 +279,13 @@ alibabacloud-tea = "*" [[package]] name = "alibabacloud-tea" -version = "0.3.9" +version = "0.3.10" description = "The tea module of alibabaCloud Python SDK." 
optional = false python-versions = ">=3.6" files = [ - {file = "alibabacloud-tea-0.3.9.tar.gz", hash = "sha256:a9689770003fa9313d1995812f9fe36a2be315e5cdfc8d58de0d96808219ced9"}, - {file = "alibabacloud_tea-0.3.9-py3-none-any.whl", hash = "sha256:402fd2a92e6729f228d8c0300b182f80019edce19d83afa497aeb15fd7947f9a"}, + {file = "alibabacloud-tea-0.3.10.tar.gz", hash = "sha256:bcf972416af5d8b5e671078c2ec20296dbc792e85e68acd685730a0a016afd2a"}, + {file = "alibabacloud_tea-0.3.10-py3-none-any.whl", hash = "sha256:9136f302a3baea8a1528f500bf5d47c3727b827a09b5c14b283ca53578e30082"}, ] [package.dependencies] @@ -321,17 +307,17 @@ alibabacloud-tea = ">=0.0.1" [[package]] name = "alibabacloud-tea-openapi" -version = "0.3.11" +version = "0.3.12" description = "Alibaba Cloud openapi SDK Library for Python" optional = false python-versions = ">=3.6" files = [ - {file = "alibabacloud_tea_openapi-0.3.11.tar.gz", hash = "sha256:3f5cace1b1aeb8a64587574097403cfd066b86ee4c3c9abde587f9abfcad38de"}, + {file = "alibabacloud_tea_openapi-0.3.12.tar.gz", hash = "sha256:2e14809f357438e62c1ef4976a7655110dd54a75bbfa7d905fa3798355cfd974"}, ] [package.dependencies] -alibabacloud_credentials = ">=0.3.1,<1.0.0" -alibabacloud_gateway_spi = ">=0.0.1,<1.0.0" +alibabacloud_credentials = ">=0.3.5,<1.0.0" +alibabacloud_gateway_spi = ">=0.0.2,<1.0.0" alibabacloud_openapi_util = ">=0.2.1,<1.0.0" alibabacloud_tea_util = ">=0.3.13,<1.0.0" alibabacloud_tea_xml = ">=0.0.2,<1.0.0" @@ -429,39 +415,15 @@ files = [ {file = "annotated_types-0.7.0.tar.gz", hash = "sha256:aff07c09a53a08bc8cfccb9c85b05f1aa9a2a6f23728d790723543408344ce89"}, ] -[[package]] -name = "anthropic" -version = "0.23.1" -description = "The official Python library for the anthropic API" -optional = false -python-versions = ">=3.7" -files = [ - {file = "anthropic-0.23.1-py3-none-any.whl", hash = "sha256:6dc5779dae83a5834864f4a4af0166c972b70f4cb8fd2765e1558282cc6d6242"}, - {file = "anthropic-0.23.1.tar.gz", hash = "sha256:9325103702cbc96bb09d1b58c36bde75c726f6a01029fb4d85f41ebba07e9066"}, -] - -[package.dependencies] -anyio = ">=3.5.0,<5" -distro = ">=1.7.0,<2" -httpx = ">=0.23.0,<1" -pydantic = ">=1.9.0,<3" -sniffio = "*" -tokenizers = ">=0.13.0" -typing-extensions = ">=4.7,<5" - -[package.extras] -bedrock = ["boto3 (>=1.28.57)", "botocore (>=1.31.57)"] -vertex = ["google-auth (>=2,<3)"] - [[package]] name = "anyio" -version = "4.4.0" +version = "4.6.0" description = "High level compatibility layer for multiple asynchronous event loop implementations" optional = false -python-versions = ">=3.8" +python-versions = ">=3.9" files = [ - {file = "anyio-4.4.0-py3-none-any.whl", hash = "sha256:c1b2d8f46a8a812513012e1107cb0e68c17159a7a594208005a57dc776e1bdc7"}, - {file = "anyio-4.4.0.tar.gz", hash = "sha256:5aadc6a1bbb7cdb0bede386cac5e2940f5e2ff3aa20277e991cf028e0585ce94"}, + {file = "anyio-4.6.0-py3-none-any.whl", hash = "sha256:c7d2e9d63e31599eeb636c8c5c03a7e108d73b345f064f1c19fdc87b79036a9a"}, + {file = "anyio-4.6.0.tar.gz", hash = "sha256:137b4559cbb034c477165047febb6ff83f390fc3b20bf181c1fc0a728cb8beeb"}, ] [package.dependencies] @@ -471,24 +433,9 @@ sniffio = ">=1.1" typing-extensions = {version = ">=4.1", markers = "python_version < \"3.11\""} [package.extras] -doc = ["Sphinx (>=7)", "packaging", "sphinx-autodoc-typehints (>=1.2.0)", "sphinx-rtd-theme"] -test = ["anyio[trio]", "coverage[toml] (>=7)", "exceptiongroup (>=1.2.0)", "hypothesis (>=4.0)", "psutil (>=5.9)", "pytest (>=7.0)", "pytest-mock (>=3.6.1)", "trustme", "uvloop (>=0.17)"] -trio = ["trio (>=0.23)"] - 
-[[package]] -name = "arxiv" -version = "2.1.0" -description = "Python wrapper for the arXiv API: https://arxiv.org/help/api/" -optional = false -python-versions = ">=3.7" -files = [ - {file = "arxiv-2.1.0-py3-none-any.whl", hash = "sha256:d634a0a59c9f05baf524eaa65563bb0a4532d2b4727a1162a1a9ba7e1e6e48cc"}, - {file = "arxiv-2.1.0.tar.gz", hash = "sha256:eb4b1d5ab9dfd66027c344bb324c20be21d56fe15f6ce216ed5b209df747dea8"}, -] - -[package.dependencies] -feedparser = "6.0.10" -requests = "2.31.0" +doc = ["Sphinx (>=7.4,<8.0)", "packaging", "sphinx-autodoc-typehints (>=1.2.0)", "sphinx-rtd-theme"] +test = ["anyio[trio]", "coverage[toml] (>=7)", "exceptiongroup (>=1.2.0)", "hypothesis (>=4.0)", "psutil (>=5.9)", "pytest (>=7.0)", "pytest-mock (>=3.6.1)", "trustme", "uvloop (>=0.21.0b1)"] +trio = ["trio (>=0.26.1)"] [[package]] name = "asgiref" @@ -551,69 +498,6 @@ files = [ [package.dependencies] cryptography = "*" -[[package]] -name = "azure-ai-inference" -version = "1.0.0b4" -description = "Microsoft Azure Ai Inference Client Library for Python" -optional = false -python-versions = ">=3.8" -files = [ - {file = "azure-ai-inference-1.0.0b4.tar.gz", hash = "sha256:5464404bef337338d4af6eefde3af903400ddb8e5c9e6820f902303542fa0f72"}, - {file = "azure_ai_inference-1.0.0b4-py3-none-any.whl", hash = "sha256:e2c949f91845a8cd96cb9a61ffd432b5b0f4ce236b9be8c29d10f38e0a327412"}, -] - -[package.dependencies] -azure-core = ">=1.30.0" -isodate = ">=0.6.1" -typing-extensions = ">=4.6.0" - -[[package]] -name = "azure-ai-ml" -version = "1.20.0" -description = "Microsoft Azure Machine Learning Client Library for Python" -optional = false -python-versions = ">=3.7" -files = [ - {file = "azure-ai-ml-1.20.0.tar.gz", hash = "sha256:6432a0da1b7250cb0db5a1c33202e0419935e19ea32d4c2b3220705f8f1d4101"}, - {file = "azure_ai_ml-1.20.0-py3-none-any.whl", hash = "sha256:c7eb3c5ccf82a6ee94403c3e5060763decd38cf03ff2620a4a6577526e605104"}, -] - -[package.dependencies] -azure-common = ">=1.1" -azure-core = ">=1.23.0" -azure-mgmt-core = ">=1.3.0" -azure-storage-blob = ">=12.10.0" -azure-storage-file-datalake = ">=12.2.0" -azure-storage-file-share = "*" -colorama = "*" -isodate = "*" -jsonschema = ">=4.0.0" -marshmallow = ">=3.5" -msrest = ">=0.6.18" -opencensus-ext-azure = "*" -opencensus-ext-logging = "*" -pydash = ">=6.0.0" -pyjwt = "*" -pyyaml = ">=5.1.0" -strictyaml = "*" -tqdm = "*" -typing-extensions = "*" - -[package.extras] -designer = ["mldesigner"] -mount = ["azureml-dataprep-rslex (>=2.22.0)"] - -[[package]] -name = "azure-common" -version = "1.1.28" -description = "Microsoft Azure Client Library for Python (Common)" -optional = false -python-versions = "*" -files = [ - {file = "azure-common-1.1.28.zip", hash = "sha256:4ac0cd3214e36b6a1b6a442686722a5d8cc449603aa833f3f0f40bda836704a3"}, - {file = "azure_common-1.1.28-py2.py3-none-any.whl", hash = "sha256:5c12d3dcf4ec20599ca6b0d3e09e86e146353d443e7fcc050c9a19c1f9df20ad"}, -] - [[package]] name = "azure-core" version = "1.31.0" @@ -650,20 +534,6 @@ cryptography = ">=2.5" msal = ">=1.24.0" msal-extensions = ">=0.3.0" -[[package]] -name = "azure-mgmt-core" -version = "1.4.0" -description = "Microsoft Azure Management Core Library for Python" -optional = false -python-versions = ">=3.7" -files = [ - {file = "azure-mgmt-core-1.4.0.zip", hash = "sha256:d195208340094f98e5a6661b781cde6f6a051e79ce317caabd8ff97030a9b3ae"}, - {file = "azure_mgmt_core-1.4.0-py3-none-any.whl", hash = "sha256:81071675f186a585555ef01816f2774d49c1c9024cb76e5720c3c0f6b337bb7d"}, -] - -[package.dependencies] 
-azure-core = ">=1.26.2,<2.0.0" - [[package]] name = "azure-storage-blob" version = "12.13.0" @@ -680,42 +550,6 @@ azure-core = ">=1.23.1,<2.0.0" cryptography = ">=2.1.4" msrest = ">=0.6.21" -[[package]] -name = "azure-storage-file-datalake" -version = "12.8.0" -description = "Microsoft Azure File DataLake Storage Client Library for Python" -optional = false -python-versions = ">=3.6" -files = [ - {file = "azure-storage-file-datalake-12.8.0.zip", hash = "sha256:12e6306e5efb5ca28e0ccd9fa79a2c61acd589866d6109fe5601b18509da92f4"}, - {file = "azure_storage_file_datalake-12.8.0-py3-none-any.whl", hash = "sha256:b6cf5733fe794bf3c866efbe3ce1941409e35b6b125028ac558b436bf90f2de7"}, -] - -[package.dependencies] -azure-core = ">=1.23.1,<2.0.0" -azure-storage-blob = ">=12.13.0,<13.0.0" -msrest = ">=0.6.21" - -[[package]] -name = "azure-storage-file-share" -version = "12.17.0" -description = "Microsoft Azure Azure File Share Storage Client Library for Python" -optional = false -python-versions = ">=3.8" -files = [ - {file = "azure-storage-file-share-12.17.0.tar.gz", hash = "sha256:f7b2c6cfc1b7cb80097a53b1ed2efa9e545b49a291430d369cdb49fafbc841d6"}, - {file = "azure_storage_file_share-12.17.0-py3-none-any.whl", hash = "sha256:c4652759a9d529bf08881bb53275bf38774bb643746b849d27c47118f9cf923d"}, -] - -[package.dependencies] -azure-core = ">=1.28.0" -cryptography = ">=2.1.4" -isodate = ">=0.6.1" -typing-extensions = ">=4.6.0" - -[package.extras] -aio = ["azure-core[aio] (>=1.28.0)"] - [[package]] name = "backoff" version = "2.2.1" @@ -787,13 +621,13 @@ lxml = ["lxml"] [[package]] name = "billiard" -version = "4.2.0" +version = "4.2.1" description = "Python multiprocessing fork with improvements and bugfixes" optional = false python-versions = ">=3.7" files = [ - {file = "billiard-4.2.0-py3-none-any.whl", hash = "sha256:07aa978b308f334ff8282bd4a746e681b3513db5c9a514cbdd810cbbdc19714d"}, - {file = "billiard-4.2.0.tar.gz", hash = "sha256:9a3c3184cb275aa17a732f93f65b20c525d3d9f253722d26a82194803ade5a2c"}, + {file = "billiard-4.2.1-py3-none-any.whl", hash = "sha256:40b59a4ac8806ba2c2369ea98d876bc6108b051c227baffd928c644d15d8f3cb"}, + {file = "billiard-4.2.1.tar.gz", hash = "sha256:12b641b0c539073fc8d3f5b8b7be998956665c4233c7c1fcd66a7e677c4fb36f"}, ] [[package]] @@ -828,13 +662,13 @@ crt = ["botocore[crt] (>=1.21.0,<2.0a0)"] [[package]] name = "botocore" -version = "1.35.19" +version = "1.35.29" description = "Low-level, data-driven core of boto 3." 
optional = false python-versions = ">=3.8" files = [ - {file = "botocore-1.35.19-py3-none-any.whl", hash = "sha256:c83f7f0cacfe7c19b109b363ebfa8736e570d24922f16ed371681f58ebab44a9"}, - {file = "botocore-1.35.19.tar.gz", hash = "sha256:42d6d8db7250cbd7899f786f9861e02cab17dc238f64d6acb976098ed9809625"}, + {file = "botocore-1.35.29-py3-none-any.whl", hash = "sha256:f8e3ae0d84214eff3fb69cb4dc51cea6c43d3bde82027a94d00c52b941d6c3d5"}, + {file = "botocore-1.35.29.tar.gz", hash = "sha256:4ed28ab03675bb008a290c452c5ddd7aaa5d4e3fa1912aadbdf93057ee84362b"}, ] [package.dependencies] @@ -1597,128 +1431,6 @@ pandas = ["pandas"] sqlalchemy = ["sqlalchemy (>1.3.21,<2.0)"] tzlocal = ["tzlocal (>=4.0)"] -[[package]] -name = "clickhouse-driver" -version = "0.2.9" -description = "Python driver with native interface for ClickHouse" -optional = false -python-versions = "<4,>=3.7" -files = [ - {file = "clickhouse-driver-0.2.9.tar.gz", hash = "sha256:050ea4870ead993910b39e7fae965dc1c347b2e8191dcd977cd4b385f9e19f87"}, - {file = "clickhouse_driver-0.2.9-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:6ce04e9d0d0f39561f312d1ac1a8147bc9206e4267e1a23e20e0423ebac95534"}, - {file = "clickhouse_driver-0.2.9-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:7ae5c8931bf290b9d85582e7955b9aad7f19ff9954e48caa4f9a180ea4d01078"}, - {file = "clickhouse_driver-0.2.9-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3e51792f3bd12c32cb15a907f12de3c9d264843f0bb33dce400e3966c9f09a3f"}, - {file = "clickhouse_driver-0.2.9-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:42fc546c31e4a04c97b749769335a679c9044dc693fa7a93e38c97fd6727173d"}, - {file = "clickhouse_driver-0.2.9-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:6a383a403d185185c64e49edd6a19b2ec973c5adcb8ebff7ed2fc539a2cc65a5"}, - {file = "clickhouse_driver-0.2.9-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f05321a97e816afc75b3e4f9eda989848fecf14ecf1a91d0f22c04258123d1f7"}, - {file = "clickhouse_driver-0.2.9-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:be47e793846aac28442b6b1c6554e0731b848a5a7759a54aa2489997354efe4a"}, - {file = "clickhouse_driver-0.2.9-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:780e42a215d1ae2f6d695d74dd6f087781fb2fa51c508b58f79e68c24c5364e0"}, - {file = "clickhouse_driver-0.2.9-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:9e28f1fe850675e173db586e9f1ac790e8f7edd507a4227cd54cd7445f8e75b6"}, - {file = "clickhouse_driver-0.2.9-cp310-cp310-musllinux_1_1_ppc64le.whl", hash = "sha256:125aae7f1308d3083dadbb3c78f828ae492e060f13e4007a0cf53a8169ed7b39"}, - {file = "clickhouse_driver-0.2.9-cp310-cp310-musllinux_1_1_s390x.whl", hash = "sha256:2f3c4fbb61e75c62a1ab93a1070d362de4cb5682f82833b2c12deccb3bae888d"}, - {file = "clickhouse_driver-0.2.9-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:0dc03196a84e32d23b88b665be69afae98f57426f5fdf203e16715b756757961"}, - {file = "clickhouse_driver-0.2.9-cp310-cp310-win32.whl", hash = "sha256:25695d78a1d7ad6e221e800612eac08559f6182bf6dee0a220d08de7b612d993"}, - {file = "clickhouse_driver-0.2.9-cp310-cp310-win_amd64.whl", hash = "sha256:367acac95398d721a0a2a6cf87e93638c5588b79498a9848676ce7f182540a6c"}, - {file = "clickhouse_driver-0.2.9-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:5a7353a7a08eee3aa0001d8a5d771cb1f37e2acae1b48178002431f23892121a"}, - {file = "clickhouse_driver-0.2.9-cp311-cp311-macosx_11_0_arm64.whl", hash = 
"sha256:6af1c6cbc3481205503ab72a34aa76d6519249c904aa3f7a84b31e7b435555be"}, - {file = "clickhouse_driver-0.2.9-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:48033803abd1100bfff6b9a1769d831b672cd3cda5147e0323b956fd1416d38d"}, - {file = "clickhouse_driver-0.2.9-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:1f202a58a540c85e47c31dabc8f84b6fe79dca5315c866450a538d58d6fa0571"}, - {file = "clickhouse_driver-0.2.9-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:e4df50fd84bfa4aa1eb7b52d48136066bfb64fabb7ceb62d4c318b45a296200b"}, - {file = "clickhouse_driver-0.2.9-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:433a650571a0d7766eb6f402e8f5930222997686c2ee01ded22f1d8fd46af9d4"}, - {file = "clickhouse_driver-0.2.9-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:232ee260475611cbf7adb554b81db6b5790b36e634fe2164f4ffcd2ca3e63a71"}, - {file = "clickhouse_driver-0.2.9-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:09049f7e71f15c9c9a03f597f77fc1f7b61ababd155c06c0d9e64d1453d945d7"}, - {file = "clickhouse_driver-0.2.9-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:424153d1d5f5a807f596a48cc88119f9fb3213ca7e38f57b8d15dcc964dd91f7"}, - {file = "clickhouse_driver-0.2.9-cp311-cp311-musllinux_1_1_ppc64le.whl", hash = "sha256:4f078fd1cf19c4ca63b8d1e0803df665310c8d5b644c5b02bf2465e8d6ef8f55"}, - {file = "clickhouse_driver-0.2.9-cp311-cp311-musllinux_1_1_s390x.whl", hash = "sha256:f138d939e26e767537f891170b69a55a88038919f5c10d8865b67b8777fe4848"}, - {file = "clickhouse_driver-0.2.9-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:9aafabc7e32942f85dcb46f007f447ab69024831575df97cae28c6ed127654d1"}, - {file = "clickhouse_driver-0.2.9-cp311-cp311-win32.whl", hash = "sha256:935e16ebf1a1998d8493979d858821a755503c9b8af572d9c450173d4b88868c"}, - {file = "clickhouse_driver-0.2.9-cp311-cp311-win_amd64.whl", hash = "sha256:306b3102cba278b5dfec6f5f7dc8b78416c403901510475c74913345b56c9e42"}, - {file = "clickhouse_driver-0.2.9-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:fcb2fd00e58650ae206a6d5dbc83117240e622471aa5124733fbf2805eb8bda0"}, - {file = "clickhouse_driver-0.2.9-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:b7a3e6b0a1eb218e3d870a94c76daaf65da46dca8f6888ea6542f94905c24d88"}, - {file = "clickhouse_driver-0.2.9-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4a8d8e2888a857d8db3d98765a5ad23ab561241feaef68bbffc5a0bd9c142342"}, - {file = "clickhouse_driver-0.2.9-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:85d50c011467f5ff6772c4059345968b854b72e07a0219030b7c3f68419eb7f7"}, - {file = "clickhouse_driver-0.2.9-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:93b395c1370629ccce8fb3e14cd5be2646d227bd32018c21f753c543e9a7e96b"}, - {file = "clickhouse_driver-0.2.9-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6dbcee870c60d9835e5dce1456ab6b9d807e6669246357f4b321ef747b90fa43"}, - {file = "clickhouse_driver-0.2.9-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:fffa5a5f317b1ec92e406a30a008929054cf3164d2324a3c465d0a0330273bf8"}, - {file = "clickhouse_driver-0.2.9-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:476702740a279744badbd177ae1c4a2d089ec128bd676861219d1f92078e4530"}, - {file = "clickhouse_driver-0.2.9-cp312-cp312-musllinux_1_1_i686.whl", hash = 
"sha256:5cd6d95fab5ff80e9dc9baedc9a926f62f74072d42d5804388d63b63bec0bb63"}, - {file = "clickhouse_driver-0.2.9-cp312-cp312-musllinux_1_1_ppc64le.whl", hash = "sha256:05027d32d7cf3e46cb8d04f8c984745ae01bd1bc7b3579f9dadf9b3cca735697"}, - {file = "clickhouse_driver-0.2.9-cp312-cp312-musllinux_1_1_s390x.whl", hash = "sha256:3d11831842250b4c1b26503a6e9c511fc03db096608b7c6af743818c421a3032"}, - {file = "clickhouse_driver-0.2.9-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:81b4b671b785ebb0b8aeabf2432e47072413d81db959eb8cfd8b6ab58c5799c6"}, - {file = "clickhouse_driver-0.2.9-cp312-cp312-win32.whl", hash = "sha256:e893bd4e014877174a59e032b0e99809c95ec61328a0e6bd9352c74a2f6111a8"}, - {file = "clickhouse_driver-0.2.9-cp312-cp312-win_amd64.whl", hash = "sha256:de6624e28eeffd01668803d28ae89e3d4e359b1bff8b60e4933e1cb3c6f86f18"}, - {file = "clickhouse_driver-0.2.9-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:909205324089a9ee59bee7ecbfa94595435118cca310fd62efdf13f225aa2965"}, - {file = "clickhouse_driver-0.2.9-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:03f31d6e47dc2b0f367f598f5629147ed056d7216c1788e25190fcfbfa02e749"}, - {file = "clickhouse_driver-0.2.9-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:ed84179914b2b7bb434c2322a6e7fd83daa681c97a050450511b66d917a129bb"}, - {file = "clickhouse_driver-0.2.9-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:67d1bf63efb4ba14ae6c6da99622e4a549e68fc3ee14d859bf611d8e6a61b3fa"}, - {file = "clickhouse_driver-0.2.9-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9eed23ea41dd582d76f7a2ec7e09cbe5e9fec008f11a4799fa35ce44a3ebd283"}, - {file = "clickhouse_driver-0.2.9-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:a654291132766efa2703058317749d7c69b69f02d89bac75703eaf7f775e20da"}, - {file = "clickhouse_driver-0.2.9-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:1c26c5ef16d0ef3cabc5bc03e827e01b0a4afb5b4eaf8850b7cf740cee04a1d4"}, - {file = "clickhouse_driver-0.2.9-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:b57e83d7986d3cbda6096974a9510eb53cb33ad9072288c87c820ba5eee3370e"}, - {file = "clickhouse_driver-0.2.9-cp37-cp37m-musllinux_1_1_ppc64le.whl", hash = "sha256:153cc03b36f22cbde55aa6a5bbe99072a025567a54c48b262eb0da15d8cd7c83"}, - {file = "clickhouse_driver-0.2.9-cp37-cp37m-musllinux_1_1_s390x.whl", hash = "sha256:83a857d99192936091f495826ae97497cd1873af213b1e069d56369fb182ab8e"}, - {file = "clickhouse_driver-0.2.9-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:bb05a9bb22cbe9ad187ad268f86adf7e60df6083331fe59c01571b7b725212dd"}, - {file = "clickhouse_driver-0.2.9-cp37-cp37m-win32.whl", hash = "sha256:3e282c5c25e32d96ed151e5460d2bf4ecb805ea64449197dd918e84e768016df"}, - {file = "clickhouse_driver-0.2.9-cp37-cp37m-win_amd64.whl", hash = "sha256:c46dccfb04a9afd61a1b0e60bfefceff917f76da2c863f9b36b39248496d5c77"}, - {file = "clickhouse_driver-0.2.9-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:612ca9028c718f362c97f552e63d313cf1a70a616ef8532ddb0effdaf12ebef9"}, - {file = "clickhouse_driver-0.2.9-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:471b884d318e012f68d858476052742048918854f7dfe87d78e819f87a848ffb"}, - {file = "clickhouse_driver-0.2.9-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:58ee63c35e99da887eb035c8d6d9e64fd298a0efc1460395297dd5cc281a6912"}, - {file = 
"clickhouse_driver-0.2.9-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:0819bb63d2c5025a1fb9589f57ef82602687cef11081d6dfa6f2ce44606a1772"}, - {file = "clickhouse_driver-0.2.9-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:f6680ee18870bca1fbab1736c8203a965efaec119ab4c37821ad99add248ee08"}, - {file = "clickhouse_driver-0.2.9-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:713c498741b54debd3a10a5529e70b6ed85ca33c3e8629e24ae5cd8160b5a5f2"}, - {file = "clickhouse_driver-0.2.9-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:730837b8f63941065c9c955c44286aef0987fb084ffb3f55bf1e4fe07df62269"}, - {file = "clickhouse_driver-0.2.9-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:9f4e38b2ea09214c8e7848a19391009a18c56a3640e1ba1a606b9e57aeb63404"}, - {file = "clickhouse_driver-0.2.9-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:457f1d6639e0345b717ae603c79bd087a35361ce68c1c308d154b80b841e5e7d"}, - {file = "clickhouse_driver-0.2.9-cp38-cp38-musllinux_1_1_ppc64le.whl", hash = "sha256:49a55aeb8ea625a87965a96e361bbb1ad67d0931bfb2a575f899c1064e70c2da"}, - {file = "clickhouse_driver-0.2.9-cp38-cp38-musllinux_1_1_s390x.whl", hash = "sha256:9230058d8c9b1a04079afae4650fb67745f0f1c39db335728f64d48bd2c19246"}, - {file = "clickhouse_driver-0.2.9-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:8798258bd556542dd9c6b8ebe62f9c5110c9dcdf97c57fb077e7b8b6d6da0826"}, - {file = "clickhouse_driver-0.2.9-cp38-cp38-win32.whl", hash = "sha256:ce8e3f4be46bcc63555863f70ab0035202b082b37e6f16876ef50e7bc4b47056"}, - {file = "clickhouse_driver-0.2.9-cp38-cp38-win_amd64.whl", hash = "sha256:2d982959ff628255808d895a67493f2dab0c3a9bfc65eeda0f00c8ae9962a1b3"}, - {file = "clickhouse_driver-0.2.9-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:a46b227fab4420566ed24ee70d90076226d16fcf09c6ad4d428717efcf536446"}, - {file = "clickhouse_driver-0.2.9-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:7eaa2ce5ea08cf5fddebb8c274c450e102f329f9e6966b6cd85aa671c48e5552"}, - {file = "clickhouse_driver-0.2.9-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f97f0083194d6e23b5ef6156ed0d5388c37847b298118199d7937ba26412a9e2"}, - {file = "clickhouse_driver-0.2.9-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:a6cab5cdbb0f8ee51d879d977b78f07068b585225ac656f3c081896c362e8f83"}, - {file = "clickhouse_driver-0.2.9-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:cdb1b011a53ee71539e9dc655f268b111bac484db300da92829ed59e910a8fd0"}, - {file = "clickhouse_driver-0.2.9-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7bf51bb761b281d20910b4b689c699ef98027845467daa5bb5dfdb53bd6ee404"}, - {file = "clickhouse_driver-0.2.9-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:b8ea462e3cebb121ff55002e9c8a9a0a3fd9b5bbbf688b4960f0a83c0172fb31"}, - {file = "clickhouse_driver-0.2.9-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:70bee21c245226ad0d637bf470472e2d487b86911b6d673a862127b934336ff4"}, - {file = "clickhouse_driver-0.2.9-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:253a3c223b944d691bf0abbd599f592ea3b36f0a71d2526833b1718f37eca5c2"}, - {file = "clickhouse_driver-0.2.9-cp39-cp39-musllinux_1_1_ppc64le.whl", hash = "sha256:a6549b53fc5c403dc556cb39b2ae94d73f9b113daa00438a660bb1dd5380ae4d"}, - {file = "clickhouse_driver-0.2.9-cp39-cp39-musllinux_1_1_s390x.whl", hash = 
"sha256:1c685cd4abe61af1c26279ff04b9f567eb4d6c1ec7fb265af7481b1f153043aa"}, - {file = "clickhouse_driver-0.2.9-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:7e25144219577491929d032a6c3ddd63c6cd7fa764af829a5637f798190d9b26"}, - {file = "clickhouse_driver-0.2.9-cp39-cp39-win32.whl", hash = "sha256:0b9925610d25405a8e6d83ff4f54fc2456a121adb0155999972f5edd6ba3efc8"}, - {file = "clickhouse_driver-0.2.9-cp39-cp39-win_amd64.whl", hash = "sha256:b243de483cfa02716053b0148d73558f4694f3c27b97fc1eaa97d7079563a14d"}, - {file = "clickhouse_driver-0.2.9-pp310-pypy310_pp73-macosx_10_9_x86_64.whl", hash = "sha256:45a3d5b1d06750fd6a18c29b871494a2635670099ec7693e756a5885a4a70dbf"}, - {file = "clickhouse_driver-0.2.9-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8415ffebd6ca9eef3024763abc450f8659f1716d015bd563c537d01c7fbc3569"}, - {file = "clickhouse_driver-0.2.9-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ace48db993aa4bd31c42de0fa8d38c94ad47405916d6b61f7a7168a48fb52ac1"}, - {file = "clickhouse_driver-0.2.9-pp310-pypy310_pp73-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:b07123334fe143bfe6fa4e3d4b732d647d5fd2cfb9ec7f2f76104b46fe9d20c6"}, - {file = "clickhouse_driver-0.2.9-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:e2af3efa73d296420ce6362789f5b1febf75d4aa159a479393f01549115509d5"}, - {file = "clickhouse_driver-0.2.9-pp37-pypy37_pp73-macosx_10_9_x86_64.whl", hash = "sha256:baf57eede88d07a1eb04352d26fc58a4d97991ca3d8840f7c5d48691dec9f251"}, - {file = "clickhouse_driver-0.2.9-pp37-pypy37_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:275d0ccdab9c3571bdb3e9acfab4497930aa584ff2766b035bb2f854deaf8b82"}, - {file = "clickhouse_driver-0.2.9-pp37-pypy37_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:293da77bfcac3168fb35b27c242f97c1a05502435c0686ecbb8e2e4abcb3de26"}, - {file = "clickhouse_driver-0.2.9-pp37-pypy37_pp73-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:8d6c2e5830705e4eeef33070ca4d5a24dfa221f28f2f540e5e6842c26e70b10b"}, - {file = "clickhouse_driver-0.2.9-pp37-pypy37_pp73-win_amd64.whl", hash = "sha256:11934bd78d97dd7e1a23a6222b5edd1e1b4d34e1ead5c846dc2b5c56fdc35ff5"}, - {file = "clickhouse_driver-0.2.9-pp38-pypy38_pp73-macosx_10_9_x86_64.whl", hash = "sha256:b802b6f0fbdcc3ab81b87f09b694dde91ab049f44d1d2c08c3dc8ea9a5950cfa"}, - {file = "clickhouse_driver-0.2.9-pp38-pypy38_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:7af871c5315eb829ecf4533c790461ea8f73b3bfd5f533b0467e479fdf6ddcfd"}, - {file = "clickhouse_driver-0.2.9-pp38-pypy38_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9d577dd4867b9e26cf60590e1f500990c8701a6e3cfbb9e644f4d0c0fb607028"}, - {file = "clickhouse_driver-0.2.9-pp38-pypy38_pp73-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:2ed3dea2d1eca85fef5b8564ddd76dedb15a610c77d55d555b49d9f7c896b64b"}, - {file = "clickhouse_driver-0.2.9-pp38-pypy38_pp73-win_amd64.whl", hash = "sha256:91ec96f2c48e5bdeac9eea43a9bc9cc19acb2d2c59df0a13d5520dfc32457605"}, - {file = "clickhouse_driver-0.2.9-pp39-pypy39_pp73-macosx_10_9_x86_64.whl", hash = "sha256:7667ab423452754f36ba8fb41e006a46baace9c94e2aca2a745689b9f2753dfb"}, - {file = "clickhouse_driver-0.2.9-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = 
"sha256:653583b1f3b088d106f180d6f02c90917ecd669ec956b62903a05df4a7f44863"}, - {file = "clickhouse_driver-0.2.9-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7ef3dd0cbdf2f0171caab90389af0ede068ec802bf46c6a77f14e6edc86671bc"}, - {file = "clickhouse_driver-0.2.9-pp39-pypy39_pp73-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:11b1833ee8ff8d5df39a34a895e060b57bd81e05ea68822bc60476daff4ce1c8"}, - {file = "clickhouse_driver-0.2.9-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:8a3195639e6393b9d4aafe736036881ff86b6be5855d4bf7d9f5c31637181ec3"}, -] - -[package.dependencies] -pytz = "*" -tzlocal = "*" - -[package.extras] -lz4 = ["clickhouse-cityhash (>=1.0.2.1)", "lz4", "lz4 (<=3.0.1)"] -numpy = ["numpy (>=1.12.0)", "pandas (>=0.24.0)"] -zstd = ["clickhouse-cityhash (>=1.0.2.1)", "zstd"] - [[package]] name = "cloudpickle" version = "2.2.1" @@ -1730,42 +1442,6 @@ files = [ {file = "cloudpickle-2.2.1.tar.gz", hash = "sha256:d89684b8de9e34a2a43b3460fbca07d09d6e25ce858df4d5a44240403b6178f5"}, ] -[[package]] -name = "cloudscraper" -version = "1.2.71" -description = "A Python module to bypass Cloudflare's anti-bot page." -optional = false -python-versions = "*" -files = [ - {file = "cloudscraper-1.2.71-py2.py3-none-any.whl", hash = "sha256:76f50ca529ed2279e220837befdec892626f9511708e200d48d5bb76ded679b0"}, - {file = "cloudscraper-1.2.71.tar.gz", hash = "sha256:429c6e8aa6916d5bad5c8a5eac50f3ea53c9ac22616f6cb21b18dcc71517d0d3"}, -] - -[package.dependencies] -pyparsing = ">=2.4.7" -requests = ">=2.9.2" -requests-toolbelt = ">=0.9.1" - -[[package]] -name = "cohere" -version = "5.2.6" -description = "" -optional = false -python-versions = "<4.0,>=3.8" -files = [ - {file = "cohere-5.2.6-py3-none-any.whl", hash = "sha256:256b4ed00f47eb315401d7f28834655714f098382908e7d0ad5c98225aa6a57d"}, - {file = "cohere-5.2.6.tar.gz", hash = "sha256:15d13682706fbafc8cf700e195f628389a643eb7ebd6d7c5e9d6e1ebd3f942fb"}, -] - -[package.dependencies] -fastavro = ">=1.9.4,<2.0.0" -httpx = ">=0.21.2" -pydantic = ">=1.9.2" -requests = ">=2.0.0,<3.0.0" -tokenizers = ">=0.15.2,<0.16.0" -types-requests = ">=2.0.0,<3.0.0" -typing_extensions = ">=4.0.0" - [[package]] name = "colorama" version = "0.4.6" @@ -1794,90 +1470,6 @@ humanfriendly = ">=9.1" [package.extras] cron = ["capturer (>=2.4)"] -[[package]] -name = "contourpy" -version = "1.3.0" -description = "Python library for calculating contours of 2D quadrilateral grids" -optional = false -python-versions = ">=3.9" -files = [ - {file = "contourpy-1.3.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:880ea32e5c774634f9fcd46504bf9f080a41ad855f4fef54f5380f5133d343c7"}, - {file = "contourpy-1.3.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:76c905ef940a4474a6289c71d53122a4f77766eef23c03cd57016ce19d0f7b42"}, - {file = "contourpy-1.3.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:92f8557cbb07415a4d6fa191f20fd9d2d9eb9c0b61d1b2f52a8926e43c6e9af7"}, - {file = "contourpy-1.3.0-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:36f965570cff02b874773c49bfe85562b47030805d7d8360748f3eca570f4cab"}, - {file = "contourpy-1.3.0-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:cacd81e2d4b6f89c9f8a5b69b86490152ff39afc58a95af002a398273e5ce589"}, - {file = "contourpy-1.3.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:69375194457ad0fad3a839b9e29aa0b0ed53bb54db1bfb6c3ae43d111c31ce41"}, - {file = 
"contourpy-1.3.0-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:7a52040312b1a858b5e31ef28c2e865376a386c60c0e248370bbea2d3f3b760d"}, - {file = "contourpy-1.3.0-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:3faeb2998e4fcb256542e8a926d08da08977f7f5e62cf733f3c211c2a5586223"}, - {file = "contourpy-1.3.0-cp310-cp310-win32.whl", hash = "sha256:36e0cff201bcb17a0a8ecc7f454fe078437fa6bda730e695a92f2d9932bd507f"}, - {file = "contourpy-1.3.0-cp310-cp310-win_amd64.whl", hash = "sha256:87ddffef1dbe5e669b5c2440b643d3fdd8622a348fe1983fad7a0f0ccb1cd67b"}, - {file = "contourpy-1.3.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:0fa4c02abe6c446ba70d96ece336e621efa4aecae43eaa9b030ae5fb92b309ad"}, - {file = "contourpy-1.3.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:834e0cfe17ba12f79963861e0f908556b2cedd52e1f75e6578801febcc6a9f49"}, - {file = "contourpy-1.3.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:dbc4c3217eee163fa3984fd1567632b48d6dfd29216da3ded3d7b844a8014a66"}, - {file = "contourpy-1.3.0-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:4865cd1d419e0c7a7bf6de1777b185eebdc51470800a9f42b9e9decf17762081"}, - {file = "contourpy-1.3.0-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:303c252947ab4b14c08afeb52375b26781ccd6a5ccd81abcdfc1fafd14cf93c1"}, - {file = "contourpy-1.3.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:637f674226be46f6ba372fd29d9523dd977a291f66ab2a74fbeb5530bb3f445d"}, - {file = "contourpy-1.3.0-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:76a896b2f195b57db25d6b44e7e03f221d32fe318d03ede41f8b4d9ba1bff53c"}, - {file = "contourpy-1.3.0-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:e1fd23e9d01591bab45546c089ae89d926917a66dceb3abcf01f6105d927e2cb"}, - {file = "contourpy-1.3.0-cp311-cp311-win32.whl", hash = "sha256:d402880b84df3bec6eab53cd0cf802cae6a2ef9537e70cf75e91618a3801c20c"}, - {file = "contourpy-1.3.0-cp311-cp311-win_amd64.whl", hash = "sha256:6cb6cc968059db9c62cb35fbf70248f40994dfcd7aa10444bbf8b3faeb7c2d67"}, - {file = "contourpy-1.3.0-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:570ef7cf892f0afbe5b2ee410c507ce12e15a5fa91017a0009f79f7d93a1268f"}, - {file = "contourpy-1.3.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:da84c537cb8b97d153e9fb208c221c45605f73147bd4cadd23bdae915042aad6"}, - {file = "contourpy-1.3.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0be4d8425bfa755e0fd76ee1e019636ccc7c29f77a7c86b4328a9eb6a26d0639"}, - {file = "contourpy-1.3.0-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:9c0da700bf58f6e0b65312d0a5e695179a71d0163957fa381bb3c1f72972537c"}, - {file = "contourpy-1.3.0-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:eb8b141bb00fa977d9122636b16aa67d37fd40a3d8b52dd837e536d64b9a4d06"}, - {file = "contourpy-1.3.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3634b5385c6716c258d0419c46d05c8aa7dc8cb70326c9a4fb66b69ad2b52e09"}, - {file = "contourpy-1.3.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:0dce35502151b6bd35027ac39ba6e5a44be13a68f55735c3612c568cac3805fd"}, - {file = "contourpy-1.3.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:aea348f053c645100612b333adc5983d87be69acdc6d77d3169c090d3b01dc35"}, - {file = "contourpy-1.3.0-cp312-cp312-win32.whl", hash = "sha256:90f73a5116ad1ba7174341ef3ea5c3150ddf20b024b98fb0c3b29034752c8aeb"}, - {file = 
"contourpy-1.3.0-cp312-cp312-win_amd64.whl", hash = "sha256:b11b39aea6be6764f84360fce6c82211a9db32a7c7de8fa6dd5397cf1d079c3b"}, - {file = "contourpy-1.3.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:3e1c7fa44aaae40a2247e2e8e0627f4bea3dd257014764aa644f319a5f8600e3"}, - {file = "contourpy-1.3.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:364174c2a76057feef647c802652f00953b575723062560498dc7930fc9b1cb7"}, - {file = "contourpy-1.3.0-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:32b238b3b3b649e09ce9aaf51f0c261d38644bdfa35cbaf7b263457850957a84"}, - {file = "contourpy-1.3.0-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:d51fca85f9f7ad0b65b4b9fe800406d0d77017d7270d31ec3fb1cc07358fdea0"}, - {file = "contourpy-1.3.0-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:732896af21716b29ab3e988d4ce14bc5133733b85956316fb0c56355f398099b"}, - {file = "contourpy-1.3.0-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d73f659398a0904e125280836ae6f88ba9b178b2fed6884f3b1f95b989d2c8da"}, - {file = "contourpy-1.3.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:c6c7c2408b7048082932cf4e641fa3b8ca848259212f51c8c59c45aa7ac18f14"}, - {file = "contourpy-1.3.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:f317576606de89da6b7e0861cf6061f6146ead3528acabff9236458a6ba467f8"}, - {file = "contourpy-1.3.0-cp313-cp313-win32.whl", hash = "sha256:31cd3a85dbdf1fc002280c65caa7e2b5f65e4a973fcdf70dd2fdcb9868069294"}, - {file = "contourpy-1.3.0-cp313-cp313-win_amd64.whl", hash = "sha256:4553c421929ec95fb07b3aaca0fae668b2eb5a5203d1217ca7c34c063c53d087"}, - {file = "contourpy-1.3.0-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:345af746d7766821d05d72cb8f3845dfd08dd137101a2cb9b24de277d716def8"}, - {file = "contourpy-1.3.0-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:3bb3808858a9dc68f6f03d319acd5f1b8a337e6cdda197f02f4b8ff67ad2057b"}, - {file = "contourpy-1.3.0-cp313-cp313t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:420d39daa61aab1221567b42eecb01112908b2cab7f1b4106a52caaec8d36973"}, - {file = "contourpy-1.3.0-cp313-cp313t-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:4d63ee447261e963af02642ffcb864e5a2ee4cbfd78080657a9880b8b1868e18"}, - {file = "contourpy-1.3.0-cp313-cp313t-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:167d6c890815e1dac9536dca00828b445d5d0df4d6a8c6adb4a7ec3166812fa8"}, - {file = "contourpy-1.3.0-cp313-cp313t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:710a26b3dc80c0e4febf04555de66f5fd17e9cf7170a7b08000601a10570bda6"}, - {file = "contourpy-1.3.0-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:75ee7cb1a14c617f34a51d11fa7524173e56551646828353c4af859c56b766e2"}, - {file = "contourpy-1.3.0-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:33c92cdae89ec5135d036e7218e69b0bb2851206077251f04a6c4e0e21f03927"}, - {file = "contourpy-1.3.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:a11077e395f67ffc2c44ec2418cfebed032cd6da3022a94fc227b6faf8e2acb8"}, - {file = "contourpy-1.3.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:e8134301d7e204c88ed7ab50028ba06c683000040ede1d617298611f9dc6240c"}, - {file = "contourpy-1.3.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e12968fdfd5bb45ffdf6192a590bd8ddd3ba9e58360b29683c6bb71a7b41edca"}, - {file = "contourpy-1.3.0-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = 
"sha256:fd2a0fc506eccaaa7595b7e1418951f213cf8255be2600f1ea1b61e46a60c55f"}, - {file = "contourpy-1.3.0-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:4cfb5c62ce023dfc410d6059c936dcf96442ba40814aefbfa575425a3a7f19dc"}, - {file = "contourpy-1.3.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:68a32389b06b82c2fdd68276148d7b9275b5f5cf13e5417e4252f6d1a34f72a2"}, - {file = "contourpy-1.3.0-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:94e848a6b83da10898cbf1311a815f770acc9b6a3f2d646f330d57eb4e87592e"}, - {file = "contourpy-1.3.0-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:d78ab28a03c854a873787a0a42254a0ccb3cb133c672f645c9f9c8f3ae9d0800"}, - {file = "contourpy-1.3.0-cp39-cp39-win32.whl", hash = "sha256:81cb5ed4952aae6014bc9d0421dec7c5835c9c8c31cdf51910b708f548cf58e5"}, - {file = "contourpy-1.3.0-cp39-cp39-win_amd64.whl", hash = "sha256:14e262f67bd7e6eb6880bc564dcda30b15e351a594657e55b7eec94b6ef72843"}, - {file = "contourpy-1.3.0-pp310-pypy310_pp73-macosx_10_15_x86_64.whl", hash = "sha256:fe41b41505a5a33aeaed2a613dccaeaa74e0e3ead6dd6fd3a118fb471644fd6c"}, - {file = "contourpy-1.3.0-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:eca7e17a65f72a5133bdbec9ecf22401c62bcf4821361ef7811faee695799779"}, - {file = "contourpy-1.3.0-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:1ec4dc6bf570f5b22ed0d7efba0dfa9c5b9e0431aeea7581aa217542d9e809a4"}, - {file = "contourpy-1.3.0-pp39-pypy39_pp73-macosx_10_15_x86_64.whl", hash = "sha256:00ccd0dbaad6d804ab259820fa7cb0b8036bda0686ef844d24125d8287178ce0"}, - {file = "contourpy-1.3.0-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8ca947601224119117f7c19c9cdf6b3ab54c5726ef1d906aa4a69dfb6dd58102"}, - {file = "contourpy-1.3.0-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:c6ec93afeb848a0845a18989da3beca3eec2c0f852322efe21af1931147d12cb"}, - {file = "contourpy-1.3.0.tar.gz", hash = "sha256:7ffa0db17717a8ffb127efd0c95a4362d996b892c2904db72428d5b52e1938a4"}, -] - -[package.dependencies] -numpy = ">=1.23" - -[package.extras] -bokeh = ["bokeh", "selenium"] -docs = ["furo", "sphinx (>=7.2)", "sphinx-copybutton"] -mypy = ["contourpy[bokeh,docs]", "docutils-stubs", "mypy (==1.11.1)", "types-Pillow"] -test = ["Pillow", "contourpy[test-no-images]", "matplotlib"] -test-no-images = ["pytest", "pytest-cov", "pytest-rerunfailures", "pytest-xdist", "wurlitzer"] - [[package]] name = "cos-python-sdk-v5" version = "1.9.30" @@ -2031,77 +1623,6 @@ ssh = ["bcrypt (>=3.1.5)"] test = ["certifi", "pretend", "pytest (>=6.2.0)", "pytest-benchmark", "pytest-cov", "pytest-xdist"] test-randomorder = ["pytest-randomly"] -[[package]] -name = "cssselect" -version = "1.2.0" -description = "cssselect parses CSS3 Selectors and translates them to XPath 1.0" -optional = false -python-versions = ">=3.7" -files = [ - {file = "cssselect-1.2.0-py2.py3-none-any.whl", hash = "sha256:da1885f0c10b60c03ed5eccbb6b68d6eff248d91976fcde348f395d54c9fd35e"}, - {file = "cssselect-1.2.0.tar.gz", hash = "sha256:666b19839cfaddb9ce9d36bfe4c969132c647b92fc9088c4e23f786b30f1b3dc"}, -] - -[[package]] -name = "cycler" -version = "0.12.1" -description = "Composable style cycles" -optional = false -python-versions = ">=3.8" -files = [ - {file = "cycler-0.12.1-py3-none-any.whl", hash = "sha256:85cef7cff222d8644161529808465972e51340599459b8ac3ccbac5a854e0d30"}, - {file = "cycler-0.12.1.tar.gz", hash = "sha256:88bb128f02ba341da8ef447245a9e138fae777f6a23943da4540077d3601eb1c"}, -] - -[package.extras] 
-docs = ["ipython", "matplotlib", "numpydoc", "sphinx"] -tests = ["pytest", "pytest-cov", "pytest-xdist"] - -[[package]] -name = "dashscope" -version = "1.17.1" -description = "dashscope client sdk library" -optional = false -python-versions = ">=3.8.0" -files = [ - {file = "dashscope-1.17.1-py3-none-any.whl", hash = "sha256:1e07e7ff4544684797f86ede646766b5ab8f5bd6eb43d2d01f0f757a2941efe1"}, -] - -[package.dependencies] -aiohttp = "*" -requests = "*" -tiktoken = {version = "*", optional = true, markers = "extra == \"tokenizer\""} - -[package.extras] -tokenizer = ["tiktoken"] - -[[package]] -name = "dataclass-wizard" -version = "0.22.3" -description = "Marshal dataclasses to/from JSON. Use field properties with initial values. Construct a dataclass schema with JSON input." -optional = false -python-versions = "*" -files = [ - {file = "dataclass-wizard-0.22.3.tar.gz", hash = "sha256:4c46591782265058f1148cfd1f54a3a91221e63986fdd04c9d59f4ced61f4424"}, - {file = "dataclass_wizard-0.22.3-py2.py3-none-any.whl", hash = "sha256:63751203e54b9b9349212cc185331da73c1adc99c51312575eb73bb5c00c1962"}, -] - -[package.extras] -dev = ["Sphinx (==5.3.0)", "bump2version (==1.0.1)", "coverage (>=6.2)", "dataclass-factory (==2.12)", "dataclasses-json (==0.5.6)", "flake8 (>=3)", "jsons (==1.6.1)", "pip (>=21.3.1)", "pytest (==7.0.1)", "pytest-cov (==3.0.0)", "pytest-mock (>=3.6.1)", "pytimeparse (==1.1.8)", "sphinx-issues (==3.0.1)", "sphinx-issues (==4.0.0)", "tox (==3.24.5)", "twine (==3.8.0)", "watchdog[watchmedo] (==2.1.6)", "wheel (==0.37.1)", "wheel (==0.42.0)"] -timedelta = ["pytimeparse (>=1.1.7)"] -yaml = ["PyYAML (>=5.3)"] - -[[package]] -name = "dataclasses" -version = "0.6" -description = "A backport of the dataclasses module for Python 3.6" -optional = false -python-versions = "*" -files = [ - {file = "dataclasses-0.6-py3-none-any.whl", hash = "sha256:454a69d788c7fda44efd71e259be79577822f5e3f53f029a22d08004e951dc9f"}, - {file = "dataclasses-0.6.tar.gz", hash = "sha256:6988bd2b895eef432d562370bb707d540f32f7360ab13da45340101bc2307d84"}, -] - [[package]] name = "dataclasses-json" version = "0.6.7" @@ -2117,23 +1638,6 @@ files = [ marshmallow = ">=3.18.0,<4.0.0" typing-inspect = ">=0.4.0,<1" -[[package]] -name = "db-dtypes" -version = "1.3.0" -description = "Pandas Data Types for SQL systems (BigQuery, Spanner)" -optional = false -python-versions = ">=3.7" -files = [ - {file = "db_dtypes-1.3.0-py2.py3-none-any.whl", hash = "sha256:7e65c59f849ccbe6f7bc4d0253edcc212a7907662906921caba3e4aadd0bc277"}, - {file = "db_dtypes-1.3.0.tar.gz", hash = "sha256:7bcbc8858b07474dc85b77bb2f3ae488978d1336f5ea73b58c39d9118bc3e91b"}, -] - -[package.dependencies] -numpy = ">=1.16.6" -packaging = ">=17.0" -pandas = ">=0.24.2" -pyarrow = ">=3.0.0" - [[package]] name = "defusedxml" version = "0.7.1" @@ -2164,13 +1668,13 @@ dev = ["PyTest", "PyTest-Cov", "bump2version (<1)", "sphinx (<2)", "tox"] [[package]] name = "dill" -version = "0.3.8" +version = "0.3.9" description = "serialize all of Python" optional = false python-versions = ">=3.8" files = [ - {file = "dill-0.3.8-py3-none-any.whl", hash = "sha256:c36ca9ffb54365bdd2f8eb3eff7d2a21237f8452b57ace88b1ac615b7e815bd7"}, - {file = "dill-0.3.8.tar.gz", hash = "sha256:3ebe3c479ad625c4553aca177444d89b486b1d84982eeacded644afc0cf797ca"}, + {file = "dill-0.3.9-py3-none-any.whl", hash = "sha256:468dff3b89520b474c0397703366b7b95eebe6303f108adf9b19da1f702be87a"}, + {file = "dill-0.3.9.tar.gz", hash = "sha256:81aa267dddf68cbfe8029c42ca9ec6a4ab3b22371d1c450abc54422577b4512c"}, ] 
[package.extras] @@ -2240,79 +1744,15 @@ ply = ">=3.11,<4.0" typing_extensions = ">=4.0,<5.0" [[package]] -name = "duckdb" -version = "1.1.0" -description = "DuckDB in-process database" +name = "durationpy" +version = "0.7" +description = "Module for converting between datetime.timedelta and Go's Duration strings." optional = false -python-versions = ">=3.7.0" +python-versions = "*" files = [ - {file = "duckdb-1.1.0-cp310-cp310-macosx_12_0_arm64.whl", hash = "sha256:5e4cbc408e6e41146dea89b9044dae7356e353db0c96b183e5583ee02bc6ae5d"}, - {file = "duckdb-1.1.0-cp310-cp310-macosx_12_0_universal2.whl", hash = "sha256:6370ae27ec8167ccfbefb94f58ad9fdc7bac142399960549d6d367f233189868"}, - {file = "duckdb-1.1.0-cp310-cp310-macosx_12_0_x86_64.whl", hash = "sha256:4e1c3414f7fd01f4810dc8b335deffc91933a159282d65fef11c1286bc0ded04"}, - {file = "duckdb-1.1.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c6bc2a58689adf5520303c5f68b065b9f980bd31f1366c541b8c7490abaf55cd"}, - {file = "duckdb-1.1.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d02be208d2885ca085d4c852b911493b8cdac9d6eae893259da32bd72a437c25"}, - {file = "duckdb-1.1.0-cp310-cp310-manylinux_2_24_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:655df442ceebfc6f3fd6c8766e04b60d44dddedfa90275d794f9fab2d3180879"}, - {file = "duckdb-1.1.0-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:6e183729bb64be7798ccbfda6283ebf423c869268c25af2b56929e48f763be2f"}, - {file = "duckdb-1.1.0-cp310-cp310-win_amd64.whl", hash = "sha256:61fb838da51e07ceb0222c4406b059b90e10efcc453c19a3650b73c0112138c4"}, - {file = "duckdb-1.1.0-cp311-cp311-macosx_12_0_arm64.whl", hash = "sha256:7807e2f0d3344668e433f0dc1f54bfaddd410589611393e9a7ed56f8dec9514f"}, - {file = "duckdb-1.1.0-cp311-cp311-macosx_12_0_universal2.whl", hash = "sha256:3da30b7b466f710d52caa1fdc3ef0bf4176ad7f115953cd9f8b0fbf0f723778f"}, - {file = "duckdb-1.1.0-cp311-cp311-macosx_12_0_x86_64.whl", hash = "sha256:b9b6a77ef0183f561b1fc2945fcc762a71570ffd33fea4e3a855d413ed596fe4"}, - {file = "duckdb-1.1.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:16243e66a9fd0e64ee265f2634d137adc6593f54ddf3ef55cb8a29e1decf6e54"}, - {file = "duckdb-1.1.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:42b910a149e00f40a1766dc74fa309d4255b912a5d2fdcc387287658048650f6"}, - {file = "duckdb-1.1.0-cp311-cp311-manylinux_2_24_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:47849d546dc4238c0f20e95fe53b621aa5b08684e68fff91fd84a7092be91a17"}, - {file = "duckdb-1.1.0-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:11ec967b67159361ceade34095796a8d19368ea5c30cad988f44896b082b0816"}, - {file = "duckdb-1.1.0-cp311-cp311-win_amd64.whl", hash = "sha256:510b5885ed6c267b9c0e1e7c6138fdffc2dd6f934a5a95b76da85da127213338"}, - {file = "duckdb-1.1.0-cp312-cp312-macosx_12_0_arm64.whl", hash = "sha256:657bc7ac64d5faf069a782ae73afac51ef30ae2e5d0e09ce6a09d03db84ab35e"}, - {file = "duckdb-1.1.0-cp312-cp312-macosx_12_0_universal2.whl", hash = "sha256:89f3de8cba57d19b41cd3c47dd06d979bd2a2ffead115480e37afbe72b02896d"}, - {file = "duckdb-1.1.0-cp312-cp312-macosx_12_0_x86_64.whl", hash = "sha256:f6486323ab20656d22ffa8f3c6e109dde30d0b327b7c831f22ebcfe747f97fb0"}, - {file = "duckdb-1.1.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:78a4510f82431ee3f14db689fe8727a4a9062c8f2fbb3bcfe3bfad3c1a198004"}, - {file = "duckdb-1.1.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = 
"sha256:64bf2a6e23840d662bd2ac09206a9bd4fa657418884d69e5c352d4456dc70b3c"}, - {file = "duckdb-1.1.0-cp312-cp312-manylinux_2_24_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:23fc9aa0af74e3803ed90c8d98280fd5bcac8c940592bf6288e8fd60fb051d00"}, - {file = "duckdb-1.1.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:1f3aea31341ce400640dd522e4399b941f66df17e39884f446638fe958d6117c"}, - {file = "duckdb-1.1.0-cp312-cp312-win_amd64.whl", hash = "sha256:3db4ab31c20de4edaef152930836b38e7662cd71370748fdf2c38ba9cf854dc4"}, - {file = "duckdb-1.1.0-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e3b6b4fe1edfe35f64f403a9f0ab75258cee35abd964356893ee37424174b7e4"}, - {file = "duckdb-1.1.0-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:aad02f50d5a2020822d1638fc1a9bcf082056f11d2e15ccfc1c1ed4d0f85a3be"}, - {file = "duckdb-1.1.0-cp37-cp37m-manylinux_2_24_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:eb66e9e7391801928ea134dcab12d2e4c97f2ce0391c603a3e480bbb15830bc8"}, - {file = "duckdb-1.1.0-cp37-cp37m-musllinux_1_2_x86_64.whl", hash = "sha256:069fb7bca459e31edb32a61f0eea95d7a8a766bef7b8318072563abf8e939593"}, - {file = "duckdb-1.1.0-cp37-cp37m-win_amd64.whl", hash = "sha256:e39f9b7b62e64e10d421ff04480290a70129c38067d1a4f600e9212b10542c5a"}, - {file = "duckdb-1.1.0-cp38-cp38-macosx_12_0_arm64.whl", hash = "sha256:55ef98bcc7ba745752607f1b926e8d9b7ce32c42c423bbad10c44820aefe23a7"}, - {file = "duckdb-1.1.0-cp38-cp38-macosx_12_0_universal2.whl", hash = "sha256:e2a08175e43b865c1e9611efd18cacd29ddd69093de442b1ebdf312071df7719"}, - {file = "duckdb-1.1.0-cp38-cp38-macosx_12_0_x86_64.whl", hash = "sha256:0e3644b1f034012d82b9baa12a7ea306fe71dc6623731b28c753c4a617ff9499"}, - {file = "duckdb-1.1.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:211a33c1ddb5cc609f75eb43772b0b03b45d2fa89bec107e4715267ca907806a"}, - {file = "duckdb-1.1.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8e74b6f8a5145abbf7e6c1a2a61f0adbcd493c19b358f524ec9a3cebdf362abb"}, - {file = "duckdb-1.1.0-cp38-cp38-manylinux_2_24_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:58f1633dd2c5af5088ae2d119418e200855d0699d84f2fae9d46d30f404bcead"}, - {file = "duckdb-1.1.0-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:d18caea926b1e301c29b140418fca697aad728129e269b4f82c2795a184549e1"}, - {file = "duckdb-1.1.0-cp38-cp38-win_amd64.whl", hash = "sha256:cd9fb1408942411ad360f8414bc3fbf0091c396ca903d947a10f2e31324d5cbd"}, - {file = "duckdb-1.1.0-cp39-cp39-macosx_12_0_arm64.whl", hash = "sha256:bd11bc899cebf5ff936d1276a2dfb7b7db08aba3bcc42924afeafc2163bddb43"}, - {file = "duckdb-1.1.0-cp39-cp39-macosx_12_0_universal2.whl", hash = "sha256:53825a63193c582a78c152ea53de8d145744ddbeea18f452625a82ebc33eb14a"}, - {file = "duckdb-1.1.0-cp39-cp39-macosx_12_0_x86_64.whl", hash = "sha256:29dc18087de47563b3859a6b98bbed96e1c96ce5db829646dc3b16a916997e7d"}, - {file = "duckdb-1.1.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ecb19319883564237a7a03a104dbe7f445e73519bb67108fcab3d19b6b91fe30"}, - {file = "duckdb-1.1.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:aac2fcabe2d5072c252d0b3087365f431de812d8199705089fb073e4d039d19c"}, - {file = "duckdb-1.1.0-cp39-cp39-manylinux_2_24_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:d89eaaa5df8a57e7d2bc1f4c46493bb1fee319a00155f2015810ad2ace6570ae"}, - {file = "duckdb-1.1.0-cp39-cp39-musllinux_1_2_x86_64.whl", hash = 
"sha256:d86a6926313913cd2cc7e08816d3e7f72ba340adf2959279b1a80058be6526d9"}, - {file = "duckdb-1.1.0-cp39-cp39-win_amd64.whl", hash = "sha256:d8333f3e85fa2a0f1c222b752c2bd42ea875235ff88492f7bcbb6867d0f644eb"}, - {file = "duckdb-1.1.0.tar.gz", hash = "sha256:b4d4c12b1f98732151bd31377753e0da1a20f6423016d2d097d2e31953ec7c23"}, + {file = "durationpy-0.7.tar.gz", hash = "sha256:8447c43df4f1a0b434e70c15a38d77f5c9bd17284bfc1ff1d430f233d5083732"}, ] -[[package]] -name = "duckduckgo-search" -version = "6.2.12" -description = "Search for words, documents, images, news, maps and text translation using the DuckDuckGo.com search engine." -optional = false -python-versions = ">=3.8" -files = [ - {file = "duckduckgo_search-6.2.12-py3-none-any.whl", hash = "sha256:0d379c1f845b632a41553efb13d571788f19ad289229e641a27b5710d92097a6"}, - {file = "duckduckgo_search-6.2.12.tar.gz", hash = "sha256:04f9f1459763668d268344c7a32d943173d0e060dad53a5c2df4b4d3ca9a74cf"}, -] - -[package.dependencies] -click = ">=8.1.7" -primp = ">=0.6.2" - -[package.extras] -dev = ["mypy (>=1.11.1)", "pytest (>=8.3.1)", "pytest-asyncio (>=0.23.8)", "ruff (>=0.6.1)"] -lxml = ["lxml (>=5.2.2)"] - [[package]] name = "elastic-transport" version = "8.15.0" @@ -2356,18 +1796,15 @@ vectorstore-mmr = ["numpy (>=1)", "simsimd (>=3)"] [[package]] name = "emoji" -version = "2.12.1" +version = "2.13.2" description = "Emoji for Python" optional = false python-versions = ">=3.7" files = [ - {file = "emoji-2.12.1-py3-none-any.whl", hash = "sha256:a00d62173bdadc2510967a381810101624a2f0986145b8da0cffa42e29430235"}, - {file = "emoji-2.12.1.tar.gz", hash = "sha256:4aa0488817691aa58d83764b6c209f8a27c0b3ab3f89d1b8dceca1a62e4973eb"}, + {file = "emoji-2.13.2-py3-none-any.whl", hash = "sha256:ef6f2ee63b245e934c763b1a9a0637713955aa3d9e322432e036bb60559de4d6"}, + {file = "emoji-2.13.2.tar.gz", hash = "sha256:f95d10d96c5f21299ed2c4b32511611ba890b8c07f5f2bf5b04d5d3eee91fd19"}, ] -[package.dependencies] -typing-extensions = ">=4.7.0" - [package.extras] dev = ["coverage", "pytest (>=7.4.4)"] @@ -2432,13 +1869,13 @@ test = ["pytest (>=6)"] [[package]] name = "fastapi" -version = "0.114.2" +version = "0.115.0" description = "FastAPI framework, high performance, easy to learn, fast to code, ready for production" optional = false python-versions = ">=3.8" files = [ - {file = "fastapi-0.114.2-py3-none-any.whl", hash = "sha256:44474a22913057b1acb973ab90f4b671ba5200482e7622816d79105dcece1ac5"}, - {file = "fastapi-0.114.2.tar.gz", hash = "sha256:0adb148b62edb09e8c6eeefa3ea934e8f276dabc038c5a82989ea6346050c3da"}, + {file = "fastapi-0.115.0-py3-none-any.whl", hash = "sha256:17ea427674467486e997206a5ab25760f6b09e069f099b96f5b55a32fb6f1631"}, + {file = "fastapi-0.115.0.tar.gz", hash = "sha256:f93b4ca3529a8ebc6fc3fcf710e5efa8de3df9b41570958abf1d97d843138004"}, ] [package.dependencies] @@ -2450,95 +1887,20 @@ typing-extensions = ">=4.8.0" all = ["email-validator (>=2.0.0)", "fastapi-cli[standard] (>=0.0.5)", "httpx (>=0.23.0)", "itsdangerous (>=1.1.0)", "jinja2 (>=2.11.2)", "orjson (>=3.2.1)", "pydantic-extra-types (>=2.0.0)", "pydantic-settings (>=2.0.0)", "python-multipart (>=0.0.7)", "pyyaml (>=5.3.1)", "ujson (>=4.0.1,!=4.0.2,!=4.1.0,!=4.2.0,!=4.3.0,!=5.0.0,!=5.1.0)", "uvicorn[standard] (>=0.12.0)"] standard = ["email-validator (>=2.0.0)", "fastapi-cli[standard] (>=0.0.5)", "httpx (>=0.23.0)", "jinja2 (>=2.11.2)", "python-multipart (>=0.0.7)", "uvicorn[standard] (>=0.12.0)"] -[[package]] -name = "fastavro" -version = "1.9.7" -description = "Fast read/write of AVRO files" 
-optional = false -python-versions = ">=3.8" -files = [ - {file = "fastavro-1.9.7-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:cc811fb4f7b5ae95f969cda910241ceacf82e53014c7c7224df6f6e0ca97f52f"}, - {file = "fastavro-1.9.7-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:fb8749e419a85f251bf1ac87d463311874972554d25d4a0b19f6bdc56036d7cf"}, - {file = "fastavro-1.9.7-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0b2f9bafa167cb4d1c3dd17565cb5bf3d8c0759e42620280d1760f1e778e07fc"}, - {file = "fastavro-1.9.7-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:e87d04b235b29f7774d226b120da2ca4e60b9e6fdf6747daef7f13f218b3517a"}, - {file = "fastavro-1.9.7-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:b525c363e267ed11810aaad8fbdbd1c3bd8837d05f7360977d72a65ab8c6e1fa"}, - {file = "fastavro-1.9.7-cp310-cp310-win_amd64.whl", hash = "sha256:6312fa99deecc319820216b5e1b1bd2d7ebb7d6f221373c74acfddaee64e8e60"}, - {file = "fastavro-1.9.7-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:ec8499dc276c2d2ef0a68c0f1ad11782b2b956a921790a36bf4c18df2b8d4020"}, - {file = "fastavro-1.9.7-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:76d9d96f98052615ab465c63ba8b76ed59baf2e3341b7b169058db104cbe2aa0"}, - {file = "fastavro-1.9.7-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:919f3549e07a8a8645a2146f23905955c35264ac809f6c2ac18142bc5b9b6022"}, - {file = "fastavro-1.9.7-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:9de1fa832a4d9016724cd6facab8034dc90d820b71a5d57c7e9830ffe90f31e4"}, - {file = "fastavro-1.9.7-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:1d09227d1f48f13281bd5ceac958650805aef9a4ef4f95810128c1f9be1df736"}, - {file = "fastavro-1.9.7-cp311-cp311-win_amd64.whl", hash = "sha256:2db993ae6cdc63e25eadf9f93c9e8036f9b097a3e61d19dca42536dcc5c4d8b3"}, - {file = "fastavro-1.9.7-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:4e1289b731214a7315884c74b2ec058b6e84380ce9b18b8af5d387e64b18fc44"}, - {file = "fastavro-1.9.7-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:eac69666270a76a3a1d0444f39752061195e79e146271a568777048ffbd91a27"}, - {file = "fastavro-1.9.7-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9be089be8c00f68e343bbc64ca6d9a13e5e5b0ba8aa52bcb231a762484fb270e"}, - {file = "fastavro-1.9.7-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:d576eccfd60a18ffa028259500df67d338b93562c6700e10ef68bbd88e499731"}, - {file = "fastavro-1.9.7-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:ee9bf23c157bd7dcc91ea2c700fa3bd924d9ec198bb428ff0b47fa37fe160659"}, - {file = "fastavro-1.9.7-cp312-cp312-win_amd64.whl", hash = "sha256:b6b2ccdc78f6afc18c52e403ee68c00478da12142815c1bd8a00973138a166d0"}, - {file = "fastavro-1.9.7-cp38-cp38-macosx_11_0_universal2.whl", hash = "sha256:7313def3aea3dacface0a8b83f6d66e49a311149aa925c89184a06c1ef99785d"}, - {file = "fastavro-1.9.7-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:536f5644737ad21d18af97d909dba099b9e7118c237be7e4bd087c7abde7e4f0"}, - {file = "fastavro-1.9.7-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2af559f30383b79cf7d020a6b644c42ffaed3595f775fe8f3d7f80b1c43dfdc5"}, - {file = "fastavro-1.9.7-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:edc28ab305e3c424de5ac5eb87b48d1e07eddb6aa08ef5948fcda33cc4d995ce"}, - {file = "fastavro-1.9.7-cp38-cp38-musllinux_1_2_x86_64.whl", hash = 
"sha256:ec2e96bdabd58427fe683329b3d79f42c7b4f4ff6b3644664a345a655ac2c0a1"}, - {file = "fastavro-1.9.7-cp38-cp38-win_amd64.whl", hash = "sha256:3b683693c8a85ede496ebebe115be5d7870c150986e34a0442a20d88d7771224"}, - {file = "fastavro-1.9.7-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:58f76a5c9a312fbd37b84e49d08eb23094d36e10d43bc5df5187bc04af463feb"}, - {file = "fastavro-1.9.7-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:56304401d2f4f69f5b498bdd1552c13ef9a644d522d5de0dc1d789cf82f47f73"}, - {file = "fastavro-1.9.7-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2fcce036c6aa06269fc6a0428050fcb6255189997f5e1a728fc461e8b9d3e26b"}, - {file = "fastavro-1.9.7-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:17de68aae8c2525f5631d80f2b447a53395cdc49134f51b0329a5497277fc2d2"}, - {file = "fastavro-1.9.7-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:7c911366c625d0a997eafe0aa83ffbc6fd00d8fd4543cb39a97c6f3b8120ea87"}, - {file = "fastavro-1.9.7-cp39-cp39-win_amd64.whl", hash = "sha256:912283ed48578a103f523817fdf0c19b1755cea9b4a6387b73c79ecb8f8f84fc"}, - {file = "fastavro-1.9.7.tar.gz", hash = "sha256:13e11c6cb28626da85290933027cd419ce3f9ab8e45410ef24ce6b89d20a1f6c"}, -] - -[package.extras] -codecs = ["cramjam", "lz4", "zstandard"] -lz4 = ["lz4"] -snappy = ["cramjam"] -zstandard = ["zstandard"] - -[[package]] -name = "feedfinder2" -version = "0.0.4" -description = "Find the feed URLs for a website." -optional = false -python-versions = "*" -files = [ - {file = "feedfinder2-0.0.4.tar.gz", hash = "sha256:3701ee01a6c85f8b865a049c30ba0b4608858c803fe8e30d1d289fdbe89d0efe"}, -] - -[package.dependencies] -beautifulsoup4 = "*" -requests = "*" -six = "*" - -[[package]] -name = "feedparser" -version = "6.0.10" -description = "Universal feed parser, handles RSS 0.9x, RSS 1.0, RSS 2.0, CDF, Atom 0.3, and Atom 1.0 feeds" -optional = false -python-versions = ">=3.6" -files = [ - {file = "feedparser-6.0.10-py3-none-any.whl", hash = "sha256:79c257d526d13b944e965f6095700587f27388e50ea16fd245babe4dfae7024f"}, - {file = "feedparser-6.0.10.tar.gz", hash = "sha256:27da485f4637ce7163cdeab13a80312b93b7d0c1b775bef4a47629a3110bca51"}, -] - -[package.dependencies] -sgmllib3k = "*" - [[package]] name = "filelock" -version = "3.16.0" +version = "3.16.1" description = "A platform independent file lock." 
optional = false python-versions = ">=3.8" files = [ - {file = "filelock-3.16.0-py3-none-any.whl", hash = "sha256:f6ed4c963184f4c84dd5557ce8fece759a3724b37b80c6c4f20a2f63a4dc6609"}, - {file = "filelock-3.16.0.tar.gz", hash = "sha256:81de9eb8453c769b63369f87f11131a7ab04e367f8d97ad39dc230daa07e3bec"}, + {file = "filelock-3.16.1-py3-none-any.whl", hash = "sha256:2082e5703d51fbf98ea75855d9d5527e33d8ff23099bec374a134febee6946b0"}, + {file = "filelock-3.16.1.tar.gz", hash = "sha256:c249fbfcd5db47e5e2d6d62198e565475ee65e4831e2561c8e313fa7eb961435"}, ] [package.extras] -docs = ["furo (>=2024.8.6)", "sphinx (>=8.0.2)", "sphinx-autodoc-typehints (>=2.4)"] -testing = ["covdefaults (>=2.3)", "coverage (>=7.6.1)", "diff-cover (>=9.1.1)", "pytest (>=8.3.2)", "pytest-asyncio (>=0.24)", "pytest-cov (>=5)", "pytest-mock (>=3.14)", "pytest-timeout (>=2.3.1)", "virtualenv (>=20.26.3)"] +docs = ["furo (>=2024.8.6)", "sphinx (>=8.0.2)", "sphinx-autodoc-typehints (>=2.4.1)"] +testing = ["covdefaults (>=2.3)", "coverage (>=7.6.1)", "diff-cover (>=9.2)", "pytest (>=8.3.3)", "pytest-asyncio (>=0.24)", "pytest-cov (>=5)", "pytest-mock (>=3.14)", "pytest-timeout (>=2.3.1)", "virtualenv (>=20.26.4)"] typing = ["typing-extensions (>=4.12.2)"] [[package]] @@ -2655,24 +2017,6 @@ six = ">=1.3.0" [package.extras] docs = ["sphinx"] -[[package]] -name = "flask-sock" -version = "0.7.0" -description = "WebSocket support for Flask" -optional = false -python-versions = ">=3.6" -files = [ - {file = "flask-sock-0.7.0.tar.gz", hash = "sha256:e023b578284195a443b8d8bdb4469e6a6acf694b89aeb51315b1a34fcf427b7d"}, - {file = "flask_sock-0.7.0-py3-none-any.whl", hash = "sha256:caac4d679392aaf010d02fabcf73d52019f5bdaf1c9c131ec5a428cb3491204a"}, -] - -[package.dependencies] -flask = ">=2" -simple-websocket = ">=0.5.1" - -[package.extras] -docs = ["sphinx"] - [[package]] name = "flask-sqlalchemy" version = "3.1.1" @@ -2699,113 +2043,6 @@ files = [ {file = "flatbuffers-24.3.25.tar.gz", hash = "sha256:de2ec5b203f21441716617f38443e0a8ebf3d25bf0d9c0bb0ce68fa00ad546a4"}, ] -[[package]] -name = "fonttools" -version = "4.53.1" -description = "Tools to manipulate font files" -optional = false -python-versions = ">=3.8" -files = [ - {file = "fonttools-4.53.1-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:0679a30b59d74b6242909945429dbddb08496935b82f91ea9bf6ad240ec23397"}, - {file = "fonttools-4.53.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:e8bf06b94694251861ba7fdeea15c8ec0967f84c3d4143ae9daf42bbc7717fe3"}, - {file = "fonttools-4.53.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b96cd370a61f4d083c9c0053bf634279b094308d52fdc2dd9a22d8372fdd590d"}, - {file = "fonttools-4.53.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a1c7c5aa18dd3b17995898b4a9b5929d69ef6ae2af5b96d585ff4005033d82f0"}, - {file = "fonttools-4.53.1-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:e013aae589c1c12505da64a7d8d023e584987e51e62006e1bb30d72f26522c41"}, - {file = "fonttools-4.53.1-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:9efd176f874cb6402e607e4cc9b4a9cd584d82fc34a4b0c811970b32ba62501f"}, - {file = "fonttools-4.53.1-cp310-cp310-win32.whl", hash = "sha256:c8696544c964500aa9439efb6761947393b70b17ef4e82d73277413f291260a4"}, - {file = "fonttools-4.53.1-cp310-cp310-win_amd64.whl", hash = "sha256:8959a59de5af6d2bec27489e98ef25a397cfa1774b375d5787509c06659b3671"}, - {file = "fonttools-4.53.1-cp311-cp311-macosx_10_9_universal2.whl", hash = 
"sha256:da33440b1413bad53a8674393c5d29ce64d8c1a15ef8a77c642ffd900d07bfe1"}, - {file = "fonttools-4.53.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:5ff7e5e9bad94e3a70c5cd2fa27f20b9bb9385e10cddab567b85ce5d306ea923"}, - {file = "fonttools-4.53.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c6e7170d675d12eac12ad1a981d90f118c06cf680b42a2d74c6c931e54b50719"}, - {file = "fonttools-4.53.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bee32ea8765e859670c4447b0817514ca79054463b6b79784b08a8df3a4d78e3"}, - {file = "fonttools-4.53.1-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:6e08f572625a1ee682115223eabebc4c6a2035a6917eac6f60350aba297ccadb"}, - {file = "fonttools-4.53.1-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:b21952c092ffd827504de7e66b62aba26fdb5f9d1e435c52477e6486e9d128b2"}, - {file = "fonttools-4.53.1-cp311-cp311-win32.whl", hash = "sha256:9dfdae43b7996af46ff9da520998a32b105c7f098aeea06b2226b30e74fbba88"}, - {file = "fonttools-4.53.1-cp311-cp311-win_amd64.whl", hash = "sha256:d4d0096cb1ac7a77b3b41cd78c9b6bc4a400550e21dc7a92f2b5ab53ed74eb02"}, - {file = "fonttools-4.53.1-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:d92d3c2a1b39631a6131c2fa25b5406855f97969b068e7e08413325bc0afba58"}, - {file = "fonttools-4.53.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:3b3c8ebafbee8d9002bd8f1195d09ed2bd9ff134ddec37ee8f6a6375e6a4f0e8"}, - {file = "fonttools-4.53.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:32f029c095ad66c425b0ee85553d0dc326d45d7059dbc227330fc29b43e8ba60"}, - {file = "fonttools-4.53.1-cp312-cp312-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:10f5e6c3510b79ea27bb1ebfcc67048cde9ec67afa87c7dd7efa5c700491ac7f"}, - {file = "fonttools-4.53.1-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:f677ce218976496a587ab17140da141557beb91d2a5c1a14212c994093f2eae2"}, - {file = "fonttools-4.53.1-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:9e6ceba2a01b448e36754983d376064730690401da1dd104ddb543519470a15f"}, - {file = "fonttools-4.53.1-cp312-cp312-win32.whl", hash = "sha256:791b31ebbc05197d7aa096bbc7bd76d591f05905d2fd908bf103af4488e60670"}, - {file = "fonttools-4.53.1-cp312-cp312-win_amd64.whl", hash = "sha256:6ed170b5e17da0264b9f6fae86073be3db15fa1bd74061c8331022bca6d09bab"}, - {file = "fonttools-4.53.1-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:c818c058404eb2bba05e728d38049438afd649e3c409796723dfc17cd3f08749"}, - {file = "fonttools-4.53.1-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:651390c3b26b0c7d1f4407cad281ee7a5a85a31a110cbac5269de72a51551ba2"}, - {file = "fonttools-4.53.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e54f1bba2f655924c1138bbc7fa91abd61f45c68bd65ab5ed985942712864bbb"}, - {file = "fonttools-4.53.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c9cd19cf4fe0595ebdd1d4915882b9440c3a6d30b008f3cc7587c1da7b95be5f"}, - {file = "fonttools-4.53.1-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:2af40ae9cdcb204fc1d8f26b190aa16534fcd4f0df756268df674a270eab575d"}, - {file = "fonttools-4.53.1-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:35250099b0cfb32d799fb5d6c651220a642fe2e3c7d2560490e6f1d3f9ae9169"}, - {file = "fonttools-4.53.1-cp38-cp38-win32.whl", hash = "sha256:f08df60fbd8d289152079a65da4e66a447efc1d5d5a4d3f299cdd39e3b2e4a7d"}, - {file = "fonttools-4.53.1-cp38-cp38-win_amd64.whl", hash = 
"sha256:7b6b35e52ddc8fb0db562133894e6ef5b4e54e1283dff606fda3eed938c36fc8"}, - {file = "fonttools-4.53.1-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:75a157d8d26c06e64ace9df037ee93a4938a4606a38cb7ffaf6635e60e253b7a"}, - {file = "fonttools-4.53.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:4824c198f714ab5559c5be10fd1adf876712aa7989882a4ec887bf1ef3e00e31"}, - {file = "fonttools-4.53.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:becc5d7cb89c7b7afa8321b6bb3dbee0eec2b57855c90b3e9bf5fb816671fa7c"}, - {file = "fonttools-4.53.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:84ec3fb43befb54be490147b4a922b5314e16372a643004f182babee9f9c3407"}, - {file = "fonttools-4.53.1-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:73379d3ffdeecb376640cd8ed03e9d2d0e568c9d1a4e9b16504a834ebadc2dfb"}, - {file = "fonttools-4.53.1-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:02569e9a810f9d11f4ae82c391ebc6fb5730d95a0657d24d754ed7763fb2d122"}, - {file = "fonttools-4.53.1-cp39-cp39-win32.whl", hash = "sha256:aae7bd54187e8bf7fd69f8ab87b2885253d3575163ad4d669a262fe97f0136cb"}, - {file = "fonttools-4.53.1-cp39-cp39-win_amd64.whl", hash = "sha256:e5b708073ea3d684235648786f5f6153a48dc8762cdfe5563c57e80787c29fbb"}, - {file = "fonttools-4.53.1-py3-none-any.whl", hash = "sha256:f1f8758a2ad110bd6432203a344269f445a2907dc24ef6bccfd0ac4e14e0d71d"}, - {file = "fonttools-4.53.1.tar.gz", hash = "sha256:e128778a8e9bc11159ce5447f76766cefbd876f44bd79aff030287254e4752c4"}, -] - -[package.extras] -all = ["brotli (>=1.0.1)", "brotlicffi (>=0.8.0)", "fs (>=2.2.0,<3)", "lxml (>=4.0)", "lz4 (>=1.7.4.2)", "matplotlib", "munkres", "pycairo", "scipy", "skia-pathops (>=0.5.0)", "sympy", "uharfbuzz (>=0.23.0)", "unicodedata2 (>=15.1.0)", "xattr", "zopfli (>=0.1.4)"] -graphite = ["lz4 (>=1.7.4.2)"] -interpolatable = ["munkres", "pycairo", "scipy"] -lxml = ["lxml (>=4.0)"] -pathops = ["skia-pathops (>=0.5.0)"] -plot = ["matplotlib"] -repacker = ["uharfbuzz (>=0.23.0)"] -symfont = ["sympy"] -type1 = ["xattr"] -ufo = ["fs (>=2.2.0,<3)"] -unicode = ["unicodedata2 (>=15.1.0)"] -woff = ["brotli (>=1.0.1)", "brotlicffi (>=0.8.0)", "zopfli (>=0.1.4)"] - -[[package]] -name = "frozendict" -version = "2.4.4" -description = "A simple immutable dictionary" -optional = false -python-versions = ">=3.6" -files = [ - {file = "frozendict-2.4.4-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:4a59578d47b3949437519b5c39a016a6116b9e787bb19289e333faae81462e59"}, - {file = "frozendict-2.4.4-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:12a342e439aef28ccec533f0253ea53d75fe9102bd6ea928ff530e76eac38906"}, - {file = "frozendict-2.4.4-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7f79c26dff10ce11dad3b3627c89bb2e87b9dd5958c2b24325f16a23019b8b94"}, - {file = "frozendict-2.4.4-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:2bd009cf4fc47972838a91e9b83654dc9a095dc4f2bb3a37c3f3124c8a364543"}, - {file = "frozendict-2.4.4-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:87ebcde21565a14fe039672c25550060d6f6d88cf1f339beac094c3b10004eb0"}, - {file = "frozendict-2.4.4-cp310-cp310-win_amd64.whl", hash = "sha256:fefeb700bc7eb8b4c2dc48704e4221860d254c8989fb53488540bc44e44a1ac2"}, - {file = "frozendict-2.4.4-cp310-cp310-win_arm64.whl", hash = "sha256:4297d694eb600efa429769125a6f910ec02b85606f22f178bafbee309e7d3ec7"}, - {file = "frozendict-2.4.4-cp36-cp36m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = 
"sha256:812ab17522ba13637826e65454115a914c2da538356e85f43ecea069813e4b33"}, - {file = "frozendict-2.4.4-cp36-cp36m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7fee9420475bb6ff357000092aa9990c2f6182b2bab15764330f4ad7de2eae49"}, - {file = "frozendict-2.4.4-cp36-cp36m-musllinux_1_1_aarch64.whl", hash = "sha256:3148062675536724502c6344d7c485dd4667fdf7980ca9bd05e338ccc0c4471e"}, - {file = "frozendict-2.4.4-cp36-cp36m-musllinux_1_1_x86_64.whl", hash = "sha256:78c94991944dd33c5376f720228e5b252ee67faf3bac50ef381adc9e51e90d9d"}, - {file = "frozendict-2.4.4-cp36-cp36m-win_amd64.whl", hash = "sha256:1697793b5f62b416c0fc1d94638ec91ed3aa4ab277f6affa3a95216ecb3af170"}, - {file = "frozendict-2.4.4-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:199a4d32194f3afed6258de7e317054155bc9519252b568d9cfffde7e4d834e5"}, - {file = "frozendict-2.4.4-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:85375ec6e979e6373bffb4f54576a68bf7497c350861d20686ccae38aab69c0a"}, - {file = "frozendict-2.4.4-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:2d8536e068d6bf281f23fa835ac07747fb0f8851879dd189e9709f9567408b4d"}, - {file = "frozendict-2.4.4-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:259528ba6b56fa051bc996f1c4d8b57e30d6dd3bc2f27441891b04babc4b5e73"}, - {file = "frozendict-2.4.4-cp37-cp37m-win_amd64.whl", hash = "sha256:07c3a5dee8bbb84cba770e273cdbf2c87c8e035903af8f781292d72583416801"}, - {file = "frozendict-2.4.4-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:6874fec816b37b6eb5795b00e0574cba261bf59723e2de607a195d5edaff0786"}, - {file = "frozendict-2.4.4-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c8f92425686323a950337da4b75b4c17a3327b831df8c881df24038d560640d4"}, - {file = "frozendict-2.4.4-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5d58d9a8d9e49662c6dafbea5e641f97decdb3d6ccd76e55e79818415362ba25"}, - {file = "frozendict-2.4.4-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:93a7b19afb429cbf99d56faf436b45ef2fa8fe9aca89c49eb1610c3bd85f1760"}, - {file = "frozendict-2.4.4-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:2b70b431e3a72d410a2cdf1497b3aba2f553635e0c0f657ce311d841bf8273b6"}, - {file = "frozendict-2.4.4-cp38-cp38-win_amd64.whl", hash = "sha256:e1b941132d79ce72d562a13341d38fc217bc1ee24d8c35a20d754e79ff99e038"}, - {file = "frozendict-2.4.4-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:dc2228874eacae390e63fd4f2bb513b3144066a977dc192163c9f6c7f6de6474"}, - {file = "frozendict-2.4.4-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:63aa49f1919af7d45fb8fd5dec4c0859bc09f46880bd6297c79bb2db2969b63d"}, - {file = "frozendict-2.4.4-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c6bf9260018d653f3cab9bd147bd8592bf98a5c6e338be0491ced3c196c034a3"}, - {file = "frozendict-2.4.4-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:6eb716e6a6d693c03b1d53280a1947716129f5ef9bcdd061db5c17dea44b80fe"}, - {file = "frozendict-2.4.4-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:d13b4310db337f4d2103867c5a05090b22bc4d50ca842093779ef541ea9c9eea"}, - {file = "frozendict-2.4.4-cp39-cp39-win_amd64.whl", hash = "sha256:b3b967d5065872e27b06f785a80c0ed0a45d1f7c9b85223da05358e734d858ca"}, - {file = "frozendict-2.4.4-cp39-cp39-win_arm64.whl", hash = "sha256:4ae8d05c8d0b6134bfb6bfb369d5fa0c4df21eabb5ca7f645af95fdc6689678e"}, - {file = "frozendict-2.4.4-py311-none-any.whl", hash = 
"sha256:705efca8d74d3facbb6ace80ab3afdd28eb8a237bfb4063ed89996b024bc443d"}, - {file = "frozendict-2.4.4-py312-none-any.whl", hash = "sha256:d9647563e76adb05b7cde2172403123380871360a114f546b4ae1704510801e5"}, - {file = "frozendict-2.4.4.tar.gz", hash = "sha256:3f7c031b26e4ee6a3f786ceb5e3abf1181c4ade92dce1f847da26ea2c96008c7"}, -] - [[package]] name = "frozenlist" version = "1.4.1" @@ -3211,30 +2448,30 @@ xai = ["tensorflow (>=2.3.0,<3.0.0dev)"] [[package]] name = "google-cloud-bigquery" -version = "3.25.0" +version = "3.26.0" description = "Google BigQuery API client library" optional = false python-versions = ">=3.7" files = [ - {file = "google-cloud-bigquery-3.25.0.tar.gz", hash = "sha256:5b2aff3205a854481117436836ae1403f11f2594e6810a98886afd57eda28509"}, - {file = "google_cloud_bigquery-3.25.0-py2.py3-none-any.whl", hash = "sha256:7f0c371bc74d2a7fb74dacbc00ac0f90c8c2bec2289b51dd6685a275873b1ce9"}, + {file = "google_cloud_bigquery-3.26.0-py2.py3-none-any.whl", hash = "sha256:e0e9ad28afa67a18696e624cbccab284bf2c0a3f6eeb9eeb0426c69b943793a8"}, + {file = "google_cloud_bigquery-3.26.0.tar.gz", hash = "sha256:edbdc788beea659e04c0af7fe4dcd6d9155344b98951a0d5055bd2f15da4ba23"}, ] [package.dependencies] -google-api-core = {version = ">=1.34.1,<2.0.dev0 || >=2.11.dev0,<3.0.0dev", extras = ["grpc"]} +google-api-core = {version = ">=2.11.1,<3.0.0dev", extras = ["grpc"]} google-auth = ">=2.14.1,<3.0.0dev" -google-cloud-core = ">=1.6.0,<3.0.0dev" -google-resumable-media = ">=0.6.0,<3.0dev" +google-cloud-core = ">=2.4.1,<3.0.0dev" +google-resumable-media = ">=2.0.0,<3.0dev" packaging = ">=20.0.0" -python-dateutil = ">=2.7.2,<3.0dev" +python-dateutil = ">=2.7.3,<3.0dev" requests = ">=2.21.0,<3.0.0dev" [package.extras] -all = ["Shapely (>=1.8.4,<3.0.0dev)", "db-dtypes (>=0.3.0,<2.0.0dev)", "geopandas (>=0.9.0,<1.0dev)", "google-cloud-bigquery-storage (>=2.6.0,<3.0.0dev)", "grpcio (>=1.47.0,<2.0dev)", "grpcio (>=1.49.1,<2.0dev)", "importlib-metadata (>=1.0.0)", "ipykernel (>=6.0.0)", "ipython (>=7.23.1,!=8.1.0)", "ipywidgets (>=7.7.0)", "opentelemetry-api (>=1.1.0)", "opentelemetry-instrumentation (>=0.20b0)", "opentelemetry-sdk (>=1.1.0)", "pandas (>=1.1.0)", "proto-plus (>=1.15.0,<2.0.0dev)", "protobuf (>=3.19.5,!=3.20.0,!=3.20.1,!=4.21.0,!=4.21.1,!=4.21.2,!=4.21.3,!=4.21.4,!=4.21.5,<5.0.0dev)", "pyarrow (>=3.0.0)", "tqdm (>=4.7.4,<5.0.0dev)"] -bigquery-v2 = ["proto-plus (>=1.15.0,<2.0.0dev)", "protobuf (>=3.19.5,!=3.20.0,!=3.20.1,!=4.21.0,!=4.21.1,!=4.21.2,!=4.21.3,!=4.21.4,!=4.21.5,<5.0.0dev)"] +all = ["Shapely (>=1.8.4,<3.0.0dev)", "bigquery-magics (>=0.1.0)", "db-dtypes (>=0.3.0,<2.0.0dev)", "geopandas (>=0.9.0,<1.0dev)", "google-cloud-bigquery-storage (>=2.6.0,<3.0.0dev)", "grpcio (>=1.47.0,<2.0dev)", "grpcio (>=1.49.1,<2.0dev)", "importlib-metadata (>=1.0.0)", "ipykernel (>=6.0.0)", "ipywidgets (>=7.7.0)", "opentelemetry-api (>=1.1.0)", "opentelemetry-instrumentation (>=0.20b0)", "opentelemetry-sdk (>=1.1.0)", "pandas (>=1.1.0)", "proto-plus (>=1.22.3,<2.0.0dev)", "protobuf (>=3.20.2,!=4.21.0,!=4.21.1,!=4.21.2,!=4.21.3,!=4.21.4,!=4.21.5,<6.0.0dev)", "pyarrow (>=3.0.0)", "tqdm (>=4.7.4,<5.0.0dev)"] +bigquery-v2 = ["proto-plus (>=1.22.3,<2.0.0dev)", "protobuf (>=3.20.2,!=4.21.0,!=4.21.1,!=4.21.2,!=4.21.3,!=4.21.4,!=4.21.5,<6.0.0dev)"] bqstorage = ["google-cloud-bigquery-storage (>=2.6.0,<3.0.0dev)", "grpcio (>=1.47.0,<2.0dev)", "grpcio (>=1.49.1,<2.0dev)", "pyarrow (>=3.0.0)"] geopandas = ["Shapely (>=1.8.4,<3.0.0dev)", "geopandas (>=0.9.0,<1.0dev)"] -ipython = ["ipykernel (>=6.0.0)", "ipython 
(>=7.23.1,!=8.1.0)"] +ipython = ["bigquery-magics (>=0.1.0)"] ipywidgets = ["ipykernel (>=6.0.0)", "ipywidgets (>=7.7.0)"] opentelemetry = ["opentelemetry-api (>=1.1.0)", "opentelemetry-instrumentation (>=0.20b0)", "opentelemetry-sdk (>=1.1.0)"] pandas = ["db-dtypes (>=0.3.0,<2.0.0dev)", "importlib-metadata (>=1.0.0)", "pandas (>=1.1.0)", "pyarrow (>=3.0.0)"] @@ -3413,77 +2650,84 @@ grpc = ["grpcio (>=1.44.0,<2.0.0.dev0)"] [[package]] name = "greenlet" -version = "3.1.0" +version = "3.1.1" description = "Lightweight in-process concurrent programming" optional = false python-versions = ">=3.7" files = [ - {file = "greenlet-3.1.0-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:a814dc3100e8a046ff48faeaa909e80cdb358411a3d6dd5293158425c684eda8"}, - {file = "greenlet-3.1.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a771dc64fa44ebe58d65768d869fcfb9060169d203446c1d446e844b62bdfdca"}, - {file = "greenlet-3.1.0-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:0e49a65d25d7350cca2da15aac31b6f67a43d867448babf997fe83c7505f57bc"}, - {file = "greenlet-3.1.0-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:2cd8518eade968bc52262d8c46727cfc0826ff4d552cf0430b8d65aaf50bb91d"}, - {file = "greenlet-3.1.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:76dc19e660baea5c38e949455c1181bc018893f25372d10ffe24b3ed7341fb25"}, - {file = "greenlet-3.1.0-cp310-cp310-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:c0a5b1c22c82831f56f2f7ad9bbe4948879762fe0d59833a4a71f16e5fa0f682"}, - {file = "greenlet-3.1.0-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:2651dfb006f391bcb240635079a68a261b227a10a08af6349cba834a2141efa1"}, - {file = "greenlet-3.1.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:3e7e6ef1737a819819b1163116ad4b48d06cfdd40352d813bb14436024fcda99"}, - {file = "greenlet-3.1.0-cp310-cp310-win_amd64.whl", hash = "sha256:ffb08f2a1e59d38c7b8b9ac8083c9c8b9875f0955b1e9b9b9a965607a51f8e54"}, - {file = "greenlet-3.1.0-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:9730929375021ec90f6447bff4f7f5508faef1c02f399a1953870cdb78e0c345"}, - {file = "greenlet-3.1.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:713d450cf8e61854de9420fb7eea8ad228df4e27e7d4ed465de98c955d2b3fa6"}, - {file = "greenlet-3.1.0-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:4c3446937be153718250fe421da548f973124189f18fe4575a0510b5c928f0cc"}, - {file = "greenlet-3.1.0-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:1ddc7bcedeb47187be74208bc652d63d6b20cb24f4e596bd356092d8000da6d6"}, - {file = "greenlet-3.1.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:44151d7b81b9391ed759a2f2865bbe623ef00d648fed59363be2bbbd5154656f"}, - {file = "greenlet-3.1.0-cp311-cp311-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:6cea1cca3be76c9483282dc7760ea1cc08a6ecec1f0b6ca0a94ea0d17432da19"}, - {file = "greenlet-3.1.0-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:619935a44f414274a2c08c9e74611965650b730eb4efe4b2270f91df5e4adf9a"}, - {file = "greenlet-3.1.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:221169d31cada333a0c7fd087b957c8f431c1dba202c3a58cf5a3583ed973e9b"}, - {file = "greenlet-3.1.0-cp311-cp311-win_amd64.whl", hash = "sha256:01059afb9b178606b4b6e92c3e710ea1635597c3537e44da69f4531e111dd5e9"}, - {file = "greenlet-3.1.0-cp312-cp312-macosx_11_0_universal2.whl", 
hash = "sha256:24fc216ec7c8be9becba8b64a98a78f9cd057fd2dc75ae952ca94ed8a893bf27"}, - {file = "greenlet-3.1.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3d07c28b85b350564bdff9f51c1c5007dfb2f389385d1bc23288de51134ca303"}, - {file = "greenlet-3.1.0-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:243a223c96a4246f8a30ea470c440fe9db1f5e444941ee3c3cd79df119b8eebf"}, - {file = "greenlet-3.1.0-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:26811df4dc81271033a7836bc20d12cd30938e6bd2e9437f56fa03da81b0f8fc"}, - {file = "greenlet-3.1.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c9d86401550b09a55410f32ceb5fe7efcd998bd2dad9e82521713cb148a4a15f"}, - {file = "greenlet-3.1.0-cp312-cp312-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:26d9c1c4f1748ccac0bae1dbb465fb1a795a75aba8af8ca871503019f4285e2a"}, - {file = "greenlet-3.1.0-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:cd468ec62257bb4544989402b19d795d2305eccb06cde5da0eb739b63dc04665"}, - {file = "greenlet-3.1.0-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:a53dfe8f82b715319e9953330fa5c8708b610d48b5c59f1316337302af5c0811"}, - {file = "greenlet-3.1.0-cp312-cp312-win_amd64.whl", hash = "sha256:28fe80a3eb673b2d5cc3b12eea468a5e5f4603c26aa34d88bf61bba82ceb2f9b"}, - {file = "greenlet-3.1.0-cp313-cp313-macosx_11_0_universal2.whl", hash = "sha256:76b3e3976d2a452cba7aa9e453498ac72240d43030fdc6d538a72b87eaff52fd"}, - {file = "greenlet-3.1.0-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:655b21ffd37a96b1e78cc48bf254f5ea4b5b85efaf9e9e2a526b3c9309d660ca"}, - {file = "greenlet-3.1.0-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:c6f4c2027689093775fd58ca2388d58789009116844432d920e9147f91acbe64"}, - {file = "greenlet-3.1.0-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:76e5064fd8e94c3f74d9fd69b02d99e3cdb8fc286ed49a1f10b256e59d0d3a0b"}, - {file = "greenlet-3.1.0-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6a4bf607f690f7987ab3291406e012cd8591a4f77aa54f29b890f9c331e84989"}, - {file = "greenlet-3.1.0-cp313-cp313-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:037d9ac99540ace9424cb9ea89f0accfaff4316f149520b4ae293eebc5bded17"}, - {file = "greenlet-3.1.0-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:90b5bbf05fe3d3ef697103850c2ce3374558f6fe40fd57c9fac1bf14903f50a5"}, - {file = "greenlet-3.1.0-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:726377bd60081172685c0ff46afbc600d064f01053190e4450857483c4d44484"}, - {file = "greenlet-3.1.0-cp313-cp313-win_amd64.whl", hash = "sha256:d46d5069e2eeda111d6f71970e341f4bd9aeeee92074e649ae263b834286ecc0"}, - {file = "greenlet-3.1.0-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:81eeec4403a7d7684b5812a8aaa626fa23b7d0848edb3a28d2eb3220daddcbd0"}, - {file = "greenlet-3.1.0-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:4a3dae7492d16e85ea6045fd11cb8e782b63eac8c8d520c3a92c02ac4573b0a6"}, - {file = "greenlet-3.1.0-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:4b5ea3664eed571779403858d7cd0a9b0ebf50d57d2cdeafc7748e09ef8cd81a"}, - {file = "greenlet-3.1.0-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a22f4e26400f7f48faef2d69c20dc055a1f3043d330923f9abe08ea0aecc44df"}, - {file = 
"greenlet-3.1.0-cp37-cp37m-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:13ff8c8e54a10472ce3b2a2da007f915175192f18e6495bad50486e87c7f6637"}, - {file = "greenlet-3.1.0-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:f9671e7282d8c6fcabc32c0fb8d7c0ea8894ae85cee89c9aadc2d7129e1a9954"}, - {file = "greenlet-3.1.0-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:184258372ae9e1e9bddce6f187967f2e08ecd16906557c4320e3ba88a93438c3"}, - {file = "greenlet-3.1.0-cp37-cp37m-win32.whl", hash = "sha256:a0409bc18a9f85321399c29baf93545152d74a49d92f2f55302f122007cfda00"}, - {file = "greenlet-3.1.0-cp37-cp37m-win_amd64.whl", hash = "sha256:9eb4a1d7399b9f3c7ac68ae6baa6be5f9195d1d08c9ddc45ad559aa6b556bce6"}, - {file = "greenlet-3.1.0-cp38-cp38-macosx_11_0_universal2.whl", hash = "sha256:a8870983af660798dc1b529e1fd6f1cefd94e45135a32e58bd70edd694540f33"}, - {file = "greenlet-3.1.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:cfcfb73aed40f550a57ea904629bdaf2e562c68fa1164fa4588e752af6efdc3f"}, - {file = "greenlet-3.1.0-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:f9482c2ed414781c0af0b35d9d575226da6b728bd1a720668fa05837184965b7"}, - {file = "greenlet-3.1.0-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:d58ec349e0c2c0bc6669bf2cd4982d2f93bf067860d23a0ea1fe677b0f0b1e09"}, - {file = "greenlet-3.1.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:dd65695a8df1233309b701dec2539cc4b11e97d4fcc0f4185b4a12ce54db0491"}, - {file = "greenlet-3.1.0-cp38-cp38-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:665b21e95bc0fce5cab03b2e1d90ba9c66c510f1bb5fdc864f3a377d0f553f6b"}, - {file = "greenlet-3.1.0-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:d3c59a06c2c28a81a026ff11fbf012081ea34fb9b7052f2ed0366e14896f0a1d"}, - {file = "greenlet-3.1.0-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:5415b9494ff6240b09af06b91a375731febe0090218e2898d2b85f9b92abcda0"}, - {file = "greenlet-3.1.0-cp38-cp38-win32.whl", hash = "sha256:1544b8dd090b494c55e60c4ff46e238be44fdc472d2589e943c241e0169bcea2"}, - {file = "greenlet-3.1.0-cp38-cp38-win_amd64.whl", hash = "sha256:7f346d24d74c00b6730440f5eb8ec3fe5774ca8d1c9574e8e57c8671bb51b910"}, - {file = "greenlet-3.1.0-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:db1b3ccb93488328c74e97ff888604a8b95ae4f35f4f56677ca57a4fc3a4220b"}, - {file = "greenlet-3.1.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:44cd313629ded43bb3b98737bba2f3e2c2c8679b55ea29ed73daea6b755fe8e7"}, - {file = "greenlet-3.1.0-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:fad7a051e07f64e297e6e8399b4d6a3bdcad3d7297409e9a06ef8cbccff4f501"}, - {file = "greenlet-3.1.0-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:c3967dcc1cd2ea61b08b0b276659242cbce5caca39e7cbc02408222fb9e6ff39"}, - {file = "greenlet-3.1.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d45b75b0f3fd8d99f62eb7908cfa6d727b7ed190737dec7fe46d993da550b81a"}, - {file = "greenlet-3.1.0-cp39-cp39-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:2d004db911ed7b6218ec5c5bfe4cf70ae8aa2223dffbb5b3c69e342bb253cb28"}, - {file = "greenlet-3.1.0-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:b9505a0c8579899057cbefd4ec34d865ab99852baf1ff33a9481eb3924e2da0b"}, - {file = "greenlet-3.1.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = 
"sha256:5fd6e94593f6f9714dbad1aaba734b5ec04593374fa6638df61592055868f8b8"}, - {file = "greenlet-3.1.0-cp39-cp39-win32.whl", hash = "sha256:d0dd943282231480aad5f50f89bdf26690c995e8ff555f26d8a5b9887b559bcc"}, - {file = "greenlet-3.1.0-cp39-cp39-win_amd64.whl", hash = "sha256:ac0adfdb3a21dc2a24ed728b61e72440d297d0fd3a577389df566651fcd08f97"}, - {file = "greenlet-3.1.0.tar.gz", hash = "sha256:b395121e9bbe8d02a750886f108d540abe66075e61e22f7353d9acb0b81be0f0"}, + {file = "greenlet-3.1.1-cp310-cp310-macosx_11_0_universal2.whl", hash = "sha256:0bbae94a29c9e5c7e4a2b7f0aae5c17e8e90acbfd3bf6270eeba60c39fce3563"}, + {file = "greenlet-3.1.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0fde093fb93f35ca72a556cf72c92ea3ebfda3d79fc35bb19fbe685853869a83"}, + {file = "greenlet-3.1.1-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:36b89d13c49216cadb828db8dfa6ce86bbbc476a82d3a6c397f0efae0525bdd0"}, + {file = "greenlet-3.1.1-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:94b6150a85e1b33b40b1464a3f9988dcc5251d6ed06842abff82e42632fac120"}, + {file = "greenlet-3.1.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:93147c513fac16385d1036b7e5b102c7fbbdb163d556b791f0f11eada7ba65dc"}, + {file = "greenlet-3.1.1-cp310-cp310-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:da7a9bff22ce038e19bf62c4dd1ec8391062878710ded0a845bcf47cc0200617"}, + {file = "greenlet-3.1.1-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:b2795058c23988728eec1f36a4e5e4ebad22f8320c85f3587b539b9ac84128d7"}, + {file = "greenlet-3.1.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:ed10eac5830befbdd0c32f83e8aa6288361597550ba669b04c48f0f9a2c843c6"}, + {file = "greenlet-3.1.1-cp310-cp310-win_amd64.whl", hash = "sha256:77c386de38a60d1dfb8e55b8c1101d68c79dfdd25c7095d51fec2dd800892b80"}, + {file = "greenlet-3.1.1-cp311-cp311-macosx_11_0_universal2.whl", hash = "sha256:e4d333e558953648ca09d64f13e6d8f0523fa705f51cae3f03b5983489958c70"}, + {file = "greenlet-3.1.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:09fc016b73c94e98e29af67ab7b9a879c307c6731a2c9da0db5a7d9b7edd1159"}, + {file = "greenlet-3.1.1-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:d5e975ca70269d66d17dd995dafc06f1b06e8cb1ec1e9ed54c1d1e4a7c4cf26e"}, + {file = "greenlet-3.1.1-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:3b2813dc3de8c1ee3f924e4d4227999285fd335d1bcc0d2be6dc3f1f6a318ec1"}, + {file = "greenlet-3.1.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e347b3bfcf985a05e8c0b7d462ba6f15b1ee1c909e2dcad795e49e91b152c383"}, + {file = "greenlet-3.1.1-cp311-cp311-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:9e8f8c9cb53cdac7ba9793c276acd90168f416b9ce36799b9b885790f8ad6c0a"}, + {file = "greenlet-3.1.1-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:62ee94988d6b4722ce0028644418d93a52429e977d742ca2ccbe1c4f4a792511"}, + {file = "greenlet-3.1.1-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:1776fd7f989fc6b8d8c8cb8da1f6b82c5814957264d1f6cf818d475ec2bf6395"}, + {file = "greenlet-3.1.1-cp311-cp311-win_amd64.whl", hash = "sha256:48ca08c771c268a768087b408658e216133aecd835c0ded47ce955381105ba39"}, + {file = "greenlet-3.1.1-cp312-cp312-macosx_11_0_universal2.whl", hash = "sha256:4afe7ea89de619adc868e087b4d2359282058479d7cfb94970adf4b55284574d"}, + {file = 
"greenlet-3.1.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f406b22b7c9a9b4f8aa9d2ab13d6ae0ac3e85c9a809bd590ad53fed2bf70dc79"}, + {file = "greenlet-3.1.1-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:c3a701fe5a9695b238503ce5bbe8218e03c3bcccf7e204e455e7462d770268aa"}, + {file = "greenlet-3.1.1-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:2846930c65b47d70b9d178e89c7e1a69c95c1f68ea5aa0a58646b7a96df12441"}, + {file = "greenlet-3.1.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:99cfaa2110534e2cf3ba31a7abcac9d328d1d9f1b95beede58294a60348fba36"}, + {file = "greenlet-3.1.1-cp312-cp312-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:1443279c19fca463fc33e65ef2a935a5b09bb90f978beab37729e1c3c6c25fe9"}, + {file = "greenlet-3.1.1-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:b7cede291382a78f7bb5f04a529cb18e068dd29e0fb27376074b6d0317bf4dd0"}, + {file = "greenlet-3.1.1-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:23f20bb60ae298d7d8656c6ec6db134bca379ecefadb0b19ce6f19d1f232a942"}, + {file = "greenlet-3.1.1-cp312-cp312-win_amd64.whl", hash = "sha256:7124e16b4c55d417577c2077be379514321916d5790fa287c9ed6f23bd2ffd01"}, + {file = "greenlet-3.1.1-cp313-cp313-macosx_11_0_universal2.whl", hash = "sha256:05175c27cb459dcfc05d026c4232f9de8913ed006d42713cb8a5137bd49375f1"}, + {file = "greenlet-3.1.1-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:935e943ec47c4afab8965954bf49bfa639c05d4ccf9ef6e924188f762145c0ff"}, + {file = "greenlet-3.1.1-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:667a9706c970cb552ede35aee17339a18e8f2a87a51fba2ed39ceeeb1004798a"}, + {file = "greenlet-3.1.1-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:b8a678974d1f3aa55f6cc34dc480169d58f2e6d8958895d68845fa4ab566509e"}, + {file = "greenlet-3.1.1-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:efc0f674aa41b92da8c49e0346318c6075d734994c3c4e4430b1c3f853e498e4"}, + {file = "greenlet-3.1.1-cp313-cp313-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:0153404a4bb921f0ff1abeb5ce8a5131da56b953eda6e14b88dc6bbc04d2049e"}, + {file = "greenlet-3.1.1-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:275f72decf9932639c1c6dd1013a1bc266438eb32710016a1c742df5da6e60a1"}, + {file = "greenlet-3.1.1-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:c4aab7f6381f38a4b42f269057aee279ab0fc7bf2e929e3d4abfae97b682a12c"}, + {file = "greenlet-3.1.1-cp313-cp313-win_amd64.whl", hash = "sha256:b42703b1cf69f2aa1df7d1030b9d77d3e584a70755674d60e710f0af570f3761"}, + {file = "greenlet-3.1.1-cp313-cp313t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f1695e76146579f8c06c1509c7ce4dfe0706f49c6831a817ac04eebb2fd02011"}, + {file = "greenlet-3.1.1-cp313-cp313t-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:7876452af029456b3f3549b696bb36a06db7c90747740c5302f74a9e9fa14b13"}, + {file = "greenlet-3.1.1-cp313-cp313t-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:4ead44c85f8ab905852d3de8d86f6f8baf77109f9da589cb4fa142bd3b57b475"}, + {file = "greenlet-3.1.1-cp313-cp313t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8320f64b777d00dd7ccdade271eaf0cad6636343293a25074cc5566160e4de7b"}, + {file = "greenlet-3.1.1-cp313-cp313t-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = 
"sha256:6510bf84a6b643dabba74d3049ead221257603a253d0a9873f55f6a59a65f822"}, + {file = "greenlet-3.1.1-cp313-cp313t-musllinux_1_1_aarch64.whl", hash = "sha256:04b013dc07c96f83134b1e99888e7a79979f1a247e2a9f59697fa14b5862ed01"}, + {file = "greenlet-3.1.1-cp313-cp313t-musllinux_1_1_x86_64.whl", hash = "sha256:411f015496fec93c1c8cd4e5238da364e1da7a124bcb293f085bf2860c32c6f6"}, + {file = "greenlet-3.1.1-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:47da355d8687fd65240c364c90a31569a133b7b60de111c255ef5b606f2ae291"}, + {file = "greenlet-3.1.1-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:98884ecf2ffb7d7fe6bd517e8eb99d31ff7855a840fa6d0d63cd07c037f6a981"}, + {file = "greenlet-3.1.1-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:f1d4aeb8891338e60d1ab6127af1fe45def5259def8094b9c7e34690c8858803"}, + {file = "greenlet-3.1.1-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:db32b5348615a04b82240cc67983cb315309e88d444a288934ee6ceaebcad6cc"}, + {file = "greenlet-3.1.1-cp37-cp37m-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:dcc62f31eae24de7f8dce72134c8651c58000d3b1868e01392baea7c32c247de"}, + {file = "greenlet-3.1.1-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:1d3755bcb2e02de341c55b4fca7a745a24a9e7212ac953f6b3a48d117d7257aa"}, + {file = "greenlet-3.1.1-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:b8da394b34370874b4572676f36acabac172602abf054cbc4ac910219f3340af"}, + {file = "greenlet-3.1.1-cp37-cp37m-win32.whl", hash = "sha256:a0dfc6c143b519113354e780a50381508139b07d2177cb6ad6a08278ec655798"}, + {file = "greenlet-3.1.1-cp37-cp37m-win_amd64.whl", hash = "sha256:54558ea205654b50c438029505def3834e80f0869a70fb15b871c29b4575ddef"}, + {file = "greenlet-3.1.1-cp38-cp38-macosx_11_0_universal2.whl", hash = "sha256:346bed03fe47414091be4ad44786d1bd8bef0c3fcad6ed3dee074a032ab408a9"}, + {file = "greenlet-3.1.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:dfc59d69fc48664bc693842bd57acfdd490acafda1ab52c7836e3fc75c90a111"}, + {file = "greenlet-3.1.1-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:d21e10da6ec19b457b82636209cbe2331ff4306b54d06fa04b7c138ba18c8a81"}, + {file = "greenlet-3.1.1-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:37b9de5a96111fc15418819ab4c4432e4f3c2ede61e660b1e33971eba26ef9ba"}, + {file = "greenlet-3.1.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6ef9ea3f137e5711f0dbe5f9263e8c009b7069d8a1acea822bd5e9dae0ae49c8"}, + {file = "greenlet-3.1.1-cp38-cp38-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:85f3ff71e2e60bd4b4932a043fbbe0f499e263c628390b285cb599154a3b03b1"}, + {file = "greenlet-3.1.1-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:95ffcf719966dd7c453f908e208e14cde192e09fde6c7186c8f1896ef778d8cd"}, + {file = "greenlet-3.1.1-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:03a088b9de532cbfe2ba2034b2b85e82df37874681e8c470d6fb2f8c04d7e4b7"}, + {file = "greenlet-3.1.1-cp38-cp38-win32.whl", hash = "sha256:8b8b36671f10ba80e159378df9c4f15c14098c4fd73a36b9ad715f057272fbef"}, + {file = "greenlet-3.1.1-cp38-cp38-win_amd64.whl", hash = "sha256:7017b2be767b9d43cc31416aba48aab0d2309ee31b4dbf10a1d38fb7972bdf9d"}, + {file = "greenlet-3.1.1-cp39-cp39-macosx_11_0_universal2.whl", hash = "sha256:396979749bd95f018296af156201d6211240e7a23090f50a8d5d18c370084dc3"}, + {file = 
"greenlet-3.1.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ca9d0ff5ad43e785350894d97e13633a66e2b50000e8a183a50a88d834752d42"}, + {file = "greenlet-3.1.1-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:f6ff3b14f2df4c41660a7dec01045a045653998784bf8cfcb5a525bdffffbc8f"}, + {file = "greenlet-3.1.1-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:94ebba31df2aa506d7b14866fed00ac141a867e63143fe5bca82a8e503b36437"}, + {file = "greenlet-3.1.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:73aaad12ac0ff500f62cebed98d8789198ea0e6f233421059fa68a5aa7220145"}, + {file = "greenlet-3.1.1-cp39-cp39-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:63e4844797b975b9af3a3fb8f7866ff08775f5426925e1e0bbcfe7932059a12c"}, + {file = "greenlet-3.1.1-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:7939aa3ca7d2a1593596e7ac6d59391ff30281ef280d8632fa03d81f7c5f955e"}, + {file = "greenlet-3.1.1-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:d0028e725ee18175c6e422797c407874da24381ce0690d6b9396c204c7f7276e"}, + {file = "greenlet-3.1.1-cp39-cp39-win32.whl", hash = "sha256:5e06afd14cbaf9e00899fae69b24a32f2196c19de08fcb9f4779dd4f004e5e7c"}, + {file = "greenlet-3.1.1-cp39-cp39-win_amd64.whl", hash = "sha256:3319aa75e0e0639bc15ff54ca327e8dc7a6fe404003496e3c6925cd3142e0e22"}, + {file = "greenlet-3.1.1.tar.gz", hash = "sha256:4ce3ac6cdb6adf7946475d7ef31777c26d94bccc377e070a7986bd2d5c515467"}, ] [package.extras] @@ -3508,61 +2752,70 @@ protobuf = ">=3.20.2,<4.21.1 || >4.21.1,<4.21.2 || >4.21.2,<4.21.3 || >4.21.3,<4 [[package]] name = "grpcio" -version = "1.66.1" +version = "1.66.2" description = "HTTP/2-based RPC framework" optional = false python-versions = ">=3.8" files = [ - {file = "grpcio-1.66.1-cp310-cp310-linux_armv7l.whl", hash = "sha256:4877ba180591acdf127afe21ec1c7ff8a5ecf0fe2600f0d3c50e8c4a1cbc6492"}, - {file = "grpcio-1.66.1-cp310-cp310-macosx_12_0_universal2.whl", hash = "sha256:3750c5a00bd644c75f4507f77a804d0189d97a107eb1481945a0cf3af3e7a5ac"}, - {file = "grpcio-1.66.1-cp310-cp310-manylinux_2_17_aarch64.whl", hash = "sha256:a013c5fbb12bfb5f927444b477a26f1080755a931d5d362e6a9a720ca7dbae60"}, - {file = "grpcio-1.66.1-cp310-cp310-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:b1b24c23d51a1e8790b25514157d43f0a4dce1ac12b3f0b8e9f66a5e2c4c132f"}, - {file = "grpcio-1.66.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b7ffb8ea674d68de4cac6f57d2498fef477cef582f1fa849e9f844863af50083"}, - {file = "grpcio-1.66.1-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:307b1d538140f19ccbd3aed7a93d8f71103c5d525f3c96f8616111614b14bf2a"}, - {file = "grpcio-1.66.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:1c17ebcec157cfb8dd445890a03e20caf6209a5bd4ac5b040ae9dbc59eef091d"}, - {file = "grpcio-1.66.1-cp310-cp310-win32.whl", hash = "sha256:ef82d361ed5849d34cf09105d00b94b6728d289d6b9235513cb2fcc79f7c432c"}, - {file = "grpcio-1.66.1-cp310-cp310-win_amd64.whl", hash = "sha256:292a846b92cdcd40ecca46e694997dd6b9be6c4c01a94a0dfb3fcb75d20da858"}, - {file = "grpcio-1.66.1-cp311-cp311-linux_armv7l.whl", hash = "sha256:c30aeceeaff11cd5ddbc348f37c58bcb96da8d5aa93fed78ab329de5f37a0d7a"}, - {file = "grpcio-1.66.1-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:8a1e224ce6f740dbb6b24c58f885422deebd7eb724aff0671a847f8951857c26"}, - {file = "grpcio-1.66.1-cp311-cp311-manylinux_2_17_aarch64.whl", hash = 
"sha256:a66fe4dc35d2330c185cfbb42959f57ad36f257e0cc4557d11d9f0a3f14311df"}, - {file = "grpcio-1.66.1-cp311-cp311-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:e3ba04659e4fce609de2658fe4dbf7d6ed21987a94460f5f92df7579fd5d0e22"}, - {file = "grpcio-1.66.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4573608e23f7e091acfbe3e84ac2045680b69751d8d67685ffa193a4429fedb1"}, - {file = "grpcio-1.66.1-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:7e06aa1f764ec8265b19d8f00140b8c4b6ca179a6dc67aa9413867c47e1fb04e"}, - {file = "grpcio-1.66.1-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:3885f037eb11f1cacc41f207b705f38a44b69478086f40608959bf5ad85826dd"}, - {file = "grpcio-1.66.1-cp311-cp311-win32.whl", hash = "sha256:97ae7edd3f3f91480e48ede5d3e7d431ad6005bfdbd65c1b56913799ec79e791"}, - {file = "grpcio-1.66.1-cp311-cp311-win_amd64.whl", hash = "sha256:cfd349de4158d797db2bd82d2020554a121674e98fbe6b15328456b3bf2495bb"}, - {file = "grpcio-1.66.1-cp312-cp312-linux_armv7l.whl", hash = "sha256:a92c4f58c01c77205df6ff999faa008540475c39b835277fb8883b11cada127a"}, - {file = "grpcio-1.66.1-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:fdb14bad0835914f325349ed34a51940bc2ad965142eb3090081593c6e347be9"}, - {file = "grpcio-1.66.1-cp312-cp312-manylinux_2_17_aarch64.whl", hash = "sha256:f03a5884c56256e08fd9e262e11b5cfacf1af96e2ce78dc095d2c41ccae2c80d"}, - {file = "grpcio-1.66.1-cp312-cp312-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:2ca2559692d8e7e245d456877a85ee41525f3ed425aa97eb7a70fc9a79df91a0"}, - {file = "grpcio-1.66.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:84ca1be089fb4446490dd1135828bd42a7c7f8421e74fa581611f7afdf7ab761"}, - {file = "grpcio-1.66.1-cp312-cp312-musllinux_1_1_i686.whl", hash = "sha256:d639c939ad7c440c7b2819a28d559179a4508783f7e5b991166f8d7a34b52815"}, - {file = "grpcio-1.66.1-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:b9feb4e5ec8dc2d15709f4d5fc367794d69277f5d680baf1910fc9915c633524"}, - {file = "grpcio-1.66.1-cp312-cp312-win32.whl", hash = "sha256:7101db1bd4cd9b880294dec41a93fcdce465bdbb602cd8dc5bd2d6362b618759"}, - {file = "grpcio-1.66.1-cp312-cp312-win_amd64.whl", hash = "sha256:b0aa03d240b5539648d996cc60438f128c7f46050989e35b25f5c18286c86734"}, - {file = "grpcio-1.66.1-cp38-cp38-linux_armv7l.whl", hash = "sha256:ecfe735e7a59e5a98208447293ff8580e9db1e890e232b8b292dc8bd15afc0d2"}, - {file = "grpcio-1.66.1-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:4825a3aa5648010842e1c9d35a082187746aa0cdbf1b7a2a930595a94fb10fce"}, - {file = "grpcio-1.66.1-cp38-cp38-manylinux_2_17_aarch64.whl", hash = "sha256:f517fd7259fe823ef3bd21e508b653d5492e706e9f0ef82c16ce3347a8a5620c"}, - {file = "grpcio-1.66.1-cp38-cp38-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:f1fe60d0772831d96d263b53d83fb9a3d050a94b0e94b6d004a5ad111faa5b5b"}, - {file = "grpcio-1.66.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:31a049daa428f928f21090403e5d18ea02670e3d5d172581670be006100db9ef"}, - {file = "grpcio-1.66.1-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:6f914386e52cbdeb5d2a7ce3bf1fdfacbe9d818dd81b6099a05b741aaf3848bb"}, - {file = "grpcio-1.66.1-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:bff2096bdba686019fb32d2dde45b95981f0d1490e054400f70fc9a8af34b49d"}, - {file = "grpcio-1.66.1-cp38-cp38-win32.whl", hash = "sha256:aa8ba945c96e73de29d25331b26f3e416e0c0f621e984a3ebdb2d0d0b596a3b3"}, - {file = "grpcio-1.66.1-cp38-cp38-win_amd64.whl", hash = 
"sha256:161d5c535c2bdf61b95080e7f0f017a1dfcb812bf54093e71e5562b16225b4ce"}, - {file = "grpcio-1.66.1-cp39-cp39-linux_armv7l.whl", hash = "sha256:d0cd7050397b3609ea51727b1811e663ffda8bda39c6a5bb69525ef12414b503"}, - {file = "grpcio-1.66.1-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:0e6c9b42ded5d02b6b1fea3a25f036a2236eeb75d0579bfd43c0018c88bf0a3e"}, - {file = "grpcio-1.66.1-cp39-cp39-manylinux_2_17_aarch64.whl", hash = "sha256:c9f80f9fad93a8cf71c7f161778ba47fd730d13a343a46258065c4deb4b550c0"}, - {file = "grpcio-1.66.1-cp39-cp39-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:5dd67ed9da78e5121efc5c510f0122a972216808d6de70953a740560c572eb44"}, - {file = "grpcio-1.66.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:48b0d92d45ce3be2084b92fb5bae2f64c208fea8ceed7fccf6a7b524d3c4942e"}, - {file = "grpcio-1.66.1-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:4d813316d1a752be6f5c4360c49f55b06d4fe212d7df03253dfdae90c8a402bb"}, - {file = "grpcio-1.66.1-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:9c9bebc6627873ec27a70fc800f6083a13c70b23a5564788754b9ee52c5aef6c"}, - {file = "grpcio-1.66.1-cp39-cp39-win32.whl", hash = "sha256:30a1c2cf9390c894c90bbc70147f2372130ad189cffef161f0432d0157973f45"}, - {file = "grpcio-1.66.1-cp39-cp39-win_amd64.whl", hash = "sha256:17663598aadbedc3cacd7bbde432f541c8e07d2496564e22b214b22c7523dac8"}, - {file = "grpcio-1.66.1.tar.gz", hash = "sha256:35334f9c9745add3e357e3372756fd32d925bd52c41da97f4dfdafbde0bf0ee2"}, + {file = "grpcio-1.66.2-cp310-cp310-linux_armv7l.whl", hash = "sha256:fe96281713168a3270878255983d2cb1a97e034325c8c2c25169a69289d3ecfa"}, + {file = "grpcio-1.66.2-cp310-cp310-macosx_12_0_universal2.whl", hash = "sha256:73fc8f8b9b5c4a03e802b3cd0c18b2b06b410d3c1dcbef989fdeb943bd44aff7"}, + {file = "grpcio-1.66.2-cp310-cp310-manylinux_2_17_aarch64.whl", hash = "sha256:03b0b307ba26fae695e067b94cbb014e27390f8bc5ac7a3a39b7723fed085604"}, + {file = "grpcio-1.66.2-cp310-cp310-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:7d69ce1f324dc2d71e40c9261d3fdbe7d4c9d60f332069ff9b2a4d8a257c7b2b"}, + {file = "grpcio-1.66.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:05bc2ceadc2529ab0b227b1310d249d95d9001cd106aa4d31e8871ad3c428d73"}, + {file = "grpcio-1.66.2-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:8ac475e8da31484efa25abb774674d837b343afb78bb3bcdef10f81a93e3d6bf"}, + {file = "grpcio-1.66.2-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:0be4e0490c28da5377283861bed2941d1d20ec017ca397a5df4394d1c31a9b50"}, + {file = "grpcio-1.66.2-cp310-cp310-win32.whl", hash = "sha256:4e504572433f4e72b12394977679161d495c4c9581ba34a88d843eaf0f2fbd39"}, + {file = "grpcio-1.66.2-cp310-cp310-win_amd64.whl", hash = "sha256:2018b053aa15782db2541ca01a7edb56a0bf18c77efed975392583725974b249"}, + {file = "grpcio-1.66.2-cp311-cp311-linux_armv7l.whl", hash = "sha256:2335c58560a9e92ac58ff2bc5649952f9b37d0735608242973c7a8b94a6437d8"}, + {file = "grpcio-1.66.2-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:45a3d462826f4868b442a6b8fdbe8b87b45eb4f5b5308168c156b21eca43f61c"}, + {file = "grpcio-1.66.2-cp311-cp311-manylinux_2_17_aarch64.whl", hash = "sha256:a9539f01cb04950fd4b5ab458e64a15f84c2acc273670072abe49a3f29bbad54"}, + {file = "grpcio-1.66.2-cp311-cp311-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ce89f5876662f146d4c1f695dda29d4433a5d01c8681fbd2539afff535da14d4"}, + {file = "grpcio-1.66.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = 
"sha256:d25a14af966438cddf498b2e338f88d1c9706f3493b1d73b93f695c99c5f0e2a"}, + {file = "grpcio-1.66.2-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:6001e575b8bbd89eee11960bb640b6da6ae110cf08113a075f1e2051cc596cae"}, + {file = "grpcio-1.66.2-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:4ea1d062c9230278793820146c95d038dc0f468cbdd172eec3363e42ff1c7d01"}, + {file = "grpcio-1.66.2-cp311-cp311-win32.whl", hash = "sha256:38b68498ff579a3b1ee8f93a05eb48dc2595795f2f62716e797dc24774c1aaa8"}, + {file = "grpcio-1.66.2-cp311-cp311-win_amd64.whl", hash = "sha256:6851de821249340bdb100df5eacfecfc4e6075fa85c6df7ee0eb213170ec8e5d"}, + {file = "grpcio-1.66.2-cp312-cp312-linux_armv7l.whl", hash = "sha256:802d84fd3d50614170649853d121baaaa305de7b65b3e01759247e768d691ddf"}, + {file = "grpcio-1.66.2-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:80fd702ba7e432994df208f27514280b4b5c6843e12a48759c9255679ad38db8"}, + {file = "grpcio-1.66.2-cp312-cp312-manylinux_2_17_aarch64.whl", hash = "sha256:12fda97ffae55e6526825daf25ad0fa37483685952b5d0f910d6405c87e3adb6"}, + {file = "grpcio-1.66.2-cp312-cp312-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:950da58d7d80abd0ea68757769c9db0a95b31163e53e5bb60438d263f4bed7b7"}, + {file = "grpcio-1.66.2-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e636ce23273683b00410f1971d209bf3689238cf5538d960adc3cdfe80dd0dbd"}, + {file = "grpcio-1.66.2-cp312-cp312-musllinux_1_1_i686.whl", hash = "sha256:a917d26e0fe980b0ac7bfcc1a3c4ad6a9a4612c911d33efb55ed7833c749b0ee"}, + {file = "grpcio-1.66.2-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:49f0ca7ae850f59f828a723a9064cadbed90f1ece179d375966546499b8a2c9c"}, + {file = "grpcio-1.66.2-cp312-cp312-win32.whl", hash = "sha256:31fd163105464797a72d901a06472860845ac157389e10f12631025b3e4d0453"}, + {file = "grpcio-1.66.2-cp312-cp312-win_amd64.whl", hash = "sha256:ff1f7882e56c40b0d33c4922c15dfa30612f05fb785074a012f7cda74d1c3679"}, + {file = "grpcio-1.66.2-cp313-cp313-linux_armv7l.whl", hash = "sha256:3b00efc473b20d8bf83e0e1ae661b98951ca56111feb9b9611df8efc4fe5d55d"}, + {file = "grpcio-1.66.2-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:1caa38fb22a8578ab8393da99d4b8641e3a80abc8fd52646f1ecc92bcb8dee34"}, + {file = "grpcio-1.66.2-cp313-cp313-manylinux_2_17_aarch64.whl", hash = "sha256:c408f5ef75cfffa113cacd8b0c0e3611cbfd47701ca3cdc090594109b9fcbaed"}, + {file = "grpcio-1.66.2-cp313-cp313-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:c806852deaedee9ce8280fe98955c9103f62912a5b2d5ee7e3eaa284a6d8d8e7"}, + {file = "grpcio-1.66.2-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f145cc21836c332c67baa6fc81099d1d27e266401565bf481948010d6ea32d46"}, + {file = "grpcio-1.66.2-cp313-cp313-musllinux_1_1_i686.whl", hash = "sha256:73e3b425c1e155730273f73e419de3074aa5c5e936771ee0e4af0814631fb30a"}, + {file = "grpcio-1.66.2-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:9c509a4f78114cbc5f0740eb3d7a74985fd2eff022971bc9bc31f8bc93e66a3b"}, + {file = "grpcio-1.66.2-cp313-cp313-win32.whl", hash = "sha256:20657d6b8cfed7db5e11b62ff7dfe2e12064ea78e93f1434d61888834bc86d75"}, + {file = "grpcio-1.66.2-cp313-cp313-win_amd64.whl", hash = "sha256:fb70487c95786e345af5e854ffec8cb8cc781bcc5df7930c4fbb7feaa72e1cdf"}, + {file = "grpcio-1.66.2-cp38-cp38-linux_armv7l.whl", hash = "sha256:a18e20d8321c6400185b4263e27982488cb5cdd62da69147087a76a24ef4e7e3"}, + {file = "grpcio-1.66.2-cp38-cp38-macosx_10_9_universal2.whl", hash = 
"sha256:02697eb4a5cbe5a9639f57323b4c37bcb3ab2d48cec5da3dc2f13334d72790dd"}, + {file = "grpcio-1.66.2-cp38-cp38-manylinux_2_17_aarch64.whl", hash = "sha256:99a641995a6bc4287a6315989ee591ff58507aa1cbe4c2e70d88411c4dcc0839"}, + {file = "grpcio-1.66.2-cp38-cp38-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:3ed71e81782966ffead60268bbda31ea3f725ebf8aa73634d5dda44f2cf3fb9c"}, + {file = "grpcio-1.66.2-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bbd27c24a4cc5e195a7f56cfd9312e366d5d61b86e36d46bbe538457ea6eb8dd"}, + {file = "grpcio-1.66.2-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:d9a9724a156c8ec6a379869b23ba3323b7ea3600851c91489b871e375f710bc8"}, + {file = "grpcio-1.66.2-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:d8d4732cc5052e92cea2f78b233c2e2a52998ac40cd651f40e398893ad0d06ec"}, + {file = "grpcio-1.66.2-cp38-cp38-win32.whl", hash = "sha256:7b2c86457145ce14c38e5bf6bdc19ef88e66c5fee2c3d83285c5aef026ba93b3"}, + {file = "grpcio-1.66.2-cp38-cp38-win_amd64.whl", hash = "sha256:e88264caad6d8d00e7913996030bac8ad5f26b7411495848cc218bd3a9040b6c"}, + {file = "grpcio-1.66.2-cp39-cp39-linux_armv7l.whl", hash = "sha256:c400ba5675b67025c8a9f48aa846f12a39cf0c44df5cd060e23fda5b30e9359d"}, + {file = "grpcio-1.66.2-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:66a0cd8ba6512b401d7ed46bb03f4ee455839957f28b8d61e7708056a806ba6a"}, + {file = "grpcio-1.66.2-cp39-cp39-manylinux_2_17_aarch64.whl", hash = "sha256:06de8ec0bd71be123eec15b0e0d457474931c2c407869b6c349bd9bed4adbac3"}, + {file = "grpcio-1.66.2-cp39-cp39-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:fb57870449dfcfac428afbb5a877829fcb0d6db9d9baa1148705739e9083880e"}, + {file = "grpcio-1.66.2-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b672abf90a964bfde2d0ecbce30f2329a47498ba75ce6f4da35a2f4532b7acbc"}, + {file = "grpcio-1.66.2-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:ad2efdbe90c73b0434cbe64ed372e12414ad03c06262279b104a029d1889d13e"}, + {file = "grpcio-1.66.2-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:9c3a99c519f4638e700e9e3f83952e27e2ea10873eecd7935823dab0c1c9250e"}, + {file = "grpcio-1.66.2-cp39-cp39-win32.whl", hash = "sha256:78fa51ebc2d9242c0fc5db0feecc57a9943303b46664ad89921f5079e2e4ada7"}, + {file = "grpcio-1.66.2-cp39-cp39-win_amd64.whl", hash = "sha256:728bdf36a186e7f51da73be7f8d09457a03061be848718d0edf000e709418987"}, + {file = "grpcio-1.66.2.tar.gz", hash = "sha256:563588c587b75c34b928bc428548e5b00ea38c46972181a4d8b75ba7e3f24231"}, ] [package.extras] -protobuf = ["grpcio-tools (>=1.66.1)"] +protobuf = ["grpcio-tools (>=1.66.2)"] [[package]] name = "grpcio-status" @@ -3803,27 +3056,6 @@ files = [ {file = "hpack-4.0.0.tar.gz", hash = "sha256:fc41de0c63e687ebffde81187a948221294896f6bdc0ae2312708df339430095"}, ] -[[package]] -name = "html5lib" -version = "1.1" -description = "HTML parser based on the WHATWG HTML specification" -optional = false -python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*" -files = [ - {file = "html5lib-1.1-py2.py3-none-any.whl", hash = "sha256:0d78f8fde1c230e99fe37986a60526d7049ed4bf8a9fadbad5f00e22e58e041d"}, - {file = "html5lib-1.1.tar.gz", hash = "sha256:b2e5b40261e20f354d198eae92afc10d750afb487ed5e50f9c4eaf07c184146f"}, -] - -[package.dependencies] -six = ">=1.9" -webencodings = "*" - -[package.extras] -all = ["chardet (>=2.2)", "genshi", "lxml"] -chardet = ["chardet (>=2.2)"] -genshi = ["genshi"] -lxml = ["lxml"] - [[package]] name = "httpcore" version = "1.0.5" @@ -3936,18 +3168,18 @@ 
zstd = ["zstandard (>=0.18.0)"] [[package]] name = "huggingface-hub" -version = "0.16.4" +version = "0.25.1" description = "Client library to download and publish models, datasets and other repos on the huggingface.co hub" optional = false -python-versions = ">=3.7.0" +python-versions = ">=3.8.0" files = [ - {file = "huggingface_hub-0.16.4-py3-none-any.whl", hash = "sha256:0d3df29932f334fead024afc7cb4cc5149d955238b8b5e42dcf9740d6995a349"}, - {file = "huggingface_hub-0.16.4.tar.gz", hash = "sha256:608c7d4f3d368b326d1747f91523dbd1f692871e8e2e7a4750314a2dd8b63e14"}, + {file = "huggingface_hub-0.25.1-py3-none-any.whl", hash = "sha256:a5158ded931b3188f54ea9028097312cb0acd50bffaaa2612014c3c526b44972"}, + {file = "huggingface_hub-0.25.1.tar.gz", hash = "sha256:9ff7cb327343211fbd06e2b149b8f362fd1e389454f3f14c6db75a4999ee20ff"}, ] [package.dependencies] filelock = "*" -fsspec = "*" +fsspec = ">=2023.5.0" packaging = ">=20.9" pyyaml = ">=5.1" requests = "*" @@ -3955,16 +3187,18 @@ tqdm = ">=4.42.1" typing-extensions = ">=3.7.4.3" [package.extras] -all = ["InquirerPy (==0.3.4)", "Jinja2", "Pillow", "aiohttp", "black (>=23.1,<24.0)", "gradio", "jedi", "mypy (==0.982)", "numpy", "pydantic", "pytest", "pytest-asyncio", "pytest-cov", "pytest-env", "pytest-vcr", "pytest-xdist", "ruff (>=0.0.241)", "soundfile", "types-PyYAML", "types-requests", "types-simplejson", "types-toml", "types-tqdm", "types-urllib3", "urllib3 (<2.0)"] +all = ["InquirerPy (==0.3.4)", "Jinja2", "Pillow", "aiohttp", "fastapi", "gradio", "jedi", "minijinja (>=1.0)", "mypy (==1.5.1)", "numpy", "pytest (>=8.1.1,<8.2.2)", "pytest-asyncio", "pytest-cov", "pytest-env", "pytest-mock", "pytest-rerunfailures", "pytest-vcr", "pytest-xdist", "ruff (>=0.5.0)", "soundfile", "types-PyYAML", "types-requests", "types-simplejson", "types-toml", "types-tqdm", "types-urllib3", "typing-extensions (>=4.8.0)", "urllib3 (<2.0)"] cli = ["InquirerPy (==0.3.4)"] -dev = ["InquirerPy (==0.3.4)", "Jinja2", "Pillow", "aiohttp", "black (>=23.1,<24.0)", "gradio", "jedi", "mypy (==0.982)", "numpy", "pydantic", "pytest", "pytest-asyncio", "pytest-cov", "pytest-env", "pytest-vcr", "pytest-xdist", "ruff (>=0.0.241)", "soundfile", "types-PyYAML", "types-requests", "types-simplejson", "types-toml", "types-tqdm", "types-urllib3", "urllib3 (<2.0)"] +dev = ["InquirerPy (==0.3.4)", "Jinja2", "Pillow", "aiohttp", "fastapi", "gradio", "jedi", "minijinja (>=1.0)", "mypy (==1.5.1)", "numpy", "pytest (>=8.1.1,<8.2.2)", "pytest-asyncio", "pytest-cov", "pytest-env", "pytest-mock", "pytest-rerunfailures", "pytest-vcr", "pytest-xdist", "ruff (>=0.5.0)", "soundfile", "types-PyYAML", "types-requests", "types-simplejson", "types-toml", "types-tqdm", "types-urllib3", "typing-extensions (>=4.8.0)", "urllib3 (<2.0)"] fastai = ["fastai (>=2.4)", "fastcore (>=1.3.27)", "toml"] -inference = ["aiohttp", "pydantic"] -quality = ["black (>=23.1,<24.0)", "mypy (==0.982)", "ruff (>=0.0.241)"] +hf-transfer = ["hf-transfer (>=0.1.4)"] +inference = ["aiohttp", "minijinja (>=1.0)"] +quality = ["mypy (==1.5.1)", "ruff (>=0.5.0)"] tensorflow = ["graphviz", "pydot", "tensorflow"] -testing = ["InquirerPy (==0.3.4)", "Jinja2", "Pillow", "aiohttp", "gradio", "jedi", "numpy", "pydantic", "pytest", "pytest-asyncio", "pytest-cov", "pytest-env", "pytest-vcr", "pytest-xdist", "soundfile", "urllib3 (<2.0)"] -torch = ["torch"] -typing = ["pydantic", "types-PyYAML", "types-requests", "types-simplejson", "types-toml", "types-tqdm", "types-urllib3"] +tensorflow-testing = ["keras (<3.0)", "tensorflow"] +testing = 
["InquirerPy (==0.3.4)", "Jinja2", "Pillow", "aiohttp", "fastapi", "gradio", "jedi", "minijinja (>=1.0)", "numpy", "pytest (>=8.1.1,<8.2.2)", "pytest-asyncio", "pytest-cov", "pytest-env", "pytest-mock", "pytest-rerunfailures", "pytest-vcr", "pytest-xdist", "soundfile", "urllib3 (<2.0)"] +torch = ["safetensors[torch]", "torch"] +typing = ["types-PyYAML", "types-requests", "types-simplejson", "types-toml", "types-tqdm", "types-urllib3", "typing-extensions (>=4.8.0)"] [[package]] name = "humanfriendly" @@ -3993,13 +3227,13 @@ files = [ [[package]] name = "idna" -version = "3.9" +version = "3.10" description = "Internationalized Domain Names in Applications (IDNA)" optional = false python-versions = ">=3.6" files = [ - {file = "idna-3.9-py3-none-any.whl", hash = "sha256:69297d5da0cc9281c77efffb4e730254dd45943f45bbfb461de5991713989b1e"}, - {file = "idna-3.9.tar.gz", hash = "sha256:e5c5dafde284f26e9e0f28f6ea2d6400abd5ca099864a67f576f3981c6476124"}, + {file = "idna-3.10-py3-none-any.whl", hash = "sha256:946d195a0d259cbba61165e88e65941f16e9b36ea6ddb97f00452bae8b1287d3"}, + {file = "idna-3.10.tar.gz", hash = "sha256:12f65c9b470abda6dc35cf8e63cc574b1c52b11df2c86030af0ac09b01b13ea9"}, ] [package.extras] @@ -4089,16 +3323,6 @@ files = [ {file = "jieba-0.42.1.tar.gz", hash = "sha256:055ca12f62674fafed09427f176506079bc135638a14e23e25be909131928db2"}, ] -[[package]] -name = "jieba3k" -version = "0.35.1" -description = "Chinese Words Segementation Utilities" -optional = false -python-versions = "*" -files = [ - {file = "jieba3k-0.35.1.zip", hash = "sha256:980a4f2636b778d312518066be90c7697d410dd5a472385f5afced71a2db1c10"}, -] - [[package]] name = "jinja2" version = "3.1.4" @@ -4152,20 +3376,6 @@ files = [ [package.dependencies] attrs = ">=19.2.0" -[[package]] -name = "jsonpath-ng" -version = "1.6.1" -description = "A final implementation of JSONPath for Python that aims to be standard compliant, including arithmetic and binary comparison operators and providing clear AST for metaprogramming." 
-optional = false -python-versions = "*" -files = [ - {file = "jsonpath-ng-1.6.1.tar.gz", hash = "sha256:086c37ba4917304850bd837aeab806670224d3f038fe2833ff593a672ef0a5fa"}, - {file = "jsonpath_ng-1.6.1-py3-none-any.whl", hash = "sha256:8f22cd8273d7772eea9aaa84d922e0841aa36fdb8a2c6b7f6c3791a16a9bc0be"}, -] - -[package.dependencies] -ply = "*" - [[package]] name = "jsonschema" version = "4.23.0" @@ -4216,142 +3426,20 @@ files = [ {file = "kaleido-0.2.1-py2.py3-none-win_amd64.whl", hash = "sha256:4670985f28913c2d063c5734d125ecc28e40810141bdb0a46f15b76c1d45f23c"}, ] -[[package]] -name = "kiwisolver" -version = "1.4.7" -description = "A fast implementation of the Cassowary constraint solver" -optional = false -python-versions = ">=3.8" -files = [ - {file = "kiwisolver-1.4.7-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:8a9c83f75223d5e48b0bc9cb1bf2776cf01563e00ade8775ffe13b0b6e1af3a6"}, - {file = "kiwisolver-1.4.7-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:58370b1ffbd35407444d57057b57da5d6549d2d854fa30249771775c63b5fe17"}, - {file = "kiwisolver-1.4.7-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:aa0abdf853e09aff551db11fce173e2177d00786c688203f52c87ad7fcd91ef9"}, - {file = "kiwisolver-1.4.7-cp310-cp310-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:8d53103597a252fb3ab8b5845af04c7a26d5e7ea8122303dd7a021176a87e8b9"}, - {file = "kiwisolver-1.4.7-cp310-cp310-manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:88f17c5ffa8e9462fb79f62746428dd57b46eb931698e42e990ad63103f35e6c"}, - {file = "kiwisolver-1.4.7-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:88a9ca9c710d598fd75ee5de59d5bda2684d9db36a9f50b6125eaea3969c2599"}, - {file = "kiwisolver-1.4.7-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:f4d742cb7af1c28303a51b7a27aaee540e71bb8e24f68c736f6f2ffc82f2bf05"}, - {file = "kiwisolver-1.4.7-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:e28c7fea2196bf4c2f8d46a0415c77a1c480cc0724722f23d7410ffe9842c407"}, - {file = "kiwisolver-1.4.7-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:e968b84db54f9d42046cf154e02911e39c0435c9801681e3fc9ce8a3c4130278"}, - {file = "kiwisolver-1.4.7-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:0c18ec74c0472de033e1bebb2911c3c310eef5649133dd0bedf2a169a1b269e5"}, - {file = "kiwisolver-1.4.7-cp310-cp310-musllinux_1_2_ppc64le.whl", hash = "sha256:8f0ea6da6d393d8b2e187e6a5e3fb81f5862010a40c3945e2c6d12ae45cfb2ad"}, - {file = "kiwisolver-1.4.7-cp310-cp310-musllinux_1_2_s390x.whl", hash = "sha256:f106407dda69ae456dd1227966bf445b157ccc80ba0dff3802bb63f30b74e895"}, - {file = "kiwisolver-1.4.7-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:84ec80df401cfee1457063732d90022f93951944b5b58975d34ab56bb150dfb3"}, - {file = "kiwisolver-1.4.7-cp310-cp310-win32.whl", hash = "sha256:71bb308552200fb2c195e35ef05de12f0c878c07fc91c270eb3d6e41698c3bcc"}, - {file = "kiwisolver-1.4.7-cp310-cp310-win_amd64.whl", hash = "sha256:44756f9fd339de0fb6ee4f8c1696cfd19b2422e0d70b4cefc1cc7f1f64045a8c"}, - {file = "kiwisolver-1.4.7-cp310-cp310-win_arm64.whl", hash = "sha256:78a42513018c41c2ffd262eb676442315cbfe3c44eed82385c2ed043bc63210a"}, - {file = "kiwisolver-1.4.7-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:d2b0e12a42fb4e72d509fc994713d099cbb15ebf1103545e8a45f14da2dfca54"}, - {file = "kiwisolver-1.4.7-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:2a8781ac3edc42ea4b90bc23e7d37b665d89423818e26eb6df90698aa2287c95"}, - {file = 
"kiwisolver-1.4.7-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:46707a10836894b559e04b0fd143e343945c97fd170d69a2d26d640b4e297935"}, - {file = "kiwisolver-1.4.7-cp311-cp311-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ef97b8df011141c9b0f6caf23b29379f87dd13183c978a30a3c546d2c47314cb"}, - {file = "kiwisolver-1.4.7-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3ab58c12a2cd0fc769089e6d38466c46d7f76aced0a1f54c77652446733d2d02"}, - {file = "kiwisolver-1.4.7-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:803b8e1459341c1bb56d1c5c010406d5edec8a0713a0945851290a7930679b51"}, - {file = "kiwisolver-1.4.7-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:f9a9e8a507420fe35992ee9ecb302dab68550dedc0da9e2880dd88071c5fb052"}, - {file = "kiwisolver-1.4.7-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:18077b53dc3bb490e330669a99920c5e6a496889ae8c63b58fbc57c3d7f33a18"}, - {file = "kiwisolver-1.4.7-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:6af936f79086a89b3680a280c47ea90b4df7047b5bdf3aa5c524bbedddb9e545"}, - {file = "kiwisolver-1.4.7-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:3abc5b19d24af4b77d1598a585b8a719beb8569a71568b66f4ebe1fb0449460b"}, - {file = "kiwisolver-1.4.7-cp311-cp311-musllinux_1_2_ppc64le.whl", hash = "sha256:933d4de052939d90afbe6e9d5273ae05fb836cc86c15b686edd4b3560cc0ee36"}, - {file = "kiwisolver-1.4.7-cp311-cp311-musllinux_1_2_s390x.whl", hash = "sha256:65e720d2ab2b53f1f72fb5da5fb477455905ce2c88aaa671ff0a447c2c80e8e3"}, - {file = "kiwisolver-1.4.7-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:3bf1ed55088f214ba6427484c59553123fdd9b218a42bbc8c6496d6754b1e523"}, - {file = "kiwisolver-1.4.7-cp311-cp311-win32.whl", hash = "sha256:4c00336b9dd5ad96d0a558fd18a8b6f711b7449acce4c157e7343ba92dd0cf3d"}, - {file = "kiwisolver-1.4.7-cp311-cp311-win_amd64.whl", hash = "sha256:929e294c1ac1e9f615c62a4e4313ca1823ba37326c164ec720a803287c4c499b"}, - {file = "kiwisolver-1.4.7-cp311-cp311-win_arm64.whl", hash = "sha256:e33e8fbd440c917106b237ef1a2f1449dfbb9b6f6e1ce17c94cd6a1e0d438376"}, - {file = "kiwisolver-1.4.7-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:5360cc32706dab3931f738d3079652d20982511f7c0ac5711483e6eab08efff2"}, - {file = "kiwisolver-1.4.7-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:942216596dc64ddb25adb215c3c783215b23626f8d84e8eff8d6d45c3f29f75a"}, - {file = "kiwisolver-1.4.7-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:48b571ecd8bae15702e4f22d3ff6a0f13e54d3d00cd25216d5e7f658242065ee"}, - {file = "kiwisolver-1.4.7-cp312-cp312-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ad42ba922c67c5f219097b28fae965e10045ddf145d2928bfac2eb2e17673640"}, - {file = "kiwisolver-1.4.7-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:612a10bdae23404a72941a0fc8fa2660c6ea1217c4ce0dbcab8a8f6543ea9e7f"}, - {file = "kiwisolver-1.4.7-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:9e838bba3a3bac0fe06d849d29772eb1afb9745a59710762e4ba3f4cb8424483"}, - {file = "kiwisolver-1.4.7-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:22f499f6157236c19f4bbbd472fa55b063db77a16cd74d49afe28992dff8c258"}, - {file = "kiwisolver-1.4.7-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:693902d433cf585133699972b6d7c42a8b9f8f826ebcaf0132ff55200afc599e"}, - {file = 
"kiwisolver-1.4.7-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:4e77f2126c3e0b0d055f44513ed349038ac180371ed9b52fe96a32aa071a5107"}, - {file = "kiwisolver-1.4.7-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:657a05857bda581c3656bfc3b20e353c232e9193eb167766ad2dc58b56504948"}, - {file = "kiwisolver-1.4.7-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:4bfa75a048c056a411f9705856abfc872558e33c055d80af6a380e3658766038"}, - {file = "kiwisolver-1.4.7-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:34ea1de54beef1c104422d210c47c7d2a4999bdecf42c7b5718fbe59a4cac383"}, - {file = "kiwisolver-1.4.7-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:90da3b5f694b85231cf93586dad5e90e2d71b9428f9aad96952c99055582f520"}, - {file = "kiwisolver-1.4.7-cp312-cp312-win32.whl", hash = "sha256:18e0cca3e008e17fe9b164b55735a325140a5a35faad8de92dd80265cd5eb80b"}, - {file = "kiwisolver-1.4.7-cp312-cp312-win_amd64.whl", hash = "sha256:58cb20602b18f86f83a5c87d3ee1c766a79c0d452f8def86d925e6c60fbf7bfb"}, - {file = "kiwisolver-1.4.7-cp312-cp312-win_arm64.whl", hash = "sha256:f5a8b53bdc0b3961f8b6125e198617c40aeed638b387913bf1ce78afb1b0be2a"}, - {file = "kiwisolver-1.4.7-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:2e6039dcbe79a8e0f044f1c39db1986a1b8071051efba3ee4d74f5b365f5226e"}, - {file = "kiwisolver-1.4.7-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:a1ecf0ac1c518487d9d23b1cd7139a6a65bc460cd101ab01f1be82ecf09794b6"}, - {file = "kiwisolver-1.4.7-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:7ab9ccab2b5bd5702ab0803676a580fffa2aa178c2badc5557a84cc943fcf750"}, - {file = "kiwisolver-1.4.7-cp313-cp313-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:f816dd2277f8d63d79f9c8473a79fe54047bc0467754962840782c575522224d"}, - {file = "kiwisolver-1.4.7-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:cf8bcc23ceb5a1b624572a1623b9f79d2c3b337c8c455405ef231933a10da379"}, - {file = "kiwisolver-1.4.7-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:dea0bf229319828467d7fca8c7c189780aa9ff679c94539eed7532ebe33ed37c"}, - {file = "kiwisolver-1.4.7-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:7c06a4c7cf15ec739ce0e5971b26c93638730090add60e183530d70848ebdd34"}, - {file = "kiwisolver-1.4.7-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:913983ad2deb14e66d83c28b632fd35ba2b825031f2fa4ca29675e665dfecbe1"}, - {file = "kiwisolver-1.4.7-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:5337ec7809bcd0f424c6b705ecf97941c46279cf5ed92311782c7c9c2026f07f"}, - {file = "kiwisolver-1.4.7-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:4c26ed10c4f6fa6ddb329a5120ba3b6db349ca192ae211e882970bfc9d91420b"}, - {file = "kiwisolver-1.4.7-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:c619b101e6de2222c1fcb0531e1b17bbffbe54294bfba43ea0d411d428618c27"}, - {file = "kiwisolver-1.4.7-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:073a36c8273647592ea332e816e75ef8da5c303236ec0167196793eb1e34657a"}, - {file = "kiwisolver-1.4.7-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:3ce6b2b0231bda412463e152fc18335ba32faf4e8c23a754ad50ffa70e4091ee"}, - {file = "kiwisolver-1.4.7-cp313-cp313-win32.whl", hash = "sha256:f4c9aee212bc89d4e13f58be11a56cc8036cabad119259d12ace14b34476fd07"}, - {file = "kiwisolver-1.4.7-cp313-cp313-win_amd64.whl", hash = "sha256:8a3ec5aa8e38fc4c8af308917ce12c536f1c88452ce554027e55b22cbbfbff76"}, - {file = 
"kiwisolver-1.4.7-cp313-cp313-win_arm64.whl", hash = "sha256:76c8094ac20ec259471ac53e774623eb62e6e1f56cd8690c67ce6ce4fcb05650"}, - {file = "kiwisolver-1.4.7-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:5d5abf8f8ec1f4e22882273c423e16cae834c36856cac348cfbfa68e01c40f3a"}, - {file = "kiwisolver-1.4.7-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:aeb3531b196ef6f11776c21674dba836aeea9d5bd1cf630f869e3d90b16cfade"}, - {file = "kiwisolver-1.4.7-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:b7d755065e4e866a8086c9bdada157133ff466476a2ad7861828e17b6026e22c"}, - {file = "kiwisolver-1.4.7-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:08471d4d86cbaec61f86b217dd938a83d85e03785f51121e791a6e6689a3be95"}, - {file = "kiwisolver-1.4.7-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:7bbfcb7165ce3d54a3dfbe731e470f65739c4c1f85bb1018ee912bae139e263b"}, - {file = "kiwisolver-1.4.7-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:5d34eb8494bea691a1a450141ebb5385e4b69d38bb8403b5146ad279f4b30fa3"}, - {file = "kiwisolver-1.4.7-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:9242795d174daa40105c1d86aba618e8eab7bf96ba8c3ee614da8302a9f95503"}, - {file = "kiwisolver-1.4.7-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:a0f64a48bb81af7450e641e3fe0b0394d7381e342805479178b3d335d60ca7cf"}, - {file = "kiwisolver-1.4.7-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:8e045731a5416357638d1700927529e2b8ab304811671f665b225f8bf8d8f933"}, - {file = "kiwisolver-1.4.7-cp38-cp38-musllinux_1_2_i686.whl", hash = "sha256:4322872d5772cae7369f8351da1edf255a604ea7087fe295411397d0cfd9655e"}, - {file = "kiwisolver-1.4.7-cp38-cp38-musllinux_1_2_ppc64le.whl", hash = "sha256:e1631290ee9271dffe3062d2634c3ecac02c83890ada077d225e081aca8aab89"}, - {file = "kiwisolver-1.4.7-cp38-cp38-musllinux_1_2_s390x.whl", hash = "sha256:edcfc407e4eb17e037bca59be0e85a2031a2ac87e4fed26d3e9df88b4165f92d"}, - {file = "kiwisolver-1.4.7-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:4d05d81ecb47d11e7f8932bd8b61b720bf0b41199358f3f5e36d38e28f0532c5"}, - {file = "kiwisolver-1.4.7-cp38-cp38-win32.whl", hash = "sha256:b38ac83d5f04b15e515fd86f312479d950d05ce2368d5413d46c088dda7de90a"}, - {file = "kiwisolver-1.4.7-cp38-cp38-win_amd64.whl", hash = "sha256:d83db7cde68459fc803052a55ace60bea2bae361fc3b7a6d5da07e11954e4b09"}, - {file = "kiwisolver-1.4.7-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:3f9362ecfca44c863569d3d3c033dbe8ba452ff8eed6f6b5806382741a1334bd"}, - {file = "kiwisolver-1.4.7-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:e8df2eb9b2bac43ef8b082e06f750350fbbaf2887534a5be97f6cf07b19d9583"}, - {file = "kiwisolver-1.4.7-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:f32d6edbc638cde7652bd690c3e728b25332acbadd7cad670cc4a02558d9c417"}, - {file = "kiwisolver-1.4.7-cp39-cp39-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:e2e6c39bd7b9372b0be21456caab138e8e69cc0fc1190a9dfa92bd45a1e6e904"}, - {file = "kiwisolver-1.4.7-cp39-cp39-manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:dda56c24d869b1193fcc763f1284b9126550eaf84b88bbc7256e15028f19188a"}, - {file = "kiwisolver-1.4.7-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:79849239c39b5e1fd906556c474d9b0439ea6792b637511f3fe3a41158d89ca8"}, - {file = "kiwisolver-1.4.7-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:5e3bc157fed2a4c02ec468de4ecd12a6e22818d4f09cde2c31ee3226ffbefab2"}, - {file = 
"kiwisolver-1.4.7-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:3da53da805b71e41053dc670f9a820d1157aae77b6b944e08024d17bcd51ef88"}, - {file = "kiwisolver-1.4.7-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:8705f17dfeb43139a692298cb6637ee2e59c0194538153e83e9ee0c75c2eddde"}, - {file = "kiwisolver-1.4.7-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:82a5c2f4b87c26bb1a0ef3d16b5c4753434633b83d365cc0ddf2770c93829e3c"}, - {file = "kiwisolver-1.4.7-cp39-cp39-musllinux_1_2_ppc64le.whl", hash = "sha256:ce8be0466f4c0d585cdb6c1e2ed07232221df101a4c6f28821d2aa754ca2d9e2"}, - {file = "kiwisolver-1.4.7-cp39-cp39-musllinux_1_2_s390x.whl", hash = "sha256:409afdfe1e2e90e6ee7fc896f3df9a7fec8e793e58bfa0d052c8a82f99c37abb"}, - {file = "kiwisolver-1.4.7-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:5b9c3f4ee0b9a439d2415012bd1b1cc2df59e4d6a9939f4d669241d30b414327"}, - {file = "kiwisolver-1.4.7-cp39-cp39-win32.whl", hash = "sha256:a79ae34384df2b615eefca647a2873842ac3b596418032bef9a7283675962644"}, - {file = "kiwisolver-1.4.7-cp39-cp39-win_amd64.whl", hash = "sha256:cf0438b42121a66a3a667de17e779330fc0f20b0d97d59d2f2121e182b0505e4"}, - {file = "kiwisolver-1.4.7-cp39-cp39-win_arm64.whl", hash = "sha256:764202cc7e70f767dab49e8df52c7455e8de0df5d858fa801a11aa0d882ccf3f"}, - {file = "kiwisolver-1.4.7-pp310-pypy310_pp73-macosx_10_15_x86_64.whl", hash = "sha256:94252291e3fe68001b1dd747b4c0b3be12582839b95ad4d1b641924d68fd4643"}, - {file = "kiwisolver-1.4.7-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:5b7dfa3b546da08a9f622bb6becdb14b3e24aaa30adba66749d38f3cc7ea9706"}, - {file = "kiwisolver-1.4.7-pp310-pypy310_pp73-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:bd3de6481f4ed8b734da5df134cd5a6a64fe32124fe83dde1e5b5f29fe30b1e6"}, - {file = "kiwisolver-1.4.7-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a91b5f9f1205845d488c928e8570dcb62b893372f63b8b6e98b863ebd2368ff2"}, - {file = "kiwisolver-1.4.7-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:40fa14dbd66b8b8f470d5fc79c089a66185619d31645f9b0773b88b19f7223c4"}, - {file = "kiwisolver-1.4.7-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:eb542fe7933aa09d8d8f9d9097ef37532a7df6497819d16efe4359890a2f417a"}, - {file = "kiwisolver-1.4.7-pp38-pypy38_pp73-macosx_10_9_x86_64.whl", hash = "sha256:bfa1acfa0c54932d5607e19a2c24646fb4c1ae2694437789129cf099789a3b00"}, - {file = "kiwisolver-1.4.7-pp38-pypy38_pp73-macosx_11_0_arm64.whl", hash = "sha256:eee3ea935c3d227d49b4eb85660ff631556841f6e567f0f7bda972df6c2c9935"}, - {file = "kiwisolver-1.4.7-pp38-pypy38_pp73-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:f3160309af4396e0ed04db259c3ccbfdc3621b5559b5453075e5de555e1f3a1b"}, - {file = "kiwisolver-1.4.7-pp38-pypy38_pp73-manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:a17f6a29cf8935e587cc8a4dbfc8368c55edc645283db0ce9801016f83526c2d"}, - {file = "kiwisolver-1.4.7-pp38-pypy38_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:10849fb2c1ecbfae45a693c070e0320a91b35dd4bcf58172c023b994283a124d"}, - {file = "kiwisolver-1.4.7-pp38-pypy38_pp73-win_amd64.whl", hash = "sha256:ac542bf38a8a4be2dc6b15248d36315ccc65f0743f7b1a76688ffb6b5129a5c2"}, - {file = "kiwisolver-1.4.7-pp39-pypy39_pp73-macosx_10_15_x86_64.whl", hash = "sha256:8b01aac285f91ca889c800042c35ad3b239e704b150cfd3382adfc9dcc780e39"}, - {file = "kiwisolver-1.4.7-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = 
"sha256:48be928f59a1f5c8207154f935334d374e79f2b5d212826307d072595ad76a2e"}, - {file = "kiwisolver-1.4.7-pp39-pypy39_pp73-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:f37cfe618a117e50d8c240555331160d73d0411422b59b5ee217843d7b693608"}, - {file = "kiwisolver-1.4.7-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:599b5c873c63a1f6ed7eead644a8a380cfbdf5db91dcb6f85707aaab213b1674"}, - {file = "kiwisolver-1.4.7-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:801fa7802e5cfabe3ab0c81a34c323a319b097dfb5004be950482d882f3d7225"}, - {file = "kiwisolver-1.4.7-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:0c6c43471bc764fad4bc99c5c2d6d16a676b1abf844ca7c8702bdae92df01ee0"}, - {file = "kiwisolver-1.4.7.tar.gz", hash = "sha256:9893ff81bd7107f7b685d3017cc6583daadb4fc26e4a888350df530e41980a60"}, -] - [[package]] name = "kombu" -version = "5.4.1" +version = "5.4.2" description = "Messaging library for Python." optional = false python-versions = ">=3.8" files = [ - {file = "kombu-5.4.1-py3-none-any.whl", hash = "sha256:621d365f234e4c089596f3a2510f1ade07026efc28caca426161d8f458786cab"}, - {file = "kombu-5.4.1.tar.gz", hash = "sha256:1c05178826dab811f8cab5b0a154d42a7a33d8bcdde9fa3d7b4582e43c3c03db"}, + {file = "kombu-5.4.2-py3-none-any.whl", hash = "sha256:14212f5ccf022fc0a70453bb025a1dcc32782a588c49ea866884047d66e14763"}, + {file = "kombu-5.4.2.tar.gz", hash = "sha256:eef572dd2fd9fc614b37580e3caeafdd5af46c1eff31e7fba89138cdb406f2cf"}, ] [package.dependencies] amqp = ">=5.1.1,<6.0.0" +tzdata = {version = "*", markers = "python_version >= \"3.9\""} vine = "5.1.0" [package.extras] @@ -4373,17 +3461,18 @@ zookeeper = ["kazoo (>=2.8.0)"] [[package]] name = "kubernetes" -version = "30.1.0" +version = "31.0.0" description = "Kubernetes python client" optional = false python-versions = ">=3.6" files = [ - {file = "kubernetes-30.1.0-py2.py3-none-any.whl", hash = "sha256:e212e8b7579031dd2e512168b617373bc1e03888d41ac4e04039240a292d478d"}, - {file = "kubernetes-30.1.0.tar.gz", hash = "sha256:41e4c77af9f28e7a6c314e3bd06a8c6229ddd787cad684e0ab9f69b498e98ebc"}, + {file = "kubernetes-31.0.0-py2.py3-none-any.whl", hash = "sha256:bf141e2d380c8520eada8b351f4e319ffee9636328c137aa432bc486ca1200e1"}, + {file = "kubernetes-31.0.0.tar.gz", hash = "sha256:28945de906c8c259c1ebe62703b56a03b714049372196f854105afe4e6d014c0"}, ] [package.dependencies] certifi = ">=14.05.14" +durationpy = ">=0.7" google-auth = ">=1.0.1" oauthlib = ">=3.2.2" python-dateutil = ">=2.5.3" @@ -4413,13 +3502,13 @@ six = "*" [[package]] name = "langfuse" -version = "2.48.1" +version = "2.51.2" description = "A client library for accessing langfuse" optional = false python-versions = "<4.0,>=3.8.1" files = [ - {file = "langfuse-2.48.1-py3-none-any.whl", hash = "sha256:8661070b6d94ba1d7da92c054f3110b6ecf4489d6e8204a4080f934f3f49ebf2"}, - {file = "langfuse-2.48.1.tar.gz", hash = "sha256:b8117d90babec6be1bc3303b42e0b71848531eae44118e6e0123d03e7961d0fc"}, + {file = "langfuse-2.51.2-py3-none-any.whl", hash = "sha256:7aab94a9452cda4587a2cd4917e455da1afd7f8a2696688742130e2f2d23ca59"}, + {file = "langfuse-2.51.2.tar.gz", hash = "sha256:0982b108ab4c02947f682e442b0796b7a73825d31eeace1771575f6454b8f79a"}, ] [package.dependencies] @@ -4438,13 +3527,13 @@ openai = ["openai (>=0.27.8)"] [[package]] name = "langsmith" -version = "0.1.120" +version = "0.1.129" description = "Client library to connect to the LangSmith LLM Tracing and Evaluation Platform." 
optional = false python-versions = "<4.0,>=3.8.1" files = [ - {file = "langsmith-0.1.120-py3-none-any.whl", hash = "sha256:54d2785e301646c0988e0a69ebe4d976488c87b41928b358cb153b6ddd8db62b"}, - {file = "langsmith-0.1.120.tar.gz", hash = "sha256:25499ca187b41bd89d784b272b97a8d76f60e0e21bdf20336e8a2aa6a9b23ac9"}, + {file = "langsmith-0.1.129-py3-none-any.whl", hash = "sha256:31393fbbb17d6be5b99b9b22d530450094fab23c6c37281a6a6efb2143d05347"}, + {file = "langsmith-0.1.129.tar.gz", hash = "sha256:6c3ba66471bef41b9f87da247cc0b493268b3f54656f73648a256a205261b6a0"}, ] [package.dependencies] @@ -4871,54 +3960,6 @@ dev = ["marshmallow[tests]", "pre-commit (>=3.5,<4.0)", "tox"] docs = ["alabaster (==1.0.0)", "autodocsumm (==0.2.13)", "sphinx (==8.0.2)", "sphinx-issues (==4.1.0)", "sphinx-version-warning (==1.1.2)"] tests = ["pytest", "pytz", "simplejson"] -[[package]] -name = "matplotlib" -version = "3.8.4" -description = "Python plotting package" -optional = false -python-versions = ">=3.9" -files = [ - {file = "matplotlib-3.8.4-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:abc9d838f93583650c35eca41cfcec65b2e7cb50fd486da6f0c49b5e1ed23014"}, - {file = "matplotlib-3.8.4-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:8f65c9f002d281a6e904976007b2d46a1ee2bcea3a68a8c12dda24709ddc9106"}, - {file = "matplotlib-3.8.4-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ce1edd9f5383b504dbc26eeea404ed0a00656c526638129028b758fd43fc5f10"}, - {file = "matplotlib-3.8.4-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ecd79298550cba13a43c340581a3ec9c707bd895a6a061a78fa2524660482fc0"}, - {file = "matplotlib-3.8.4-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:90df07db7b599fe7035d2f74ab7e438b656528c68ba6bb59b7dc46af39ee48ef"}, - {file = "matplotlib-3.8.4-cp310-cp310-win_amd64.whl", hash = "sha256:ac24233e8f2939ac4fd2919eed1e9c0871eac8057666070e94cbf0b33dd9c338"}, - {file = "matplotlib-3.8.4-cp311-cp311-macosx_10_12_x86_64.whl", hash = "sha256:72f9322712e4562e792b2961971891b9fbbb0e525011e09ea0d1f416c4645661"}, - {file = "matplotlib-3.8.4-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:232ce322bfd020a434caaffbd9a95333f7c2491e59cfc014041d95e38ab90d1c"}, - {file = "matplotlib-3.8.4-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6addbd5b488aedb7f9bc19f91cd87ea476206f45d7116fcfe3d31416702a82fa"}, - {file = "matplotlib-3.8.4-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:cc4ccdc64e3039fc303defd119658148f2349239871db72cd74e2eeaa9b80b71"}, - {file = "matplotlib-3.8.4-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:b7a2a253d3b36d90c8993b4620183b55665a429da8357a4f621e78cd48b2b30b"}, - {file = "matplotlib-3.8.4-cp311-cp311-win_amd64.whl", hash = "sha256:8080d5081a86e690d7688ffa542532e87f224c38a6ed71f8fbed34dd1d9fedae"}, - {file = "matplotlib-3.8.4-cp312-cp312-macosx_10_12_x86_64.whl", hash = "sha256:6485ac1f2e84676cff22e693eaa4fbed50ef5dc37173ce1f023daef4687df616"}, - {file = "matplotlib-3.8.4-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:c89ee9314ef48c72fe92ce55c4e95f2f39d70208f9f1d9db4e64079420d8d732"}, - {file = "matplotlib-3.8.4-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:50bac6e4d77e4262c4340d7a985c30912054745ec99756ce213bfbc3cb3808eb"}, - {file = "matplotlib-3.8.4-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f51c4c869d4b60d769f7b4406eec39596648d9d70246428745a681c327a8ad30"}, - {file = 
"matplotlib-3.8.4-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:b12ba985837e4899b762b81f5b2845bd1a28f4fdd1a126d9ace64e9c4eb2fb25"}, - {file = "matplotlib-3.8.4-cp312-cp312-win_amd64.whl", hash = "sha256:7a6769f58ce51791b4cb8b4d7642489df347697cd3e23d88266aaaee93b41d9a"}, - {file = "matplotlib-3.8.4-cp39-cp39-macosx_10_12_x86_64.whl", hash = "sha256:843cbde2f0946dadd8c5c11c6d91847abd18ec76859dc319362a0964493f0ba6"}, - {file = "matplotlib-3.8.4-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:1c13f041a7178f9780fb61cc3a2b10423d5e125480e4be51beaf62b172413b67"}, - {file = "matplotlib-3.8.4-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:fb44f53af0a62dc80bba4443d9b27f2fde6acfdac281d95bc872dc148a6509cc"}, - {file = "matplotlib-3.8.4-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:606e3b90897554c989b1e38a258c626d46c873523de432b1462f295db13de6f9"}, - {file = "matplotlib-3.8.4-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:9bb0189011785ea794ee827b68777db3ca3f93f3e339ea4d920315a0e5a78d54"}, - {file = "matplotlib-3.8.4-cp39-cp39-win_amd64.whl", hash = "sha256:6209e5c9aaccc056e63b547a8152661324404dd92340a6e479b3a7f24b42a5d0"}, - {file = "matplotlib-3.8.4-pp39-pypy39_pp73-macosx_10_12_x86_64.whl", hash = "sha256:c7064120a59ce6f64103c9cefba8ffe6fba87f2c61d67c401186423c9a20fd35"}, - {file = "matplotlib-3.8.4-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a0e47eda4eb2614300fc7bb4657fced3e83d6334d03da2173b09e447418d499f"}, - {file = "matplotlib-3.8.4-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:493e9f6aa5819156b58fce42b296ea31969f2aab71c5b680b4ea7a3cb5c07d94"}, - {file = "matplotlib-3.8.4.tar.gz", hash = "sha256:8aac397d5e9ec158960e31c381c5ffc52ddd52bd9a47717e2a694038167dffea"}, -] - -[package.dependencies] -contourpy = ">=1.0.1" -cycler = ">=0.10" -fonttools = ">=4.22.0" -kiwisolver = ">=1.3.1" -numpy = ">=1.21" -packaging = ">=20.0" -pillow = ">=8" -pyparsing = ">=2.3.1" -python-dateutil = ">=2.7" - [[package]] name = "mdurl" version = "0.1.2" @@ -4948,95 +3989,116 @@ tqdm = "*" [[package]] name = "mmh3" -version = "4.1.0" +version = "5.0.1" description = "Python extension for MurmurHash (MurmurHash3), a set of fast and robust hash functions." 
optional = false -python-versions = "*" +python-versions = ">=3.8" files = [ - {file = "mmh3-4.1.0-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:be5ac76a8b0cd8095784e51e4c1c9c318c19edcd1709a06eb14979c8d850c31a"}, - {file = "mmh3-4.1.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:98a49121afdfab67cd80e912b36404139d7deceb6773a83620137aaa0da5714c"}, - {file = "mmh3-4.1.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:5259ac0535874366e7d1a5423ef746e0d36a9e3c14509ce6511614bdc5a7ef5b"}, - {file = "mmh3-4.1.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c5950827ca0453a2be357696da509ab39646044e3fa15cad364eb65d78797437"}, - {file = "mmh3-4.1.0-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:1dd0f652ae99585b9dd26de458e5f08571522f0402155809fd1dc8852a613a39"}, - {file = "mmh3-4.1.0-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:99d25548070942fab1e4a6f04d1626d67e66d0b81ed6571ecfca511f3edf07e6"}, - {file = "mmh3-4.1.0-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:53db8d9bad3cb66c8f35cbc894f336273f63489ce4ac416634932e3cbe79eb5b"}, - {file = "mmh3-4.1.0-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:75da0f615eb55295a437264cc0b736753f830b09d102aa4c2a7d719bc445ec05"}, - {file = "mmh3-4.1.0-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:b926b07fd678ea84b3a2afc1fa22ce50aeb627839c44382f3d0291e945621e1a"}, - {file = "mmh3-4.1.0-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:c5b053334f9b0af8559d6da9dc72cef0a65b325ebb3e630c680012323c950bb6"}, - {file = "mmh3-4.1.0-cp310-cp310-musllinux_1_1_ppc64le.whl", hash = "sha256:5bf33dc43cd6de2cb86e0aa73a1cc6530f557854bbbe5d59f41ef6de2e353d7b"}, - {file = "mmh3-4.1.0-cp310-cp310-musllinux_1_1_s390x.whl", hash = "sha256:fa7eacd2b830727ba3dd65a365bed8a5c992ecd0c8348cf39a05cc77d22f4970"}, - {file = "mmh3-4.1.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:42dfd6742b9e3eec599f85270617debfa0bbb913c545bb980c8a4fa7b2d047da"}, - {file = "mmh3-4.1.0-cp310-cp310-win32.whl", hash = "sha256:2974ad343f0d39dcc88e93ee6afa96cedc35a9883bc067febd7ff736e207fa47"}, - {file = "mmh3-4.1.0-cp310-cp310-win_amd64.whl", hash = "sha256:74699a8984ded645c1a24d6078351a056f5a5f1fe5838870412a68ac5e28d865"}, - {file = "mmh3-4.1.0-cp310-cp310-win_arm64.whl", hash = "sha256:f0dc874cedc23d46fc488a987faa6ad08ffa79e44fb08e3cd4d4cf2877c00a00"}, - {file = "mmh3-4.1.0-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:3280a463855b0eae64b681cd5b9ddd9464b73f81151e87bb7c91a811d25619e6"}, - {file = "mmh3-4.1.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:97ac57c6c3301769e757d444fa7c973ceb002cb66534b39cbab5e38de61cd896"}, - {file = "mmh3-4.1.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:a7b6502cdb4dbd880244818ab363c8770a48cdccecf6d729ade0241b736b5ec0"}, - {file = "mmh3-4.1.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:52ba2da04671a9621580ddabf72f06f0e72c1c9c3b7b608849b58b11080d8f14"}, - {file = "mmh3-4.1.0-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:5a5fef4c4ecc782e6e43fbeab09cff1bac82c998a1773d3a5ee6a3605cde343e"}, - {file = "mmh3-4.1.0-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:5135358a7e00991f73b88cdc8eda5203bf9de22120d10a834c5761dbeb07dd13"}, - {file = 
"mmh3-4.1.0-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:cff9ae76a54f7c6fe0167c9c4028c12c1f6de52d68a31d11b6790bb2ae685560"}, - {file = "mmh3-4.1.0-cp311-cp311-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f6f02576a4d106d7830ca90278868bf0983554dd69183b7bbe09f2fcd51cf54f"}, - {file = "mmh3-4.1.0-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:073d57425a23721730d3ff5485e2da489dd3c90b04e86243dd7211f889898106"}, - {file = "mmh3-4.1.0-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:71e32ddec7f573a1a0feb8d2cf2af474c50ec21e7a8263026e8d3b4b629805db"}, - {file = "mmh3-4.1.0-cp311-cp311-musllinux_1_1_ppc64le.whl", hash = "sha256:7cbb20b29d57e76a58b40fd8b13a9130db495a12d678d651b459bf61c0714cea"}, - {file = "mmh3-4.1.0-cp311-cp311-musllinux_1_1_s390x.whl", hash = "sha256:a42ad267e131d7847076bb7e31050f6c4378cd38e8f1bf7a0edd32f30224d5c9"}, - {file = "mmh3-4.1.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:4a013979fc9390abadc445ea2527426a0e7a4495c19b74589204f9b71bcaafeb"}, - {file = "mmh3-4.1.0-cp311-cp311-win32.whl", hash = "sha256:1d3b1cdad7c71b7b88966301789a478af142bddcb3a2bee563f7a7d40519a00f"}, - {file = "mmh3-4.1.0-cp311-cp311-win_amd64.whl", hash = "sha256:0dc6dc32eb03727467da8e17deffe004fbb65e8b5ee2b502d36250d7a3f4e2ec"}, - {file = "mmh3-4.1.0-cp311-cp311-win_arm64.whl", hash = "sha256:9ae3a5c1b32dda121c7dc26f9597ef7b01b4c56a98319a7fe86c35b8bc459ae6"}, - {file = "mmh3-4.1.0-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:0033d60c7939168ef65ddc396611077a7268bde024f2c23bdc283a19123f9e9c"}, - {file = "mmh3-4.1.0-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:d6af3e2287644b2b08b5924ed3a88c97b87b44ad08e79ca9f93d3470a54a41c5"}, - {file = "mmh3-4.1.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:d82eb4defa245e02bb0b0dc4f1e7ee284f8d212633389c91f7fba99ba993f0a2"}, - {file = "mmh3-4.1.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ba245e94b8d54765e14c2d7b6214e832557e7856d5183bc522e17884cab2f45d"}, - {file = "mmh3-4.1.0-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:bb04e2feeabaad6231e89cd43b3d01a4403579aa792c9ab6fdeef45cc58d4ec0"}, - {file = "mmh3-4.1.0-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:1e3b1a27def545ce11e36158ba5d5390cdbc300cfe456a942cc89d649cf7e3b2"}, - {file = "mmh3-4.1.0-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ce0ab79ff736d7044e5e9b3bfe73958a55f79a4ae672e6213e92492ad5e734d5"}, - {file = "mmh3-4.1.0-cp312-cp312-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3b02268be6e0a8eeb8a924d7db85f28e47344f35c438c1e149878bb1c47b1cd3"}, - {file = "mmh3-4.1.0-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:deb887f5fcdaf57cf646b1e062d56b06ef2f23421c80885fce18b37143cba828"}, - {file = "mmh3-4.1.0-cp312-cp312-musllinux_1_1_i686.whl", hash = "sha256:99dd564e9e2b512eb117bd0cbf0f79a50c45d961c2a02402787d581cec5448d5"}, - {file = "mmh3-4.1.0-cp312-cp312-musllinux_1_1_ppc64le.whl", hash = "sha256:08373082dfaa38fe97aa78753d1efd21a1969e51079056ff552e687764eafdfe"}, - {file = "mmh3-4.1.0-cp312-cp312-musllinux_1_1_s390x.whl", hash = "sha256:54b9c6a2ea571b714e4fe28d3e4e2db37abfd03c787a58074ea21ee9a8fd1740"}, - {file = "mmh3-4.1.0-cp312-cp312-musllinux_1_1_x86_64.whl", hash = 
"sha256:a7b1edf24c69e3513f879722b97ca85e52f9032f24a52284746877f6a7304086"}, - {file = "mmh3-4.1.0-cp312-cp312-win32.whl", hash = "sha256:411da64b951f635e1e2284b71d81a5a83580cea24994b328f8910d40bed67276"}, - {file = "mmh3-4.1.0-cp312-cp312-win_amd64.whl", hash = "sha256:bebc3ecb6ba18292e3d40c8712482b4477abd6981c2ebf0e60869bd90f8ac3a9"}, - {file = "mmh3-4.1.0-cp312-cp312-win_arm64.whl", hash = "sha256:168473dd608ade6a8d2ba069600b35199a9af837d96177d3088ca91f2b3798e3"}, - {file = "mmh3-4.1.0-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:372f4b7e1dcde175507640679a2a8790185bb71f3640fc28a4690f73da986a3b"}, - {file = "mmh3-4.1.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:438584b97f6fe13e944faf590c90fc127682b57ae969f73334040d9fa1c7ffa5"}, - {file = "mmh3-4.1.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:6e27931b232fc676675fac8641c6ec6b596daa64d82170e8597f5a5b8bdcd3b6"}, - {file = "mmh3-4.1.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:571a92bad859d7b0330e47cfd1850b76c39b615a8d8e7aa5853c1f971fd0c4b1"}, - {file = "mmh3-4.1.0-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:4a69d6afe3190fa08f9e3a58e5145549f71f1f3fff27bd0800313426929c7068"}, - {file = "mmh3-4.1.0-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:afb127be0be946b7630220908dbea0cee0d9d3c583fa9114a07156f98566dc28"}, - {file = "mmh3-4.1.0-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:940d86522f36348ef1a494cbf7248ab3f4a1638b84b59e6c9e90408bd11ad729"}, - {file = "mmh3-4.1.0-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b3dcccc4935686619a8e3d1f7b6e97e3bd89a4a796247930ee97d35ea1a39341"}, - {file = "mmh3-4.1.0-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:01bb9b90d61854dfc2407c5e5192bfb47222d74f29d140cb2dd2a69f2353f7cc"}, - {file = "mmh3-4.1.0-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:bcb1b8b951a2c0b0fb8a5426c62a22557e2ffc52539e0a7cc46eb667b5d606a9"}, - {file = "mmh3-4.1.0-cp38-cp38-musllinux_1_1_ppc64le.whl", hash = "sha256:6477a05d5e5ab3168e82e8b106e316210ac954134f46ec529356607900aea82a"}, - {file = "mmh3-4.1.0-cp38-cp38-musllinux_1_1_s390x.whl", hash = "sha256:da5892287e5bea6977364b15712a2573c16d134bc5fdcdd4cf460006cf849278"}, - {file = "mmh3-4.1.0-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:99180d7fd2327a6fffbaff270f760576839dc6ee66d045fa3a450f3490fda7f5"}, - {file = "mmh3-4.1.0-cp38-cp38-win32.whl", hash = "sha256:9b0d4f3949913a9f9a8fb1bb4cc6ecd52879730aab5ff8c5a3d8f5b593594b73"}, - {file = "mmh3-4.1.0-cp38-cp38-win_amd64.whl", hash = "sha256:598c352da1d945108aee0c3c3cfdd0e9b3edef74108f53b49d481d3990402169"}, - {file = "mmh3-4.1.0-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:475d6d1445dd080f18f0f766277e1237fa2914e5fe3307a3b2a3044f30892103"}, - {file = "mmh3-4.1.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:5ca07c41e6a2880991431ac717c2a049056fff497651a76e26fc22224e8b5732"}, - {file = "mmh3-4.1.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:0ebe052fef4bbe30c0548d12ee46d09f1b69035ca5208a7075e55adfe091be44"}, - {file = "mmh3-4.1.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:eaefd42e85afb70f2b855a011f7b4d8a3c7e19c3f2681fa13118e4d8627378c5"}, - {file = "mmh3-4.1.0-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:ac0ae43caae5a47afe1b63a1ae3f0986dde54b5fb2d6c29786adbfb8edc9edfb"}, - {file = 
"mmh3-4.1.0-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:6218666f74c8c013c221e7f5f8a693ac9cf68e5ac9a03f2373b32d77c48904de"}, - {file = "mmh3-4.1.0-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ac59294a536ba447b5037f62d8367d7d93b696f80671c2c45645fa9f1109413c"}, - {file = "mmh3-4.1.0-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:086844830fcd1e5c84fec7017ea1ee8491487cfc877847d96f86f68881569d2e"}, - {file = "mmh3-4.1.0-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:e42b38fad664f56f77f6fbca22d08450f2464baa68acdbf24841bf900eb98e87"}, - {file = "mmh3-4.1.0-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:d08b790a63a9a1cde3b5d7d733ed97d4eb884bfbc92f075a091652d6bfd7709a"}, - {file = "mmh3-4.1.0-cp39-cp39-musllinux_1_1_ppc64le.whl", hash = "sha256:73ea4cc55e8aea28c86799ecacebca09e5f86500414870a8abaedfcbaf74d288"}, - {file = "mmh3-4.1.0-cp39-cp39-musllinux_1_1_s390x.whl", hash = "sha256:f90938ff137130e47bcec8dc1f4ceb02f10178c766e2ef58a9f657ff1f62d124"}, - {file = "mmh3-4.1.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:aa1f13e94b8631c8cd53259250556edcf1de71738936b60febba95750d9632bd"}, - {file = "mmh3-4.1.0-cp39-cp39-win32.whl", hash = "sha256:a3b680b471c181490cf82da2142029edb4298e1bdfcb67c76922dedef789868d"}, - {file = "mmh3-4.1.0-cp39-cp39-win_amd64.whl", hash = "sha256:fefef92e9c544a8dbc08f77a8d1b6d48006a750c4375bbcd5ff8199d761e263b"}, - {file = "mmh3-4.1.0-cp39-cp39-win_arm64.whl", hash = "sha256:8e2c1f6a2b41723a4f82bd5a762a777836d29d664fc0095f17910bea0adfd4a6"}, - {file = "mmh3-4.1.0.tar.gz", hash = "sha256:a1cf25348b9acd229dda464a094d6170f47d2850a1fcb762a3b6172d2ce6ca4a"}, + {file = "mmh3-5.0.1-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:f0a4b4bf05778ed77d820d6e7d0e9bd6beb0c01af10e1ce9233f5d2f814fcafa"}, + {file = "mmh3-5.0.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:ac7a391039aeab95810c2d020b69a94eb6b4b37d4e2374831e92db3a0cdf71c6"}, + {file = "mmh3-5.0.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:3a2583b5521ca49756d8d8bceba80627a9cc295f255dcab4e3df7ccc2f09679a"}, + {file = "mmh3-5.0.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:081a8423fe53c1ac94f87165f3e4c500125d343410c1a0c5f1703e898a3ef038"}, + {file = "mmh3-5.0.1-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:b8b4d72713799755dc8954a7d36d5c20a6c8de7b233c82404d122c7c7c1707cc"}, + {file = "mmh3-5.0.1-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:389a6fd51efc76d3182d36ec306448559c1244f11227d2bb771bdd0e6cc91321"}, + {file = "mmh3-5.0.1-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:39f4128edaa074bff721b1d31a72508cba4d2887ee7867f22082e1fe9d4edea0"}, + {file = "mmh3-5.0.1-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1d5d23a94d91aabba3386b3769048d5f4210fdfef80393fece2f34ba5a7b466c"}, + {file = "mmh3-5.0.1-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:16347d038361f8b8f24fd2b7ef378c9b68ddee9f7706e46269b6e0d322814713"}, + {file = "mmh3-5.0.1-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:6e299408565af7d61f2d20a5ffdd77cf2ed902460fe4e6726839d59ba4b72316"}, + {file = "mmh3-5.0.1-cp310-cp310-musllinux_1_2_ppc64le.whl", hash = "sha256:42050af21ddfc5445ee5a66e73a8fc758c71790305e3ee9e4a85a8e69e810f94"}, + {file = 
"mmh3-5.0.1-cp310-cp310-musllinux_1_2_s390x.whl", hash = "sha256:2ae9b1f5ef27ec54659920f0404b7ceb39966e28867c461bfe83a05e8d18ddb0"}, + {file = "mmh3-5.0.1-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:50c2495a02045f3047d71d4ae9cdd7a15efc0bcbb7ff17a18346834a8e2d1d19"}, + {file = "mmh3-5.0.1-cp310-cp310-win32.whl", hash = "sha256:c028fa77cddf351ca13b4a56d43c1775652cde0764cadb39120b68f02a23ecf6"}, + {file = "mmh3-5.0.1-cp310-cp310-win_amd64.whl", hash = "sha256:c5e741e421ec14400c4aae30890515c201f518403bdef29ae1e00d375bb4bbb5"}, + {file = "mmh3-5.0.1-cp310-cp310-win_arm64.whl", hash = "sha256:b17156d56fabc73dbf41bca677ceb6faed435cc8544f6566d72ea77d8a17e9d0"}, + {file = "mmh3-5.0.1-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:9a6d5a9b1b923f1643559ba1fc0bf7a5076c90cbb558878d3bf3641ce458f25d"}, + {file = "mmh3-5.0.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:3349b968be555f7334bbcce839da98f50e1e80b1c615d8e2aa847ea4a964a012"}, + {file = "mmh3-5.0.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:1bd3c94b110e55db02ab9b605029f48a2f7f677c6e58c09d44e42402d438b7e1"}, + {file = "mmh3-5.0.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d47ba84d48608f79adbb10bb09986b6dc33eeda5c2d1bd75d00820081b73bde9"}, + {file = "mmh3-5.0.1-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:c0217987a8b8525c8d9170f66d036dec4ab45cfbd53d47e8d76125791ceb155e"}, + {file = "mmh3-5.0.1-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:b2797063a34e78d1b61639a98b0edec1c856fa86ab80c7ec859f1796d10ba429"}, + {file = "mmh3-5.0.1-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:8bba16340adcbd47853a2fbe5afdb397549e8f2e79324ff1dced69a3f8afe7c3"}, + {file = "mmh3-5.0.1-cp311-cp311-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:282797957c9f60b51b9d768a602c25f579420cc9af46feb77d457a27823d270a"}, + {file = "mmh3-5.0.1-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:e4fb670c29e63f954f9e7a2cdcd57b36a854c2538f579ef62681ccbaa1de2b69"}, + {file = "mmh3-5.0.1-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:8ee7d85438dc6aff328e19ab052086a3c29e8a9b632998a49e5c4b0034e9e8d6"}, + {file = "mmh3-5.0.1-cp311-cp311-musllinux_1_2_ppc64le.whl", hash = "sha256:b7fb5db231f3092444bc13901e6a8d299667126b00636ffbad4a7b45e1051e2f"}, + {file = "mmh3-5.0.1-cp311-cp311-musllinux_1_2_s390x.whl", hash = "sha256:c100dd441703da5ec136b1d9003ed4a041d8a1136234c9acd887499796df6ad8"}, + {file = "mmh3-5.0.1-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:71f3b765138260fd7a7a2dba0ea5727dabcd18c1f80323c9cfef97a7e86e01d0"}, + {file = "mmh3-5.0.1-cp311-cp311-win32.whl", hash = "sha256:9a76518336247fd17689ce3ae5b16883fd86a490947d46a0193d47fb913e26e3"}, + {file = "mmh3-5.0.1-cp311-cp311-win_amd64.whl", hash = "sha256:336bc4df2e44271f1c302d289cc3d78bd52d3eed8d306c7e4bff8361a12bf148"}, + {file = "mmh3-5.0.1-cp311-cp311-win_arm64.whl", hash = "sha256:af6522722fbbc5999aa66f7244d0986767a46f1fb05accc5200f75b72428a508"}, + {file = "mmh3-5.0.1-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:f2730bb263ed9c388e8860438b057a53e3cc701134a6ea140f90443c4c11aa40"}, + {file = "mmh3-5.0.1-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:6246927bc293f6d56724536400b85fb85f5be26101fa77d5f97dd5e2a4c69bf2"}, + {file = "mmh3-5.0.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:fbca322519a6e6e25b6abf43e940e1667cf8ea12510e07fb4919b48a0cd1c411"}, + 
{file = "mmh3-5.0.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:eae8c19903ed8a1724ad9e67e86f15d198a7a1271a4f9be83d47e38f312ed672"}, + {file = "mmh3-5.0.1-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:a09fd6cc72c07c0c07c3357714234b646d78052487c4a3bd5f7f6e08408cff60"}, + {file = "mmh3-5.0.1-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:2ff8551fee7ae3b11c5d986b6347ade0dccaadd4670ffdb2b944dee120ffcc84"}, + {file = "mmh3-5.0.1-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:e39694c73a5a20c8bf36dfd8676ed351e5234d55751ba4f7562d85449b21ef3f"}, + {file = "mmh3-5.0.1-cp312-cp312-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:eba6001989a92f72a89c7cf382fda831678bd780707a66b4f8ca90239fdf2123"}, + {file = "mmh3-5.0.1-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:0771f90c9911811cc606a5c7b7b58f33501c9ee896ed68a6ac22c7d55878ecc0"}, + {file = "mmh3-5.0.1-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:09b31ed0c0c0920363e96641fac4efde65b1ab62b8df86293142f35a254e72b4"}, + {file = "mmh3-5.0.1-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:5cf4a8deda0235312db12075331cb417c4ba163770edfe789bde71d08a24b692"}, + {file = "mmh3-5.0.1-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:41f7090a95185ef20ac018581a99337f0cbc84a2135171ee3290a9c0d9519585"}, + {file = "mmh3-5.0.1-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:b97b5b368fb7ff22194ec5854f5b12d8de9ab67a0f304728c7f16e5d12135b76"}, + {file = "mmh3-5.0.1-cp312-cp312-win32.whl", hash = "sha256:842516acf04da546f94fad52db125ee619ccbdcada179da51c326a22c4578cb9"}, + {file = "mmh3-5.0.1-cp312-cp312-win_amd64.whl", hash = "sha256:d963be0dbfd9fca209c17172f6110787ebf78934af25e3694fe2ba40e55c1e2b"}, + {file = "mmh3-5.0.1-cp312-cp312-win_arm64.whl", hash = "sha256:a5da292ceeed8ce8e32b68847261a462d30fd7b478c3f55daae841404f433c15"}, + {file = "mmh3-5.0.1-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:673e3f1c8d4231d6fb0271484ee34cb7146a6499fc0df80788adb56fd76842da"}, + {file = "mmh3-5.0.1-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:f795a306bd16a52ad578b663462cc8e95500b3925d64118ae63453485d67282b"}, + {file = "mmh3-5.0.1-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:5ed57a5e28e502a1d60436cc25c76c3a5ba57545f250f2969af231dc1221e0a5"}, + {file = "mmh3-5.0.1-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:632c28e7612e909dbb6cbe2fe496201ada4695b7715584005689c5dc038e59ad"}, + {file = "mmh3-5.0.1-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:53fd6bd525a5985e391c43384672d9d6b317fcb36726447347c7fc75bfed34ec"}, + {file = "mmh3-5.0.1-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:dceacf6b0b961a0e499836af3aa62d60633265607aef551b2a3e3c48cdaa5edd"}, + {file = "mmh3-5.0.1-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:8f0738d478fdfb5d920f6aff5452c78f2c35b0eff72caa2a97dfe38e82f93da2"}, + {file = "mmh3-5.0.1-cp313-cp313-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8e70285e7391ab88b872e5bef632bad16b9d99a6d3ca0590656a4753d55988af"}, + {file = "mmh3-5.0.1-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:27e5fc6360aa6b828546a4318da1a7da6bf6e5474ccb053c3a6aa8ef19ff97bd"}, + {file = "mmh3-5.0.1-cp313-cp313-musllinux_1_2_i686.whl", 
hash = "sha256:7989530c3c1e2c17bf5a0ec2bba09fd19819078ba90beedabb1c3885f5040b0d"}, + {file = "mmh3-5.0.1-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:cdad7bee649950da7ecd3cbbbd12fb81f1161072ecbdb5acfa0018338c5cb9cf"}, + {file = "mmh3-5.0.1-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:e143b8f184c1bb58cecd85ab4a4fd6dc65a2d71aee74157392c3fddac2a4a331"}, + {file = "mmh3-5.0.1-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:e5eb12e886f3646dd636f16b76eb23fc0c27e8ff3c1ae73d4391e50ef60b40f6"}, + {file = "mmh3-5.0.1-cp313-cp313-win32.whl", hash = "sha256:16e6dddfa98e1c2d021268e72c78951234186deb4df6630e984ac82df63d0a5d"}, + {file = "mmh3-5.0.1-cp313-cp313-win_amd64.whl", hash = "sha256:d3ffb792d70b8c4a2382af3598dad6ae0c5bd9cee5b7ffcc99aa2f5fd2c1bf70"}, + {file = "mmh3-5.0.1-cp313-cp313-win_arm64.whl", hash = "sha256:122fa9ec148383f9124292962bda745f192b47bfd470b2af5fe7bb3982b17896"}, + {file = "mmh3-5.0.1-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:b12bad8c75e6ff5d67319794fb6a5e8c713826c818d47f850ad08b4aa06960c6"}, + {file = "mmh3-5.0.1-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:e5bbb066538c1048d542246fc347bb7994bdda29a3aea61c22f9f8b57111ce69"}, + {file = "mmh3-5.0.1-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:eee6134273f64e2a106827cc8fd77e70cc7239a285006fc6ab4977d59b015af2"}, + {file = "mmh3-5.0.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d04d9aa19d48e4c7bbec9cabc2c4dccc6ff3b2402f856d5bf0de03e10f167b5b"}, + {file = "mmh3-5.0.1-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:79f37da1eed034d06567a69a7988456345c7f29e49192831c3975b464493b16e"}, + {file = "mmh3-5.0.1-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:242f77666743337aa828a2bf2da71b6ba79623ee7f93edb11e009f69237c8561"}, + {file = "mmh3-5.0.1-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ffd943fff690463945f6441a2465555b3146deaadf6a5e88f2590d14c655d71b"}, + {file = "mmh3-5.0.1-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:565b15f8d7df43acb791ff5a360795c20bfa68bca8b352509e0fbabd06cc48cd"}, + {file = "mmh3-5.0.1-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:fc6aafb867c2030df98ac7760ff76b500359252867985f357bd387739f3d5287"}, + {file = "mmh3-5.0.1-cp38-cp38-musllinux_1_2_i686.whl", hash = "sha256:32898170644d45aa27c974ab0d067809c066205110f5c6d09f47d9ece6978bfe"}, + {file = "mmh3-5.0.1-cp38-cp38-musllinux_1_2_ppc64le.whl", hash = "sha256:42865567838d2193eb64e0ef571f678bf361a254fcdef0c5c8e73243217829bd"}, + {file = "mmh3-5.0.1-cp38-cp38-musllinux_1_2_s390x.whl", hash = "sha256:5ff5c1f301c4a8b6916498969c0fcc7e3dbc56b4bfce5cfe3fe31f3f4609e5ae"}, + {file = "mmh3-5.0.1-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:be74c2dda8a6f44a504450aa2c3507f8067a159201586fc01dd41ab80efc350f"}, + {file = "mmh3-5.0.1-cp38-cp38-win32.whl", hash = "sha256:5610a842621ff76c04b20b29cf5f809b131f241a19d4937971ba77dc99a7f330"}, + {file = "mmh3-5.0.1-cp38-cp38-win_amd64.whl", hash = "sha256:de15739ac50776fe8aa1ef13f1be46a6ee1fbd45f6d0651084097eb2be0a5aa4"}, + {file = "mmh3-5.0.1-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:48e84cf3cc7e8c41bc07de72299a73b92d9e3cde51d97851420055b1484995f7"}, + {file = "mmh3-5.0.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:6dd9dc28c2d168c49928195c2e29b96f9582a5d07bd690a28aede4cc07b0e696"}, + {file = "mmh3-5.0.1-cp39-cp39-macosx_11_0_arm64.whl", hash = 
"sha256:2771a1c56a3d4bdad990309cff5d0a8051f29c8ec752d001f97d6392194ae880"}, + {file = "mmh3-5.0.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c5ff2a8322ba40951a84411550352fba1073ce1c1d1213bb7530f09aed7f8caf"}, + {file = "mmh3-5.0.1-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:a16bd3ec90682c9e0a343e6bd4c778c09947c8c5395cdb9e5d9b82b2559efbca"}, + {file = "mmh3-5.0.1-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:d45733a78d68b5b05ff4a823aea51fa664df1d3bf4929b152ff4fd6dea2dd69b"}, + {file = "mmh3-5.0.1-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:904285e83cedebc8873b0838ed54c20f7344120be26e2ca5a907ab007a18a7a0"}, + {file = "mmh3-5.0.1-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ac4aeb1784e43df728034d0ed72e4b2648db1a69fef48fa58e810e13230ae5ff"}, + {file = "mmh3-5.0.1-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:cb3d4f751a0b8b4c8d06ef1c085216c8fddcc8b8c8d72445976b5167a40c6d1e"}, + {file = "mmh3-5.0.1-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:8021851935600e60c42122ed1176399d7692df338d606195cd599d228a04c1c6"}, + {file = "mmh3-5.0.1-cp39-cp39-musllinux_1_2_ppc64le.whl", hash = "sha256:6182d5924a5efc451900f864cbb021d7e8ad5d524816ca17304a0f663bc09bb5"}, + {file = "mmh3-5.0.1-cp39-cp39-musllinux_1_2_s390x.whl", hash = "sha256:5f30b834552a4f79c92e3d266336fb87fd92ce1d36dc6813d3e151035890abbd"}, + {file = "mmh3-5.0.1-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:cd4383f35e915e06d077df27e04ffd3be7513ec6a9de2d31f430393f67e192a7"}, + {file = "mmh3-5.0.1-cp39-cp39-win32.whl", hash = "sha256:1455fb6b42665a97db8fc66e89a861e52b567bce27ed054c47877183f86ea6e3"}, + {file = "mmh3-5.0.1-cp39-cp39-win_amd64.whl", hash = "sha256:9e26a0f4eb9855a143f5938a53592fa14c2d3b25801c2106886ab6c173982780"}, + {file = "mmh3-5.0.1-cp39-cp39-win_arm64.whl", hash = "sha256:0d0a35a69abdad7549c4030a714bb4ad07902edb3bbe61e1bbc403ded5d678be"}, + {file = "mmh3-5.0.1.tar.gz", hash = "sha256:7dab080061aeb31a6069a181f27c473a1f67933854e36a3464931f2716508896"}, ] [package.extras] -test = ["mypy (>=1.0)", "pytest (>=7.0.0)"] +benchmark = ["pymmh3 (==0.0.5)", "pyperf (==2.7.0)", "xxhash (==3.5.0)"] +docs = ["myst-parser (==4.0.0)", "shibuya (==2024.8.30)", "sphinx (==8.0.2)", "sphinx-copybutton (==0.5.2)"] +lint = ["black (==24.8.0)", "clang-format (==18.1.8)", "isort (==5.13.2)", "pylint (==3.2.7)"] +plot = ["matplotlib (==3.9.2)", "pandas (==2.2.2)"] +test = ["pytest (==8.3.3)", "pytest-sugar (==1.0.0)"] +type = ["mypy (==1.11.2)"] [[package]] name = "mock" @@ -5282,17 +4344,6 @@ files = [ [package.dependencies] dill = ">=0.3.8" -[[package]] -name = "multitasking" -version = "0.0.11" -description = "Non-blocking Python methods using decorators" -optional = false -python-versions = "*" -files = [ - {file = "multitasking-0.0.11-py3-none-any.whl", hash = "sha256:1e5b37a5f8fc1e6cfaafd1a82b6b1cc6d2ed20037d3b89c25a84f499bd7b3dd4"}, - {file = "multitasking-0.0.11.tar.gz", hash = "sha256:4d6bc3cc65f9b2dca72fb5a787850a88dae8f620c2b36ae9b55248e51bcd6026"}, -] - [[package]] name = "mypy-extensions" version = "1.0.0" @@ -5304,32 +4355,6 @@ files = [ {file = "mypy_extensions-1.0.0.tar.gz", hash = "sha256:75dbf8955dc00442a438fc4d0666508a9a97b6bd41aa2f0ffe9d2f2725af0782"}, ] -[[package]] -name = "newspaper3k" -version = "0.2.8" -description = "Simplified python article discovery & extraction." 
-optional = false -python-versions = "*" -files = [ - {file = "newspaper3k-0.2.8-py3-none-any.whl", hash = "sha256:44a864222633d3081113d1030615991c3dbba87239f6bbf59d91240f71a22e3e"}, - {file = "newspaper3k-0.2.8.tar.gz", hash = "sha256:9f1bd3e1fb48f400c715abf875cc7b0a67b7ddcd87f50c9aeeb8fcbbbd9004fb"}, -] - -[package.dependencies] -beautifulsoup4 = ">=4.4.1" -cssselect = ">=0.9.2" -feedfinder2 = ">=0.0.4" -feedparser = ">=5.2.1" -jieba3k = ">=0.35.1" -lxml = ">=3.6.0" -nltk = ">=3.2.1" -Pillow = ">=3.3.0" -python-dateutil = ">=2.5.3" -PyYAML = ">=3.11" -requests = ">=2.10.0" -tinysegmenter = "0.3" -tldextract = ">=2.0.1" - [[package]] name = "nltk" version = "3.8.1" @@ -5385,22 +4410,6 @@ aws = ["boto3", "sagemaker"] dev = ["black (==24.3.0)", "cairosvg", "coverage", "isort", "mkautodoc", "mkdocs-jupyter", "mkdocs-material", "mkdocstrings[python]", "myst-parser", "nomic[all]", "pandas", "pillow", "pylint", "pyright", "pytest", "pytorch-lightning", "twine"] local = ["gpt4all (>=2.5.0,<3)"] -[[package]] -name = "novita-client" -version = "0.5.7" -description = "novita SDK for Python" -optional = false -python-versions = ">=3.6" -files = [ - {file = "novita_client-0.5.7-py3-none-any.whl", hash = "sha256:844a4c09c98328c8d4f72e1d3f63f76285c2963dcc37ccb2de41cbfdbe7fa51d"}, - {file = "novita_client-0.5.7.tar.gz", hash = "sha256:65baf748757aafd8ab080a64f9ab069a40c0810fc1fa9be9c26596988a0aa4b4"}, -] - -[package.dependencies] -dataclass-wizard = ">=0.22.2" -pillow = ">=10.2.0" -requests = ">=2.27.1" - [[package]] name = "numba" version = "0.60.0" @@ -5437,44 +4446,44 @@ numpy = ">=1.22,<2.1" [[package]] name = "numexpr" -version = "2.9.0" +version = "2.10.1" description = "Fast numerical expression evaluator for NumPy" optional = false python-versions = ">=3.9" files = [ - {file = "numexpr-2.9.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:c52b4ac54514f5d4d8ead66768810cd5f77aa198e6064213d9b5c7b2e1c97c35"}, - {file = "numexpr-2.9.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:50f57bc333f285e8c46b1ce61c6e94ec9bb74e4ea0d674d1c6c6f4a286f64fe4"}, - {file = "numexpr-2.9.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:943ba141f3884ffafa3fa1a3ebf3cdda9e9688a67a3c91986e6eae13dc073d43"}, - {file = "numexpr-2.9.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ee48acd6339748a65c0e32403b802ebfadd9cb0e3b602ba5889896238eafdd61"}, - {file = "numexpr-2.9.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:972e29b5cecc21466c5b177e38568372ab66aab1f053ae04690a49cea09e747d"}, - {file = "numexpr-2.9.0-cp310-cp310-win32.whl", hash = "sha256:520e55d75bd99c76e376b6326e35ecf44c5ce2635a5caed72799a3885fc49173"}, - {file = "numexpr-2.9.0-cp310-cp310-win_amd64.whl", hash = "sha256:5615497c3f34b637fda9b571f7774b6a82f2367cc1364b7a4573068dd1aabcaa"}, - {file = "numexpr-2.9.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:bffcbc55dea5a5f5255e2586da08f00929998820e6592ee717273a08ad021eb3"}, - {file = "numexpr-2.9.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:374dc6ca54b2af813cb15c2b34e85092dfeac1f73d51ec358dd81876bd9adcec"}, - {file = "numexpr-2.9.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:549afc1622296cca3478a132c6e0fb5e55a19e08d32bc0d5a415434824a9c157"}, - {file = "numexpr-2.9.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7c618a5895e34db0a364dcdb9960084c080f93f9d377c45b1ca9c394c24b4e77"}, - {file = "numexpr-2.9.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = 
"sha256:37a7dd36fd79a2b69c3fd2bc2b51ac8270bebc69cc96e6d78f1148e147fcbfa8"}, - {file = "numexpr-2.9.0-cp311-cp311-win32.whl", hash = "sha256:00dab81d49239ea5423861ad627097b44d10d802df5f883d1b00f742139c3349"}, - {file = "numexpr-2.9.0-cp311-cp311-win_amd64.whl", hash = "sha256:0e2574cafb18373774f351cac45ed23b5b360d9ecd1dbf3c12dac6d6eefefc87"}, - {file = "numexpr-2.9.0-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:9761195526a228e05eba400b8c484c94bbabfea853b9ea35ab8fa1bf415331b1"}, - {file = "numexpr-2.9.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:0f619e91034b346ea85a4e1856ff06011dcb7dce10a60eda75e74db90120f880"}, - {file = "numexpr-2.9.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2749bce1c48706d58894992634a43b8458c4ba9411191471c4565fa41e9979ec"}, - {file = "numexpr-2.9.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e1c31f621a625c7be602f92b027d90f2d3d60dcbc19b106e77fb04a4362152af"}, - {file = "numexpr-2.9.0-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:1a78b937861d13de67d440d54c85a835faed7572be5a6fd10d4f3bd4e66e157f"}, - {file = "numexpr-2.9.0-cp312-cp312-win32.whl", hash = "sha256:aa6298fb46bd7ec69911b5b80927a00663d066e719b29f48eb952d559bdd8371"}, - {file = "numexpr-2.9.0-cp312-cp312-win_amd64.whl", hash = "sha256:8efd879839572bde5a38a1aa3ac23fd4dd9b956fb969bc5e43d1c403419e1e8c"}, - {file = "numexpr-2.9.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:b04f12a6130094a251e3a8fff40130589c1c83be6d4eb223873bea14d8c8b630"}, - {file = "numexpr-2.9.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:977537f2a1cc843f888fb5f0507626f956ada674e4b3847168214a3f3c7446fa"}, - {file = "numexpr-2.9.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6eae6c0c2d5682c02e8ac9c4287c2232c2443c9148b239df22500eaa3c5d73b7"}, - {file = "numexpr-2.9.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1fae6828042b70c2f52a132bfcb9139da704274ed11b982fbf537f91c075d2ef"}, - {file = "numexpr-2.9.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:7c77392aea53f0700d60eb270ad63174b4ff10b04f8de92861101ca2129fee51"}, - {file = "numexpr-2.9.0-cp39-cp39-win32.whl", hash = "sha256:3b03a6cf37a72f5b52f2b962d7ac7f565bea8eaba83c3c4e5fcf8fbb6a938153"}, - {file = "numexpr-2.9.0-cp39-cp39-win_amd64.whl", hash = "sha256:d655b6eacc4e81006b662cba014e4615a9ddd96881b8b4db4ad0d7f6d38069af"}, - {file = "numexpr-2.9.0.tar.gz", hash = "sha256:f21d12f6c432ce349089eb95342babf6629aebb3fddf187a4492d3aadaadaaf0"}, + {file = "numexpr-2.10.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:bbd35f17f6efc00ebd4a480192af1ee30996094a0d5343b131b0e90e61e8b554"}, + {file = "numexpr-2.10.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:fecdf4bf3c1250e56583db0a4a80382a259ba4c2e1efa13e04ed43f0938071f5"}, + {file = "numexpr-2.10.1-cp310-cp310-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:b2efa499f460124538a5b4f1bf2e77b28eb443ee244cc5573ed0f6a069ebc635"}, + {file = "numexpr-2.10.1-cp310-cp310-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:ac23a72eff10f928f23b147bdeb0f1b774e862abe332fc9bf4837e9f1bc0bbf9"}, + {file = "numexpr-2.10.1-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:b28eaf45f1cc1048aad9e90e3a8ada1aef58c5f8155a85267dc781b37998c046"}, + {file = "numexpr-2.10.1-cp310-cp310-win32.whl", hash = "sha256:4f0985bd1c493b23b5aad7d81fa174798f3812efb78d14844194834c9fee38b8"}, + {file = "numexpr-2.10.1-cp310-cp310-win_amd64.whl", hash = 
"sha256:44f6d12a8c44be90199bbb10d3abf467f88951f48a3d1fbbd3c219d121f39c9d"}, + {file = "numexpr-2.10.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:a3c0b0bf165b2d886eb981afa4e77873ca076f5d51c491c4d7b8fc10f17c876f"}, + {file = "numexpr-2.10.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:56648a04679063175681195670ad53e5c8ca19668166ed13875199b5600089c7"}, + {file = "numexpr-2.10.1-cp311-cp311-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:ce04ae6efe2a9d0be1a0e114115c3ae70c68b8b8fbc615c5c55c15704b01e6a4"}, + {file = "numexpr-2.10.1-cp311-cp311-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:45f598182b4f5c153222e47d5163c3bee8d5ebcaee7e56dd2a5898d4d97e4473"}, + {file = "numexpr-2.10.1-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:6a50370bea77ba94c3734a44781c716751354c6bfda2d369af3aed3d67d42871"}, + {file = "numexpr-2.10.1-cp311-cp311-win32.whl", hash = "sha256:fa4009d84a8e6e21790e718a80a22d57fe7f215283576ef2adc4183f7247f3c7"}, + {file = "numexpr-2.10.1-cp311-cp311-win_amd64.whl", hash = "sha256:fcbf013bb8494e8ef1d11fa3457827c1571c6a3153982d709e5d17594999d4dd"}, + {file = "numexpr-2.10.1-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:82fc95c301b15ff4823f98989ee363a2d5555d16a7cfd3710e98ddee726eaaaa"}, + {file = "numexpr-2.10.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:cbf79fef834f88607f977ab9867061dcd9b40ccb08bb28547c6dc6c73e560895"}, + {file = "numexpr-2.10.1-cp312-cp312-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:552c8d4b2e3b87cdb2abb40a781b9a61a9090a9f66ac7357fc5a0b93aff76be3"}, + {file = "numexpr-2.10.1-cp312-cp312-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:22cc65e9121aeb3187a2b50827715b2b087ea70e8ab21416ea52662322087b43"}, + {file = "numexpr-2.10.1-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:00204e5853713b5eba5f3d0bc586a5d8d07f76011b597c8b4087592cc2ec2928"}, + {file = "numexpr-2.10.1-cp312-cp312-win32.whl", hash = "sha256:82bf04a1495ac475de4ab49fbe0a3a2710ed3fd1a00bc03847316b5d7602402d"}, + {file = "numexpr-2.10.1-cp312-cp312-win_amd64.whl", hash = "sha256:300e577b3c006dd7a8270f1bb2e8a00ee15bf235b1650fe2a6febec2954bc2c3"}, + {file = "numexpr-2.10.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:fb704620657a1c99d64933e8a982148d8bfb2b738a1943e107a2bfdee887ce56"}, + {file = "numexpr-2.10.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:368a1972c3186355160f6ee330a7eea146d8443da75a38a30083289ae251ef5a"}, + {file = "numexpr-2.10.1-cp39-cp39-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:ca8ae46481d0b0689ca0d00a8670bc464ce375e349599fe674a6d4957e7b7eb6"}, + {file = "numexpr-2.10.1-cp39-cp39-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:5a4db4456e0779d5e024220b7b6a7477ac900679bfa74836b06fa526aaed4e3c"}, + {file = "numexpr-2.10.1-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:926dd426c68f1d927412a2ad843831c1eb9a95871e7bb0bd8b20d547c12238d2"}, + {file = "numexpr-2.10.1-cp39-cp39-win32.whl", hash = "sha256:37598cca41f8f50dc889b0b72be1616a288758c16ab7d48c9ac8719e1a39d835"}, + {file = "numexpr-2.10.1-cp39-cp39-win_amd64.whl", hash = "sha256:78b14c19c403df7498954468385768c86b0d2c52ad03dffb74e45d44ae5a9c77"}, + {file = "numexpr-2.10.1.tar.gz", hash = "sha256:9bba99d354a65f1a008ab8b87f07d84404c668e66bab624df5b6b5373403cf81"}, ] [package.dependencies] -numpy = ">=1.13.3" +numpy = ">=1.23.0" [[package]] name = "numpy" @@ -5539,13 +4548,13 @@ signedtoken = ["cryptography (>=3.0.0)", "pyjwt (>=2.0.0,<3)"] [[package]] name = "oci" -version = 
"2.133.0" +version = "2.135.0" description = "Oracle Cloud Infrastructure Python SDK" optional = false python-versions = "*" files = [ - {file = "oci-2.133.0-py3-none-any.whl", hash = "sha256:9706365481ca538c89b3a15e6b5c246801eccb06be831a7f21c40f2a2ee310a7"}, - {file = "oci-2.133.0.tar.gz", hash = "sha256:800418025bb98f587c65bbf89c6b6d61ef0f2249e0698d73439baf3251640b7f"}, + {file = "oci-2.135.0-py3-none-any.whl", hash = "sha256:c01f1d103ed034fa7ca2bceb297bf00e6f6c456d14a46b35ee9007b25f3ea397"}, + {file = "oci-2.135.0.tar.gz", hash = "sha256:6e28e6595264705d8fd0719045ffc4b23170e7fd2cd76a1c3aa25e4cdaa5883a"}, ] [package.dependencies] @@ -5648,65 +4657,6 @@ typing-extensions = ">=4.7,<5" [package.extras] datalib = ["numpy (>=1)", "pandas (>=1.2.3)", "pandas-stubs (>=1.1.0.11)"] -[[package]] -name = "opencensus" -version = "0.11.4" -description = "A stats collection and distributed tracing framework" -optional = false -python-versions = "*" -files = [ - {file = "opencensus-0.11.4-py2.py3-none-any.whl", hash = "sha256:a18487ce68bc19900336e0ff4655c5a116daf10c1b3685ece8d971bddad6a864"}, - {file = "opencensus-0.11.4.tar.gz", hash = "sha256:cbef87d8b8773064ab60e5c2a1ced58bbaa38a6d052c41aec224958ce544eff2"}, -] - -[package.dependencies] -google-api-core = {version = ">=1.0.0,<3.0.0", markers = "python_version >= \"3.6\""} -opencensus-context = ">=0.1.3" -six = ">=1.16,<2.0" - -[[package]] -name = "opencensus-context" -version = "0.1.3" -description = "OpenCensus Runtime Context" -optional = false -python-versions = "*" -files = [ - {file = "opencensus-context-0.1.3.tar.gz", hash = "sha256:a03108c3c10d8c80bb5ddf5c8a1f033161fa61972a9917f9b9b3a18517f0088c"}, - {file = "opencensus_context-0.1.3-py2.py3-none-any.whl", hash = "sha256:073bb0590007af276853009fac7e4bab1d523c3f03baf4cb4511ca38967c6039"}, -] - -[[package]] -name = "opencensus-ext-azure" -version = "1.1.13" -description = "OpenCensus Azure Monitor Exporter" -optional = false -python-versions = "*" -files = [ - {file = "opencensus-ext-azure-1.1.13.tar.gz", hash = "sha256:aec30472177005379ba56a702a097d618c5f57558e1bb6676ec75f948130692a"}, - {file = "opencensus_ext_azure-1.1.13-py2.py3-none-any.whl", hash = "sha256:06001fac6f8588ba00726a3a7c6c7f2fc88bc8ad12a65afdca657923085393dd"}, -] - -[package.dependencies] -azure-core = ">=1.12.0,<2.0.0" -azure-identity = ">=1.5.0,<2.0.0" -opencensus = ">=0.11.4,<1.0.0" -psutil = ">=5.6.3" -requests = ">=2.19.0" - -[[package]] -name = "opencensus-ext-logging" -version = "0.1.1" -description = "OpenCensus logging Integration" -optional = false -python-versions = "*" -files = [ - {file = "opencensus-ext-logging-0.1.1.tar.gz", hash = "sha256:c203b70f034151dada529f543af330ba17aaffec27d8a5267d03c713eb1de334"}, - {file = "opencensus_ext_logging-0.1.1-py2.py3-none-any.whl", hash = "sha256:cfdaf5da5d8b195ff3d1af87a4066a6621a28046173f6be4b0b6caec4a3ca89f"}, -] - -[package.dependencies] -opencensus = ">=0.8.0,<1.0.0" - [[package]] name = "openpyxl" version = "3.1.5" @@ -5721,30 +4671,6 @@ files = [ [package.dependencies] et-xmlfile = "*" -[[package]] -name = "opensearch-py" -version = "2.4.0" -description = "Python client for OpenSearch" -optional = false -python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, <4" -files = [ - {file = "opensearch-py-2.4.0.tar.gz", hash = "sha256:7eba2b6ed2ddcf33225bfebfba2aee026877838cc39f760ec80f27827308cc4b"}, - {file = "opensearch_py-2.4.0-py2.py3-none-any.whl", hash = "sha256:316077235437c8ceac970232261f3393c65fb92a80f33c5b106f50f1dab24fd9"}, -] - -[package.dependencies] 
-certifi = ">=2022.12.07" -python-dateutil = "*" -requests = ">=2.4.0,<3.0.0" -six = "*" -urllib3 = ">=1.26.18" - -[package.extras] -async = ["aiohttp (>=3,<4)"] -develop = ["black", "botocore", "coverage (<8.0.0)", "jinja2", "mock", "myst-parser", "pytest (>=3.0.0)", "pytest-cov", "pytest-mock (<4.0.0)", "pytz", "pyyaml", "requests (>=2.0.0,<3.0.0)", "sphinx", "sphinx-copybutton", "sphinx-rtd-theme"] -docs = ["aiohttp (>=3,<4)", "myst-parser", "sphinx", "sphinx-copybutton", "sphinx-rtd-theme"] -kerberos = ["requests-kerberos"] - [[package]] name = "opentelemetry-api" version = "1.27.0" @@ -6059,40 +4985,53 @@ files = [ [[package]] name = "pandas" -version = "2.2.2" +version = "2.2.3" description = "Powerful data structures for data analysis, time series, and statistics" optional = false python-versions = ">=3.9" files = [ - {file = "pandas-2.2.2-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:90c6fca2acf139569e74e8781709dccb6fe25940488755716d1d354d6bc58bce"}, - {file = "pandas-2.2.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:c7adfc142dac335d8c1e0dcbd37eb8617eac386596eb9e1a1b77791cf2498238"}, - {file = "pandas-2.2.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4abfe0be0d7221be4f12552995e58723c7422c80a659da13ca382697de830c08"}, - {file = "pandas-2.2.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8635c16bf3d99040fdf3ca3db669a7250ddf49c55dc4aa8fe0ae0fa8d6dcc1f0"}, - {file = "pandas-2.2.2-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:40ae1dffb3967a52203105a077415a86044a2bea011b5f321c6aa64b379a3f51"}, - {file = "pandas-2.2.2-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:8e5a0b00e1e56a842f922e7fae8ae4077aee4af0acb5ae3622bd4b4c30aedf99"}, - {file = "pandas-2.2.2-cp310-cp310-win_amd64.whl", hash = "sha256:ddf818e4e6c7c6f4f7c8a12709696d193976b591cc7dc50588d3d1a6b5dc8772"}, - {file = "pandas-2.2.2-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:696039430f7a562b74fa45f540aca068ea85fa34c244d0deee539cb6d70aa288"}, - {file = "pandas-2.2.2-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:8e90497254aacacbc4ea6ae5e7a8cd75629d6ad2b30025a4a8b09aa4faf55151"}, - {file = "pandas-2.2.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:58b84b91b0b9f4bafac2a0ac55002280c094dfc6402402332c0913a59654ab2b"}, - {file = "pandas-2.2.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6d2123dc9ad6a814bcdea0f099885276b31b24f7edf40f6cdbc0912672e22eee"}, - {file = "pandas-2.2.2-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:2925720037f06e89af896c70bca73459d7e6a4be96f9de79e2d440bd499fe0db"}, - {file = "pandas-2.2.2-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:0cace394b6ea70c01ca1595f839cf193df35d1575986e484ad35c4aeae7266c1"}, - {file = "pandas-2.2.2-cp311-cp311-win_amd64.whl", hash = "sha256:873d13d177501a28b2756375d59816c365e42ed8417b41665f346289adc68d24"}, - {file = "pandas-2.2.2-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:9dfde2a0ddef507a631dc9dc4af6a9489d5e2e740e226ad426a05cabfbd7c8ef"}, - {file = "pandas-2.2.2-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:e9b79011ff7a0f4b1d6da6a61aa1aa604fb312d6647de5bad20013682d1429ce"}, - {file = "pandas-2.2.2-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1cb51fe389360f3b5a4d57dbd2848a5f033350336ca3b340d1c53a1fad33bcad"}, - {file = "pandas-2.2.2-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = 
"sha256:eee3a87076c0756de40b05c5e9a6069c035ba43e8dd71c379e68cab2c20f16ad"}, - {file = "pandas-2.2.2-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:3e374f59e440d4ab45ca2fffde54b81ac3834cf5ae2cdfa69c90bc03bde04d76"}, - {file = "pandas-2.2.2-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:43498c0bdb43d55cb162cdc8c06fac328ccb5d2eabe3cadeb3529ae6f0517c32"}, - {file = "pandas-2.2.2-cp312-cp312-win_amd64.whl", hash = "sha256:d187d355ecec3629624fccb01d104da7d7f391db0311145817525281e2804d23"}, - {file = "pandas-2.2.2-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:0ca6377b8fca51815f382bd0b697a0814c8bda55115678cbc94c30aacbb6eff2"}, - {file = "pandas-2.2.2-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:9057e6aa78a584bc93a13f0a9bf7e753a5e9770a30b4d758b8d5f2a62a9433cd"}, - {file = "pandas-2.2.2-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:001910ad31abc7bf06f49dcc903755d2f7f3a9186c0c040b827e522e9cef0863"}, - {file = "pandas-2.2.2-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:66b479b0bd07204e37583c191535505410daa8df638fd8e75ae1b383851fe921"}, - {file = "pandas-2.2.2-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:a77e9d1c386196879aa5eb712e77461aaee433e54c68cf253053a73b7e49c33a"}, - {file = "pandas-2.2.2-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:92fd6b027924a7e178ac202cfbe25e53368db90d56872d20ffae94b96c7acc57"}, - {file = "pandas-2.2.2-cp39-cp39-win_amd64.whl", hash = "sha256:640cef9aa381b60e296db324337a554aeeb883ead99dc8f6c18e81a93942f5f4"}, - {file = "pandas-2.2.2.tar.gz", hash = "sha256:9e79019aba43cb4fda9e4d983f8e88ca0373adbb697ae9c6c43093218de28b54"}, + {file = "pandas-2.2.3-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:1948ddde24197a0f7add2bdc4ca83bf2b1ef84a1bc8ccffd95eda17fd836ecb5"}, + {file = "pandas-2.2.3-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:381175499d3802cde0eabbaf6324cce0c4f5d52ca6f8c377c29ad442f50f6348"}, + {file = "pandas-2.2.3-cp310-cp310-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:d9c45366def9a3dd85a6454c0e7908f2b3b8e9c138f5dc38fed7ce720d8453ed"}, + {file = "pandas-2.2.3-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:86976a1c5b25ae3f8ccae3a5306e443569ee3c3faf444dfd0f41cda24667ad57"}, + {file = "pandas-2.2.3-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:b8661b0238a69d7aafe156b7fa86c44b881387509653fdf857bebc5e4008ad42"}, + {file = "pandas-2.2.3-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:37e0aced3e8f539eccf2e099f65cdb9c8aa85109b0be6e93e2baff94264bdc6f"}, + {file = "pandas-2.2.3-cp310-cp310-win_amd64.whl", hash = "sha256:56534ce0746a58afaf7942ba4863e0ef81c9c50d3f0ae93e9497d6a41a057645"}, + {file = "pandas-2.2.3-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:66108071e1b935240e74525006034333f98bcdb87ea116de573a6a0dccb6c039"}, + {file = "pandas-2.2.3-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:7c2875855b0ff77b2a64a0365e24455d9990730d6431b9e0ee18ad8acee13dbd"}, + {file = "pandas-2.2.3-cp311-cp311-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:cd8d0c3be0515c12fed0bdbae072551c8b54b7192c7b1fda0ba56059a0179698"}, + {file = "pandas-2.2.3-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c124333816c3a9b03fbeef3a9f230ba9a737e9e5bb4060aa2107a86cc0a497fc"}, + {file = "pandas-2.2.3-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:63cc132e40a2e084cf01adf0775b15ac515ba905d7dcca47e9a251819c575ef3"}, + {file = "pandas-2.2.3-cp311-cp311-musllinux_1_2_x86_64.whl", hash = 
"sha256:29401dbfa9ad77319367d36940cd8a0b3a11aba16063e39632d98b0e931ddf32"}, + {file = "pandas-2.2.3-cp311-cp311-win_amd64.whl", hash = "sha256:3fc6873a41186404dad67245896a6e440baacc92f5b716ccd1bc9ed2995ab2c5"}, + {file = "pandas-2.2.3-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:b1d432e8d08679a40e2a6d8b2f9770a5c21793a6f9f47fdd52c5ce1948a5a8a9"}, + {file = "pandas-2.2.3-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:a5a1595fe639f5988ba6a8e5bc9649af3baf26df3998a0abe56c02609392e0a4"}, + {file = "pandas-2.2.3-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:5de54125a92bb4d1c051c0659e6fcb75256bf799a732a87184e5ea503965bce3"}, + {file = "pandas-2.2.3-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:fffb8ae78d8af97f849404f21411c95062db1496aeb3e56f146f0355c9989319"}, + {file = "pandas-2.2.3-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:6dfcb5ee8d4d50c06a51c2fffa6cff6272098ad6540aed1a76d15fb9318194d8"}, + {file = "pandas-2.2.3-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:062309c1b9ea12a50e8ce661145c6aab431b1e99530d3cd60640e255778bd43a"}, + {file = "pandas-2.2.3-cp312-cp312-win_amd64.whl", hash = "sha256:59ef3764d0fe818125a5097d2ae867ca3fa64df032331b7e0917cf5d7bf66b13"}, + {file = "pandas-2.2.3-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:f00d1345d84d8c86a63e476bb4955e46458b304b9575dcf71102b5c705320015"}, + {file = "pandas-2.2.3-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:3508d914817e153ad359d7e069d752cdd736a247c322d932eb89e6bc84217f28"}, + {file = "pandas-2.2.3-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:22a9d949bfc9a502d320aa04e5d02feab689d61da4e7764b62c30b991c42c5f0"}, + {file = "pandas-2.2.3-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f3a255b2c19987fbbe62a9dfd6cff7ff2aa9ccab3fc75218fd4b7530f01efa24"}, + {file = "pandas-2.2.3-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:800250ecdadb6d9c78eae4990da62743b857b470883fa27f652db8bdde7f6659"}, + {file = "pandas-2.2.3-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:6374c452ff3ec675a8f46fd9ab25c4ad0ba590b71cf0656f8b6daa5202bca3fb"}, + {file = "pandas-2.2.3-cp313-cp313-win_amd64.whl", hash = "sha256:61c5ad4043f791b61dd4752191d9f07f0ae412515d59ba8f005832a532f8736d"}, + {file = "pandas-2.2.3-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:3b71f27954685ee685317063bf13c7709a7ba74fc996b84fc6821c59b0f06468"}, + {file = "pandas-2.2.3-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:38cf8125c40dae9d5acc10fa66af8ea6fdf760b2714ee482ca691fc66e6fcb18"}, + {file = "pandas-2.2.3-cp313-cp313t-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:ba96630bc17c875161df3818780af30e43be9b166ce51c9a18c1feae342906c2"}, + {file = "pandas-2.2.3-cp313-cp313t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1db71525a1538b30142094edb9adc10be3f3e176748cd7acc2240c2f2e5aa3a4"}, + {file = "pandas-2.2.3-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:15c0e1e02e93116177d29ff83e8b1619c93ddc9c49083f237d4312337a61165d"}, + {file = "pandas-2.2.3-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:ad5b65698ab28ed8d7f18790a0dc58005c7629f227be9ecc1072aa74c0c1d43a"}, + {file = "pandas-2.2.3-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:bc6b93f9b966093cb0fd62ff1a7e4c09e6d546ad7c1de191767baffc57628f39"}, + {file = "pandas-2.2.3-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:5dbca4c1acd72e8eeef4753eeca07de9b1db4f398669d5994086f788a5d7cc30"}, + {file = 
"pandas-2.2.3-cp39-cp39-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:8cd6d7cc958a3910f934ea8dbdf17b2364827bb4dafc38ce6eef6bb3d65ff09c"}, + {file = "pandas-2.2.3-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:99df71520d25fade9db7c1076ac94eb994f4d2673ef2aa2e86ee039b6746d20c"}, + {file = "pandas-2.2.3-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:31d0ced62d4ea3e231a9f228366919a5ea0b07440d9d4dac345376fd8e1477ea"}, + {file = "pandas-2.2.3-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:7eee9e7cea6adf3e3d24e304ac6b8300646e2a5d1cd3a3c2abed9101b0846761"}, + {file = "pandas-2.2.3-cp39-cp39-win_amd64.whl", hash = "sha256:4850ba03528b6dd51d6c5d273c46f183f39a9baf3f0143e566b89450965b105e"}, + {file = "pandas-2.2.3.tar.gz", hash = "sha256:4f18ba62b61d7e192368b84517265a99b4d7ee8912f8708660fb4a366cc82667"}, ] [package.dependencies] @@ -6156,16 +5095,6 @@ multiprocess = ">=0.70.16" pox = ">=0.3.4" ppft = ">=1.7.6.8" -[[package]] -name = "peewee" -version = "3.17.6" -description = "a little orm" -optional = false -python-versions = "*" -files = [ - {file = "peewee-3.17.6.tar.gz", hash = "sha256:cea5592c6f4da1592b7cff8eaf655be6648a1f5857469e30037bf920c03fb8fb"}, -] - [[package]] name = "pgvecto-rs" version = "0.2.1" @@ -6300,13 +5229,13 @@ xmp = ["defusedxml"] [[package]] name = "platformdirs" -version = "4.3.3" +version = "4.3.6" description = "A small Python package for determining appropriate platform-specific dirs, e.g. a `user data dir`." optional = false python-versions = ">=3.8" files = [ - {file = "platformdirs-4.3.3-py3-none-any.whl", hash = "sha256:50a5450e2e84f44539718293cbb1da0a0885c9d14adf21b77bae4e66fc99d9b5"}, - {file = "platformdirs-4.3.3.tar.gz", hash = "sha256:d4e0b7d8ec176b341fb03cb11ca12d0276faa8c485f9cd218f613840463fc2c0"}, + {file = "platformdirs-4.3.6-py3-none-any.whl", hash = "sha256:73e575e1408ab8103900836b97580d5307456908a03e92031bab39e4554cc3fb"}, + {file = "platformdirs-4.3.6.tar.gz", hash = "sha256:357fb2acbc885b0419afd3ce3ed34564c13c9b95c89360cd9563f73aa5e2b907"}, ] [package.extras] @@ -6314,21 +5243,6 @@ docs = ["furo (>=2024.8.6)", "proselint (>=0.14)", "sphinx (>=8.0.2)", "sphinx-a test = ["appdirs (==1.4.4)", "covdefaults (>=2.3)", "pytest (>=8.3.2)", "pytest-cov (>=5)", "pytest-mock (>=3.14)"] type = ["mypy (>=1.11.2)"] -[[package]] -name = "plotly" -version = "5.24.1" -description = "An open-source, interactive data visualization library for Python" -optional = false -python-versions = ">=3.8" -files = [ - {file = "plotly-5.24.1-py3-none-any.whl", hash = "sha256:f67073a1e637eb0dc3e46324d9d51e2fe76e9727c892dde64ddf1e1b51f29089"}, - {file = "plotly-5.24.1.tar.gz", hash = "sha256:dbc8ac8339d248a4bcc36e08a5659bacfe1b079390b8953533f4eb22169b4bae"}, -] - -[package.dependencies] -packaging = "*" -tenacity = ">=6.2.0" - [[package]] name = "pluggy" version = "1.5.0" @@ -6376,13 +5290,13 @@ tests = ["pytest (>=5.4.1)", "pytest-cov (>=2.8.1)", "pytest-mypy (>=0.8.0)", "p [[package]] name = "posthog" -version = "3.6.5" +version = "3.6.6" description = "Integrate PostHog into any python application." 
optional = false python-versions = "*" files = [ - {file = "posthog-3.6.5-py2.py3-none-any.whl", hash = "sha256:f8b7c573826b061a1d22c9495169c38ebe83a1df2729f49c7129a9c23a02acf6"}, - {file = "posthog-3.6.5.tar.gz", hash = "sha256:7fd3ca809e15476c35f75d18cd6bba31395daf0a17b75242965c469fb6292510"}, + {file = "posthog-3.6.6-py2.py3-none-any.whl", hash = "sha256:38834fd7f0732582a20d4eb4674c8d5c088e464d14d1b3f8c176e389aecaa4ef"}, + {file = "posthog-3.6.6.tar.gz", hash = "sha256:1e04783293117109189ad7048f3eedbe21caff0e39bee5e2d47a93dd790fefac"}, ] [package.dependencies] @@ -6399,58 +5313,38 @@ test = ["coverage", "django", "flake8", "freezegun (==0.3.15)", "mock (>=2.0.0)" [[package]] name = "pox" -version = "0.3.4" +version = "0.3.5" description = "utilities for filesystem exploration and automated builds" optional = false python-versions = ">=3.8" files = [ - {file = "pox-0.3.4-py3-none-any.whl", hash = "sha256:651b8ae8a7b341b7bfd267f67f63106daeb9805f1ac11f323d5280d2da93fdb6"}, - {file = "pox-0.3.4.tar.gz", hash = "sha256:16e6eca84f1bec3828210b06b052adf04cf2ab20c22fd6fbef5f78320c9a6fed"}, + {file = "pox-0.3.5-py3-none-any.whl", hash = "sha256:9e82bcc9e578b43e80a99cad80f0d8f44f4d424f0ee4ee8d4db27260a6aa365a"}, + {file = "pox-0.3.5.tar.gz", hash = "sha256:8120ee4c94e950e6e0483e050a4f0e56076e590ba0a9add19524c254bd23c2d1"}, ] [[package]] name = "ppft" -version = "1.7.6.8" +version = "1.7.6.9" description = "distributed and parallel Python" optional = false python-versions = ">=3.8" files = [ - {file = "ppft-1.7.6.8-py3-none-any.whl", hash = "sha256:de2dd4b1b080923dd9627fbdea52649fd741c752fce4f3cf37e26f785df23d9b"}, - {file = "ppft-1.7.6.8.tar.gz", hash = "sha256:76a429a7d7b74c4d743f6dba8351e58d62b6432ed65df9fe204790160dab996d"}, + {file = "ppft-1.7.6.9-py3-none-any.whl", hash = "sha256:dab36548db5ca3055067fbe6b1a17db5fee29f3c366c579a9a27cebb52ed96f0"}, + {file = "ppft-1.7.6.9.tar.gz", hash = "sha256:73161c67474ea9d81d04bcdad166d399cff3f084d5d2dc21ebdd46c075bbc265"}, ] [package.extras] -dill = ["dill (>=0.3.8)"] - -[[package]] -name = "primp" -version = "0.6.2" -description = "HTTP client that can impersonate web browsers, mimicking their headers and `TLS/JA3/JA4/HTTP2` fingerprints" -optional = false -python-versions = ">=3.8" -files = [ - {file = "primp-0.6.2-cp38-abi3-macosx_10_12_x86_64.whl", hash = "sha256:4a35d441462a55d9a9525bf170e2ffd2fcb3db6039b23e802859fa22c18cdd51"}, - {file = "primp-0.6.2-cp38-abi3-macosx_11_0_arm64.whl", hash = "sha256:f67ccade95bdbca3cf9b96b93aa53f9617d85ddbf988da4e9c523aa785fd2d54"}, - {file = "primp-0.6.2-cp38-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8074b93befaf36567e4cf3d4a1a8cd6ab9cc6e4dd4ff710650678daa405aee71"}, - {file = "primp-0.6.2-cp38-abi3-manylinux_2_34_aarch64.whl", hash = "sha256:7d3e2a3f8c6262e9b883651b79c4ff2b7677a76f47293a139f541c9ea333ce3b"}, - {file = "primp-0.6.2-cp38-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:a460ea389371c6d04839b4b50b5805d99da8ebe281a2e8b534d27377c6d44f0e"}, - {file = "primp-0.6.2-cp38-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:5b6b27e89d3c05c811aff0e4fde7a36d6957b15b3112f4ce28b6b99e8ca1e725"}, - {file = "primp-0.6.2-cp38-abi3-win_amd64.whl", hash = "sha256:1006a40a85f88a4c5222094813a1ebc01f85a63e9a33d2c443288c0720bed321"}, - {file = "primp-0.6.2.tar.gz", hash = "sha256:5a96a6b65195a8a989157e67d23bd171c49be238654e02bdf1b1fda36cbcc068"}, -] - -[package.extras] -dev = ["certifi", "pytest (>=8.1.1)"] +dill = ["dill (>=0.3.9)"] [[package]] name = "prompt-toolkit" -version = "3.0.47" +version = 
"3.0.48" description = "Library for building powerful interactive command lines in Python" optional = false python-versions = ">=3.7.0" files = [ - {file = "prompt_toolkit-3.0.47-py3-none-any.whl", hash = "sha256:0d7bfa67001d5e39d02c224b663abc33687405033a8c422d0d675a5a13361d10"}, - {file = "prompt_toolkit-3.0.47.tar.gz", hash = "sha256:1e1b29cb58080b1e69f207c893a1a7bf16d127a5c30c9d17a25a5d77792e5360"}, + {file = "prompt_toolkit-3.0.48-py3-none-any.whl", hash = "sha256:f49a827f90062e411f1ce1f854f2aedb3c23353244f8108b89283587397ac10e"}, + {file = "prompt_toolkit-3.0.48.tar.gz", hash = "sha256:d6623ab0477a80df74e646bdbc93621143f5caf104206aa29294d53de1a03d90"}, ] [package.dependencies] @@ -6475,22 +5369,22 @@ testing = ["google-api-core (>=1.31.5)"] [[package]] name = "protobuf" -version = "4.25.4" +version = "4.25.5" description = "" optional = false python-versions = ">=3.8" files = [ - {file = "protobuf-4.25.4-cp310-abi3-win32.whl", hash = "sha256:db9fd45183e1a67722cafa5c1da3e85c6492a5383f127c86c4c4aa4845867dc4"}, - {file = "protobuf-4.25.4-cp310-abi3-win_amd64.whl", hash = "sha256:ba3d8504116a921af46499471c63a85260c1a5fc23333154a427a310e015d26d"}, - {file = "protobuf-4.25.4-cp37-abi3-macosx_10_9_universal2.whl", hash = "sha256:eecd41bfc0e4b1bd3fa7909ed93dd14dd5567b98c941d6c1ad08fdcab3d6884b"}, - {file = "protobuf-4.25.4-cp37-abi3-manylinux2014_aarch64.whl", hash = "sha256:4c8a70fdcb995dcf6c8966cfa3a29101916f7225e9afe3ced4395359955d3835"}, - {file = "protobuf-4.25.4-cp37-abi3-manylinux2014_x86_64.whl", hash = "sha256:3319e073562e2515c6ddc643eb92ce20809f5d8f10fead3332f71c63be6a7040"}, - {file = "protobuf-4.25.4-cp38-cp38-win32.whl", hash = "sha256:7e372cbbda66a63ebca18f8ffaa6948455dfecc4e9c1029312f6c2edcd86c4e1"}, - {file = "protobuf-4.25.4-cp38-cp38-win_amd64.whl", hash = "sha256:051e97ce9fa6067a4546e75cb14f90cf0232dcb3e3d508c448b8d0e4265b61c1"}, - {file = "protobuf-4.25.4-cp39-cp39-win32.whl", hash = "sha256:90bf6fd378494eb698805bbbe7afe6c5d12c8e17fca817a646cd6a1818c696ca"}, - {file = "protobuf-4.25.4-cp39-cp39-win_amd64.whl", hash = "sha256:ac79a48d6b99dfed2729ccccee547b34a1d3d63289c71cef056653a846a2240f"}, - {file = "protobuf-4.25.4-py3-none-any.whl", hash = "sha256:bfbebc1c8e4793cfd58589acfb8a1026be0003e852b9da7db5a4285bde996978"}, - {file = "protobuf-4.25.4.tar.gz", hash = "sha256:0dc4a62cc4052a036ee2204d26fe4d835c62827c855c8a03f29fe6da146b380d"}, + {file = "protobuf-4.25.5-cp310-abi3-win32.whl", hash = "sha256:5e61fd921603f58d2f5acb2806a929b4675f8874ff5f330b7d6f7e2e784bbcd8"}, + {file = "protobuf-4.25.5-cp310-abi3-win_amd64.whl", hash = "sha256:4be0571adcbe712b282a330c6e89eae24281344429ae95c6d85e79e84780f5ea"}, + {file = "protobuf-4.25.5-cp37-abi3-macosx_10_9_universal2.whl", hash = "sha256:b2fde3d805354df675ea4c7c6338c1aecd254dfc9925e88c6d31a2bcb97eb173"}, + {file = "protobuf-4.25.5-cp37-abi3-manylinux2014_aarch64.whl", hash = "sha256:919ad92d9b0310070f8356c24b855c98df2b8bd207ebc1c0c6fcc9ab1e007f3d"}, + {file = "protobuf-4.25.5-cp37-abi3-manylinux2014_x86_64.whl", hash = "sha256:fe14e16c22be926d3abfcb500e60cab068baf10b542b8c858fa27e098123e331"}, + {file = "protobuf-4.25.5-cp38-cp38-win32.whl", hash = "sha256:98d8d8aa50de6a2747efd9cceba361c9034050ecce3e09136f90de37ddba66e1"}, + {file = "protobuf-4.25.5-cp38-cp38-win_amd64.whl", hash = "sha256:b0234dd5a03049e4ddd94b93400b67803c823cfc405689688f59b34e0742381a"}, + {file = "protobuf-4.25.5-cp39-cp39-win32.whl", hash = "sha256:abe32aad8561aa7cc94fc7ba4fdef646e576983edb94a73381b03c53728a626f"}, + {file = 
"protobuf-4.25.5-cp39-cp39-win_amd64.whl", hash = "sha256:7a183f592dc80aa7c8da7ad9e55091c4ffc9497b3054452d629bb85fa27c2a45"}, + {file = "protobuf-4.25.5-py3-none-any.whl", hash = "sha256:0aebecb809cae990f8129ada5ca273d9d670b76d9bfc9b1809f0a9c02b7dbf41"}, + {file = "protobuf-4.25.5.tar.gz", hash = "sha256:7f8249476b4a9473645db7f8ab42b02fe1488cbe5fb72fddd445e0665afd8584"}, ] [[package]] @@ -6904,23 +5798,6 @@ azure-key-vault = ["azure-identity (>=1.16.0)", "azure-keyvault-secrets (>=4.8.0 toml = ["tomli (>=2.0.1)"] yaml = ["pyyaml (>=6.0.1)"] -[[package]] -name = "pydash" -version = "8.0.3" -description = "The kitchen sink of Python utility libraries for doing \"stuff\" in a functional way. Based on the Lo-Dash Javascript library." -optional = false -python-versions = ">=3.8" -files = [ - {file = "pydash-8.0.3-py3-none-any.whl", hash = "sha256:c16871476822ee6b59b87e206dd27888240eff50a7b4cd72a4b80b43b6b994d7"}, - {file = "pydash-8.0.3.tar.gz", hash = "sha256:1b27cd3da05b72f0e5ff786c523afd82af796936462e631ffd1b228d91f8b9aa"}, -] - -[package.dependencies] -typing-extensions = ">3.10,<4.6.0 || >4.6.0" - -[package.extras] -dev = ["build", "coverage", "furo", "invoke", "mypy", "pytest", "pytest-cov", "pytest-mypy-testing", "ruff", "sphinx", "sphinx-autodoc-typehints", "tox", "twine", "wheel"] - [[package]] name = "pygments" version = "2.18.0" @@ -6957,13 +5834,13 @@ tests = ["coverage[toml] (==5.0.4)", "pytest (>=6.0.0,<7.0.0)"] [[package]] name = "pymilvus" -version = "2.4.6" +version = "2.4.7" description = "Python Sdk for Milvus" optional = false python-versions = ">=3.8" files = [ - {file = "pymilvus-2.4.6-py3-none-any.whl", hash = "sha256:b4c43472edc313b845d313be50610e19054e6954b2c5c3b515565c596c2d3d97"}, - {file = "pymilvus-2.4.6.tar.gz", hash = "sha256:6ac3eb91c92cc01bbe444fe83f895f02d7b2546d96ac67998630bf31ac074d66"}, + {file = "pymilvus-2.4.7-py3-none-any.whl", hash = "sha256:1e5d377bd40fa7eb459d3958dbd96201758f5cf997d41eb3d2d169d0b7fa462e"}, + {file = "pymilvus-2.4.7.tar.gz", hash = "sha256:9ef460b940782a42e1b7b8ae0da03d8cc02d9d80044d13f4b689a7c935ec7aa7"}, ] [package.dependencies] @@ -6980,21 +5857,6 @@ bulk-writer = ["azure-storage-blob", "minio (>=7.0.0)", "pyarrow (>=12.0.0)", "r dev = ["black", "grpcio (==1.62.2)", "grpcio-testing (==1.62.2)", "grpcio-tools (==1.62.2)", "pytest (>=5.3.4)", "pytest-cov (>=2.8.1)", "pytest-timeout (>=1.3.4)", "ruff (>0.4.0)"] model = ["milvus-model (>=0.1.0)"] -[[package]] -name = "pymysql" -version = "1.1.1" -description = "Pure Python MySQL Driver" -optional = false -python-versions = ">=3.7" -files = [ - {file = "PyMySQL-1.1.1-py3-none-any.whl", hash = "sha256:4de15da4c61dc132f4fb9ab763063e693d521a80fd0e87943b9a453dd4c19d6c"}, - {file = "pymysql-1.1.1.tar.gz", hash = "sha256:e127611aaf2b417403c60bf4dc570124aeb4a57f5f37b8e95ae399a42f904cd0"}, -] - -[package.extras] -ed25519 = ["PyNaCl (>=1.4.0)"] -rsa = ["cryptography"] - [[package]] name = "pyopenssl" version = "24.2.1" @@ -7082,28 +5944,28 @@ files = [ [[package]] name = "pyproject-hooks" -version = "1.1.0" +version = "1.2.0" description = "Wrappers to call pyproject.toml-based build backend hooks." 
optional = false python-versions = ">=3.7" files = [ - {file = "pyproject_hooks-1.1.0-py3-none-any.whl", hash = "sha256:7ceeefe9aec63a1064c18d939bdc3adf2d8aa1988a510afec15151578b232aa2"}, - {file = "pyproject_hooks-1.1.0.tar.gz", hash = "sha256:4b37730834edbd6bd37f26ece6b44802fb1c1ee2ece0e54ddff8bfc06db86965"}, + {file = "pyproject_hooks-1.2.0-py3-none-any.whl", hash = "sha256:9e5c6bfa8dcc30091c74b0cf803c81fdd29d94f01992a7707bc97babb1141913"}, + {file = "pyproject_hooks-1.2.0.tar.gz", hash = "sha256:1e859bd5c40fae9448642dd871adf459e5e2084186e8d2c2a79a824c970da1f8"}, ] [[package]] name = "pyreadline3" -version = "3.5.2" +version = "3.5.4" description = "A python implementation of GNU readline." optional = false python-versions = ">=3.8" files = [ - {file = "pyreadline3-3.5.2-py3-none-any.whl", hash = "sha256:a87d56791e2965b2b187e2ea33dcf664600842c997c0623c95cf8ef07db83de9"}, - {file = "pyreadline3-3.5.2.tar.gz", hash = "sha256:ba82292e52c5a3bb256b291af0c40b457c1e8699cac9a873abbcaac8aef3a1bb"}, + {file = "pyreadline3-3.5.4-py3-none-any.whl", hash = "sha256:eaf8e6cc3c49bcccf145fc6067ba8643d1df34d604a1ec0eccbf7a18e6d3fae6"}, + {file = "pyreadline3-3.5.4.tar.gz", hash = "sha256:8d57d53039a1c75adba8e50dd3d992b28143480816187ea5efbd5c78e6c885b7"}, ] [package.extras] -dev = ["build", "flake8", "pytest", "twine"] +dev = ["build", "flake8", "mypy", "pytest", "twine"] [[package]] name = "pytest" @@ -7149,21 +6011,21 @@ histogram = ["pygal", "pygaljs"] [[package]] name = "pytest-env" -version = "1.1.4" +version = "1.1.5" description = "pytest plugin that allows you to add environment variables." optional = false python-versions = ">=3.8" files = [ - {file = "pytest_env-1.1.4-py3-none-any.whl", hash = "sha256:a4212056d4d440febef311a98fdca56c31256d58fb453d103cba4e8a532b721d"}, - {file = "pytest_env-1.1.4.tar.gz", hash = "sha256:86653658da8f11c6844975db955746c458a9c09f1e64957603161e2ff93f5133"}, + {file = "pytest_env-1.1.5-py3-none-any.whl", hash = "sha256:ce90cf8772878515c24b31cd97c7fa1f4481cd68d588419fd45f10ecaee6bc30"}, + {file = "pytest_env-1.1.5.tar.gz", hash = "sha256:91209840aa0e43385073ac464a554ad2947cc2fd663a9debf88d03b01e0cc1cf"}, ] [package.dependencies] -pytest = ">=8.3.2" +pytest = ">=8.3.3" tomli = {version = ">=2.0.1", markers = "python_version < \"3.11\""} [package.extras] -test = ["covdefaults (>=2.3)", "coverage (>=7.6.1)", "pytest-mock (>=3.14)"] +testing = ["covdefaults (>=2.3)", "coverage (>=7.6.1)", "pytest-mock (>=3.14)"] [[package]] name = "pytest-mock" @@ -7550,145 +6412,103 @@ dev = ["pytest"] [[package]] name = "rapidfuzz" -version = "3.9.7" +version = "3.10.0" description = "rapid fuzzy string matching" optional = false -python-versions = ">=3.8" +python-versions = ">=3.9" files = [ - {file = "rapidfuzz-3.9.7-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:ccf68e30b80e903f2309f90a438dbd640dd98e878eeb5ad361a288051ee5b75c"}, - {file = "rapidfuzz-3.9.7-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:696a79018ef989bf1c9abd9005841cee18005ccad4748bad8a4c274c47b6241a"}, - {file = "rapidfuzz-3.9.7-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c4eebf6c93af0ae866c22b403a84747580bb5c10f0d7b51c82a87f25405d4dcb"}, - {file = "rapidfuzz-3.9.7-cp310-cp310-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:0e9125377fa3d21a8abd4fbdbcf1c27be73e8b1850f0b61b5b711364bf3b59db"}, - {file = "rapidfuzz-3.9.7-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:c12d180b17a22d107c8747de9c68d0b9c1d15dcda5445ff9bf9f4ccfb67c3e16"}, - 
{file = "rapidfuzz-3.9.7-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:c1318d42610c26dcd68bd3279a1bf9e3605377260867c9a8ed22eafc1bd93a7c"}, - {file = "rapidfuzz-3.9.7-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:dd5fa6e3c6e0333051c1f3a49f0807b3366f4131c8d6ac8c3e05fd0d0ce3755c"}, - {file = "rapidfuzz-3.9.7-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:fcf79b686962d7bec458a0babc904cb4fa319808805e036b9d5a531ee6b9b835"}, - {file = "rapidfuzz-3.9.7-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:8b01153c7466d0bad48fba77a303d5a768e66f24b763853469f47220b3de4661"}, - {file = "rapidfuzz-3.9.7-cp310-cp310-musllinux_1_2_ppc64le.whl", hash = "sha256:94baaeea0b4f8632a6da69348b1e741043eba18d4e3088d674d3f76586b6223d"}, - {file = "rapidfuzz-3.9.7-cp310-cp310-musllinux_1_2_s390x.whl", hash = "sha256:6c5b32875646cb7f60c193ade99b2e4b124f19583492115293cd00f6fb198b17"}, - {file = "rapidfuzz-3.9.7-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:110b6294396bc0a447648627479c9320f095c2034c0537f687592e0f58622638"}, - {file = "rapidfuzz-3.9.7-cp310-cp310-win32.whl", hash = "sha256:3445a35c4c8d288f2b2011eb61bce1227c633ce85a3154e727170f37c0266bb2"}, - {file = "rapidfuzz-3.9.7-cp310-cp310-win_amd64.whl", hash = "sha256:0d1415a732ee75e74a90af12020b77a0b396b36c60afae1bde3208a78cd2c9fc"}, - {file = "rapidfuzz-3.9.7-cp310-cp310-win_arm64.whl", hash = "sha256:836f4d88b8bd0fff2ebe815dcaab8aa6c8d07d1d566a7e21dd137cf6fe11ed5b"}, - {file = "rapidfuzz-3.9.7-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:d098ce6162eb5e48fceb0745455bc950af059df6113eec83e916c129fca11408"}, - {file = "rapidfuzz-3.9.7-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:048d55d36c02c6685a2b2741688503c3d15149694506655b6169dcfd3b6c2585"}, - {file = "rapidfuzz-3.9.7-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c33211cfff9aec425bb1bfedaf94afcf337063aa273754f22779d6dadebef4c2"}, - {file = "rapidfuzz-3.9.7-cp311-cp311-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:e6d9db2fa4e9be171e9bb31cf2d2575574774966b43f5b951062bb2e67885852"}, - {file = "rapidfuzz-3.9.7-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:d4e049d5ad61448c9a020d1061eba20944c4887d720c4069724beb6ea1692507"}, - {file = "rapidfuzz-3.9.7-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:cfa74aac64c85898b93d9c80bb935a96bf64985e28d4ee0f1a3d1f3bf11a5106"}, - {file = "rapidfuzz-3.9.7-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:965693c2e9efd425b0f059f5be50ef830129f82892fa1858e220e424d9d0160f"}, - {file = "rapidfuzz-3.9.7-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:8501000a5eb8037c4b56857724797fe5a8b01853c363de91c8d0d0ad56bef319"}, - {file = "rapidfuzz-3.9.7-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:8d92c552c6b7577402afdd547dcf5d31ea6c8ae31ad03f78226e055cfa37f3c6"}, - {file = "rapidfuzz-3.9.7-cp311-cp311-musllinux_1_2_ppc64le.whl", hash = "sha256:1ee2086f490cb501d86b7e386c1eb4e3a0ccbb0c99067089efaa8c79012c8952"}, - {file = "rapidfuzz-3.9.7-cp311-cp311-musllinux_1_2_s390x.whl", hash = "sha256:1de91e7fd7f525e10ea79a6e62c559d1b0278ec097ad83d9da378b6fab65a265"}, - {file = "rapidfuzz-3.9.7-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:a4da514d13f4433e16960a17f05b67e0af30ac771719c9a9fb877e5004f74477"}, - {file = "rapidfuzz-3.9.7-cp311-cp311-win32.whl", hash = "sha256:a40184c67db8252593ec518e17fb8a6e86d7259dc9f2d6c0bf4ff4db8cf1ad4b"}, - {file = 
"rapidfuzz-3.9.7-cp311-cp311-win_amd64.whl", hash = "sha256:c4f28f1930b09a2c300357d8465b388cecb7e8b2f454a5d5425561710b7fd07f"}, - {file = "rapidfuzz-3.9.7-cp311-cp311-win_arm64.whl", hash = "sha256:675b75412a943bb83f1f53e2e54fd18c80ef15ed642dc6eb0382d1949419d904"}, - {file = "rapidfuzz-3.9.7-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:1ef6a1a8f0b12f8722f595f15c62950c9a02d5abc64742561299ffd49f6c6944"}, - {file = "rapidfuzz-3.9.7-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:32532af1d70c6ec02ea5ac7ee2766dfff7c8ae8c761abfe8da9e527314e634e8"}, - {file = "rapidfuzz-3.9.7-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ae1a38bade755aa9dd95a81cda949e1bf9cd92b79341ccc5e2189c9e7bdfc5ec"}, - {file = "rapidfuzz-3.9.7-cp312-cp312-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d73ee2df41224c87336448d279b5b6a3a75f36e41dd3dcf538c0c9cce36360d8"}, - {file = "rapidfuzz-3.9.7-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:be3a1fc3e2ab3bdf93dc0c83c00acca8afd2a80602297d96cf4a0ba028333cdf"}, - {file = "rapidfuzz-3.9.7-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:603f48f621272a448ff58bb556feb4371252a02156593303391f5c3281dfaeac"}, - {file = "rapidfuzz-3.9.7-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:268f8e1ca50fc61c0736f3fe9d47891424adf62d96ed30196f30f4bd8216b41f"}, - {file = "rapidfuzz-3.9.7-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:5f8bf3f0d02935751d8660abda6044821a861f6229f7d359f98bcdcc7e66c39b"}, - {file = "rapidfuzz-3.9.7-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:b997ff3b39d4cee9fb025d6c46b0a24bd67595ce5a5b652a97fb3a9d60beb651"}, - {file = "rapidfuzz-3.9.7-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:ca66676c8ef6557f9b81c5b2b519097817a7c776a6599b8d6fcc3e16edd216fe"}, - {file = "rapidfuzz-3.9.7-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:35d3044cb635ca6b1b2b7b67b3597bd19f34f1753b129eb6d2ae04cf98cd3945"}, - {file = "rapidfuzz-3.9.7-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:5a93c9e60904cb76e7aefef67afffb8b37c4894f81415ed513db090f29d01101"}, - {file = "rapidfuzz-3.9.7-cp312-cp312-win32.whl", hash = "sha256:579d107102c0725f7c79b4e79f16d3cf4d7c9208f29c66b064fa1fd4641d5155"}, - {file = "rapidfuzz-3.9.7-cp312-cp312-win_amd64.whl", hash = "sha256:953b3780765c8846866faf891ee4290f6a41a6dacf4fbcd3926f78c9de412ca6"}, - {file = "rapidfuzz-3.9.7-cp312-cp312-win_arm64.whl", hash = "sha256:7c20c1474b068c4bd45bf2fd0ad548df284f74e9a14a68b06746c56e3aa8eb70"}, - {file = "rapidfuzz-3.9.7-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:fde81b1da9a947f931711febe2e2bee694e891f6d3e6aa6bc02c1884702aea19"}, - {file = "rapidfuzz-3.9.7-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:47e92c155a14f44511ea8ebcc6bc1535a1fe8d0a7d67ad3cc47ba61606df7bcf"}, - {file = "rapidfuzz-3.9.7-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8772b745668260c5c4d069c678bbaa68812e6c69830f3771eaad521af7bc17f8"}, - {file = "rapidfuzz-3.9.7-cp313-cp313-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:578302828dd97ee2ba507d2f71d62164e28d2fc7bc73aad0d2d1d2afc021a5d5"}, - {file = "rapidfuzz-3.9.7-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:fc3e6081069eea61593f1d6839029da53d00c8c9b205c5534853eaa3f031085c"}, - {file = "rapidfuzz-3.9.7-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:0b1c2d504eddf97bc0f2eba422c8915576dbf025062ceaca2d68aecd66324ad9"}, - {file = 
"rapidfuzz-3.9.7-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6fb76e5a21034f0307c51c5a2fc08856f698c53a4c593b17d291f7d6e9d09ca3"}, - {file = "rapidfuzz-3.9.7-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:d4ba2318ef670ce505f42881a5d2af70f948124646947341a3c6ccb33cd70369"}, - {file = "rapidfuzz-3.9.7-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:057bb03f39e285047d7e9412e01ecf31bb2d42b9466a5409d715d587460dd59b"}, - {file = "rapidfuzz-3.9.7-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:a8feac9006d5c9758438906f093befffc4290de75663dbb2098461df7c7d28dd"}, - {file = "rapidfuzz-3.9.7-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:95b8292383e717e10455f2c917df45032b611141e43d1adf70f71b1566136b11"}, - {file = "rapidfuzz-3.9.7-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:e9fbf659537d246086d0297628b3795dc3e4a384101ecc01e5791c827b8d7345"}, - {file = "rapidfuzz-3.9.7-cp313-cp313-win32.whl", hash = "sha256:1dc516ac6d32027be2b0196bedf6d977ac26debd09ca182376322ad620460feb"}, - {file = "rapidfuzz-3.9.7-cp313-cp313-win_amd64.whl", hash = "sha256:b4f86e09d3064dca0b014cd48688964036a904a2d28048f00c8f4640796d06a8"}, - {file = "rapidfuzz-3.9.7-cp313-cp313-win_arm64.whl", hash = "sha256:19c64d8ddb2940b42a4567b23f1681af77f50a5ff6c9b8e85daba079c210716e"}, - {file = "rapidfuzz-3.9.7-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:fbda3dd68d8b28ccb20ffb6f756fefd9b5ba570a772bedd7643ed441f5793308"}, - {file = "rapidfuzz-3.9.7-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:2379e0b2578ad3ac7004f223251550f08bca873ff76c169b09410ec562ad78d8"}, - {file = "rapidfuzz-3.9.7-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5d1eff95362f993b0276fd3839aee48625b09aac8938bb0c23b40d219cba5dc5"}, - {file = "rapidfuzz-3.9.7-cp38-cp38-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:cd9360e30041690912525a210e48a897b49b230768cc8af1c702e5395690464f"}, - {file = "rapidfuzz-3.9.7-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:a93cd834b3c315ab437f0565ee3a2f42dd33768dc885ccbabf9710b131cf70d2"}, - {file = "rapidfuzz-3.9.7-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:4ff196996240db7075f62c7bc4506f40a3c80cd4ae3ab0e79ac6892283a90859"}, - {file = "rapidfuzz-3.9.7-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:948dcee7aaa1cd14358b2a7ef08bf0be42bf89049c3a906669874a715fc2c937"}, - {file = "rapidfuzz-3.9.7-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:d95751f505a301af1aaf086c19f34536056d6c8efa91b2240de532a3db57b543"}, - {file = "rapidfuzz-3.9.7-cp38-cp38-musllinux_1_2_i686.whl", hash = "sha256:90db86fa196eecf96cb6db09f1083912ea945c50c57188039392d810d0b784e1"}, - {file = "rapidfuzz-3.9.7-cp38-cp38-musllinux_1_2_ppc64le.whl", hash = "sha256:3171653212218a162540a3c8eb8ae7d3dcc8548540b69eaecaf3b47c14d89c90"}, - {file = "rapidfuzz-3.9.7-cp38-cp38-musllinux_1_2_s390x.whl", hash = "sha256:36dd6e820379c37a1ffefc8a52b648758e867cd9d78ee5b5dc0c9a6a10145378"}, - {file = "rapidfuzz-3.9.7-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:7b702de95666a1f7d5c6b47eacadfe2d2794af3742d63d2134767d13e5d1c713"}, - {file = "rapidfuzz-3.9.7-cp38-cp38-win32.whl", hash = "sha256:9030e7238c0df51aed5c9c5ed8eee2bdd47a2ae788e562c1454af2851c3d1906"}, - {file = "rapidfuzz-3.9.7-cp38-cp38-win_amd64.whl", hash = "sha256:f847fb0fbfb72482b1c05c59cbb275c58a55b73708a7f77a83f8035ee3c86497"}, - {file = "rapidfuzz-3.9.7-cp39-cp39-macosx_10_9_x86_64.whl", hash = 
"sha256:97f2ce529d2a70a60c290f6ab269a2bbf1d3b47b9724dccc84339b85f7afb044"}, - {file = "rapidfuzz-3.9.7-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:e2957fdad10bb83b1982b02deb3604a3f6911a5e545f518b59c741086f92d152"}, - {file = "rapidfuzz-3.9.7-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5d5262383634626eb45c536017204b8163a03bc43bda880cf1bdd7885db9a163"}, - {file = "rapidfuzz-3.9.7-cp39-cp39-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:364587827d7cbd41afa0782adc2d2d19e3f07d355b0750a02a8e33ad27a9c368"}, - {file = "rapidfuzz-3.9.7-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:ecc24af7f905f3d6efb371a01680116ffea8d64e266618fb9ad1602a9b4f7934"}, - {file = "rapidfuzz-3.9.7-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:9dc86aa6b29d174713c5f4caac35ffb7f232e3e649113e8d13812b35ab078228"}, - {file = "rapidfuzz-3.9.7-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e3dcfbe7266e74a707173a12a7b355a531f2dcfbdb32f09468e664330da14874"}, - {file = "rapidfuzz-3.9.7-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:b23806fbdd6b510ba9ac93bb72d503066263b0fba44b71b835be9f063a84025f"}, - {file = "rapidfuzz-3.9.7-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:5551d68264c1bb6943f542da83a4dc8940ede52c5847ef158698799cc28d14f5"}, - {file = "rapidfuzz-3.9.7-cp39-cp39-musllinux_1_2_ppc64le.whl", hash = "sha256:13d8675a1fa7e2b19650ca7ef9a6ec01391d4bb12ab9e0793e8eb024538b4a34"}, - {file = "rapidfuzz-3.9.7-cp39-cp39-musllinux_1_2_s390x.whl", hash = "sha256:9b6a5de507b9be6de688dae40143b656f7a93b10995fb8bd90deb555e7875c60"}, - {file = "rapidfuzz-3.9.7-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:111a20a3c090cf244d9406e60500b6c34b2375ba3a5009e2b38fd806fe38e337"}, - {file = "rapidfuzz-3.9.7-cp39-cp39-win32.whl", hash = "sha256:22589c0b8ccc6c391ce7f776c93a8c92c96ab8d34e1a19f1bd2b12a235332632"}, - {file = "rapidfuzz-3.9.7-cp39-cp39-win_amd64.whl", hash = "sha256:6f83221db5755b8f34222e40607d87f1176a8d5d4dbda4a55a0f0b67d588a69c"}, - {file = "rapidfuzz-3.9.7-cp39-cp39-win_arm64.whl", hash = "sha256:3665b92e788578c3bb334bd5b5fa7ee1a84bafd68be438e3110861d1578c63a0"}, - {file = "rapidfuzz-3.9.7-pp310-pypy310_pp73-macosx_10_15_x86_64.whl", hash = "sha256:d7df9c2194c7ec930b33c991c55dbd0c10951bd25800c0b7a7b571994ebbced5"}, - {file = "rapidfuzz-3.9.7-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:68bd888eafd07b09585dcc8bc2716c5ecdb7eed62827470664d25588982b2873"}, - {file = "rapidfuzz-3.9.7-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d1230e0f9026851a6a432beaa0ce575dda7b39fe689b576f99a0704fbb81fc9c"}, - {file = "rapidfuzz-3.9.7-pp310-pypy310_pp73-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:a3b36e1c61b796ae1777f3e9e11fd39898b09d351c9384baf6e3b7e6191d8ced"}, - {file = "rapidfuzz-3.9.7-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9dba13d86806fcf3fe9c9919f58575e0090eadfb89c058bde02bcc7ab24e4548"}, - {file = "rapidfuzz-3.9.7-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:1f1a33e84056b7892c721d84475d3bde49a145126bc4c6efe0d6d0d59cb31c29"}, - {file = "rapidfuzz-3.9.7-pp38-pypy38_pp73-macosx_10_9_x86_64.whl", hash = "sha256:3492c7a42b7fa9f0051d7fcce9893e95ed91c97c9ec7fb64346f3e070dd318ed"}, - {file = "rapidfuzz-3.9.7-pp38-pypy38_pp73-macosx_11_0_arm64.whl", hash = "sha256:ece45eb2af8b00f90d10f7419322e8804bd42fb1129026f9bfe712c37508b514"}, - {file = 
"rapidfuzz-3.9.7-pp38-pypy38_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9dcd14cf4876f04b488f6e54a7abd3e9b31db5f5a6aba0ce90659917aaa8c088"}, - {file = "rapidfuzz-3.9.7-pp38-pypy38_pp73-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:521c58c72ed8a612b25cda378ff10dee17e6deb4ee99a070b723519a345527b9"}, - {file = "rapidfuzz-3.9.7-pp38-pypy38_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:18669bb6cdf7d40738526d37e550df09ba065b5a7560f3d802287988b6cb63cf"}, - {file = "rapidfuzz-3.9.7-pp38-pypy38_pp73-win_amd64.whl", hash = "sha256:7abe2dbae81120a64bb4f8d3fcafe9122f328c9f86d7f327f174187a5af4ed86"}, - {file = "rapidfuzz-3.9.7-pp39-pypy39_pp73-macosx_10_15_x86_64.whl", hash = "sha256:a3c0783910911f4f24655826d007c9f4360f08107410952c01ee3df98c713eb2"}, - {file = "rapidfuzz-3.9.7-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:03126f9a040ff21d2a110610bfd6b93b79377ce8b4121edcb791d61b7df6eec5"}, - {file = "rapidfuzz-3.9.7-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:591908240f4085e2ade5b685c6e8346e2ed44932cffeaac2fb32ddac95b55c7f"}, - {file = "rapidfuzz-3.9.7-pp39-pypy39_pp73-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:e9012d86c6397edbc9da4ac0132de7f8ee9d6ce857f4194d5684c4ddbcdd1c5c"}, - {file = "rapidfuzz-3.9.7-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:df596ddd3db38aa513d4c0995611267b3946e7cbe5a8761b50e9306dfec720ee"}, - {file = "rapidfuzz-3.9.7-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:3ed5adb752f4308fcc8f4fb6f8eb7aa4082f9d12676fda0a74fa5564242a8107"}, - {file = "rapidfuzz-3.9.7.tar.gz", hash = "sha256:f1c7296534c1afb6f495aa95871f14ccdc197c6db42965854e483100df313030"}, + {file = "rapidfuzz-3.10.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:884453860de029380dded8f3c1918af2d8eb5adf8010261645c7e5c88c2b5428"}, + {file = "rapidfuzz-3.10.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:718c9bd369288aca5fa929df6dbf66fdbe9768d90940a940c0b5cdc96ade4309"}, + {file = "rapidfuzz-3.10.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a68e3724b7dab761c01816aaa64b0903734d999d5589daf97c14ef5cc0629a8e"}, + {file = "rapidfuzz-3.10.0-cp310-cp310-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:1af60988d47534246d9525f77288fdd9de652608a4842815d9018570b959acc6"}, + {file = "rapidfuzz-3.10.0-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:3084161fc3e963056232ef8d937449a2943852e07101f5a136c8f3cfa4119217"}, + {file = "rapidfuzz-3.10.0-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:6cd67d3d017296d98ff505529104299f78433e4b8af31b55003d901a62bbebe9"}, + {file = "rapidfuzz-3.10.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b11a127ac590fc991e8a02c2d7e1ac86e8141c92f78546f18b5c904064a0552c"}, + {file = "rapidfuzz-3.10.0-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:aadce42147fc09dcef1afa892485311e824c050352e1aa6e47f56b9b27af4cf0"}, + {file = "rapidfuzz-3.10.0-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:b54853c2371bf0e38d67da379519deb6fbe70055efb32f6607081641af3dc752"}, + {file = "rapidfuzz-3.10.0-cp310-cp310-musllinux_1_2_ppc64le.whl", hash = "sha256:ce19887268e90ee81a3957eef5e46a70ecc000713796639f83828b950343f49e"}, + {file = "rapidfuzz-3.10.0-cp310-cp310-musllinux_1_2_s390x.whl", hash = "sha256:f39a2a5ded23b9b9194ec45740dce57177b80f86c6d8eba953d3ff1a25c97766"}, + {file = 
"rapidfuzz-3.10.0-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:0ec338d5f4ad8d9339a88a08db5c23e7f7a52c2b2a10510c48a0cef1fb3f0ddc"}, + {file = "rapidfuzz-3.10.0-cp310-cp310-win32.whl", hash = "sha256:56fd15ea8f4c948864fa5ebd9261c67cf7b89a1c517a0caef4df75446a7af18c"}, + {file = "rapidfuzz-3.10.0-cp310-cp310-win_amd64.whl", hash = "sha256:43dfc5e733808962a822ff6d9c29f3039a3cfb3620706f5953e17cfe4496724c"}, + {file = "rapidfuzz-3.10.0-cp310-cp310-win_arm64.whl", hash = "sha256:ae7966f205b5a7fde93b44ca8fed37c1c8539328d7f179b1197de34eceaceb5f"}, + {file = "rapidfuzz-3.10.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:bb0013795b40db5cf361e6f21ee7cda09627cf294977149b50e217d7fe9a2f03"}, + {file = "rapidfuzz-3.10.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:69ef5b363afff7150a1fbe788007e307b9802a2eb6ad92ed51ab94e6ad2674c6"}, + {file = "rapidfuzz-3.10.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c582c46b1bb0b19f1a5f4c1312f1b640c21d78c371a6615c34025b16ee56369b"}, + {file = "rapidfuzz-3.10.0-cp311-cp311-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:288f6f6e7410cacb115fb851f3f18bf0e4231eb3f6cb5bd1cec0e7b25c4d039d"}, + {file = "rapidfuzz-3.10.0-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:c9e29a13d2fd9be3e7d8c26c7ef4ba60b5bc7efbc9dbdf24454c7e9ebba31768"}, + {file = "rapidfuzz-3.10.0-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:ea2da0459b951ee461bd4e02b8904890bd1c4263999d291c5cd01e6620177ad4"}, + {file = "rapidfuzz-3.10.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:457827ba82261aa2ae6ac06a46d0043ab12ba7216b82d87ae1434ec0f29736d6"}, + {file = "rapidfuzz-3.10.0-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:5d350864269d56f51ab81ab750c9259ae5cad3152c0680baef143dcec92206a1"}, + {file = "rapidfuzz-3.10.0-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:a9b8f51e08c3f983d857c3889930af9ddecc768453822076683664772d87e374"}, + {file = "rapidfuzz-3.10.0-cp311-cp311-musllinux_1_2_ppc64le.whl", hash = "sha256:7f3a6aa6e70fc27e4ff5c479f13cc9fc26a56347610f5f8b50396a0d344c5f55"}, + {file = "rapidfuzz-3.10.0-cp311-cp311-musllinux_1_2_s390x.whl", hash = "sha256:803f255f10d63420979b1909ef976e7d30dec42025c9b067fc1d2040cc365a7e"}, + {file = "rapidfuzz-3.10.0-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:2026651761bf83a0f31495cc0f70840d5c0d54388f41316e3f9cb51bd85e49a5"}, + {file = "rapidfuzz-3.10.0-cp311-cp311-win32.whl", hash = "sha256:4df75b3ebbb8cfdb9bf8b213b168620b88fd92d0c16a8bc9f9234630b282db59"}, + {file = "rapidfuzz-3.10.0-cp311-cp311-win_amd64.whl", hash = "sha256:f9f0bbfb6787b97c51516f3ccf97737d504db5d239ad44527673b81f598b84ab"}, + {file = "rapidfuzz-3.10.0-cp311-cp311-win_arm64.whl", hash = "sha256:10fdad800441b9c97d471a937ba7d42625f1b530db05e572f1cb7d401d95c893"}, + {file = "rapidfuzz-3.10.0-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:7dc87073ba3a40dd65591a2100aa71602107443bf10770579ff9c8a3242edb94"}, + {file = "rapidfuzz-3.10.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:a425a0a868cf8e9c6e93e1cda4b758cdfd314bb9a4fc916c5742c934e3613480"}, + {file = "rapidfuzz-3.10.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a86d5d1d75e61df060c1e56596b6b0a4422a929dff19cc3dbfd5eee762c86b61"}, + {file = "rapidfuzz-3.10.0-cp312-cp312-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:34f213d59219a9c3ca14e94a825f585811a68ac56b4118b4dc388b5b14afc108"}, + {file = 
"rapidfuzz-3.10.0-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:96ad46f5f56f70fab2be9e5f3165a21be58d633b90bf6e67fc52a856695e4bcf"}, + {file = "rapidfuzz-3.10.0-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:9178277f72d144a6c7704d7ae7fa15b7b86f0f0796f0e1049c7b4ef748a662ef"}, + {file = "rapidfuzz-3.10.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:76a35e9e19a7c883c422ffa378e9a04bc98cb3b29648c5831596401298ee51e6"}, + {file = "rapidfuzz-3.10.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:8a6405d34c394c65e4f73a1d300c001f304f08e529d2ed6413b46ee3037956eb"}, + {file = "rapidfuzz-3.10.0-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:bd393683129f446a75d8634306aed7e377627098a1286ff3af2a4f1736742820"}, + {file = "rapidfuzz-3.10.0-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:b0445fa9880ead81f5a7d0efc0b9c977a947d8052c43519aceeaf56eabaf6843"}, + {file = "rapidfuzz-3.10.0-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:c50bc308fa29767ed8f53a8d33b7633a9e14718ced038ed89d41b886e301da32"}, + {file = "rapidfuzz-3.10.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:e89605afebbd2d4b045bccfdc12a14b16fe8ccbae05f64b4b4c64a97dad1c891"}, + {file = "rapidfuzz-3.10.0-cp312-cp312-win32.whl", hash = "sha256:2db9187f3acf3cd33424ecdbaad75414c298ecd1513470df7bda885dcb68cc15"}, + {file = "rapidfuzz-3.10.0-cp312-cp312-win_amd64.whl", hash = "sha256:50e3d0c72ea15391ba9531ead7f2068a67c5b18a6a365fef3127583aaadd1725"}, + {file = "rapidfuzz-3.10.0-cp312-cp312-win_arm64.whl", hash = "sha256:9eac95b4278bd53115903d89118a2c908398ee8bdfd977ae844f1bd2b02b917c"}, + {file = "rapidfuzz-3.10.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:fe5231e8afd069c742ac5b4f96344a0fe4aff52df8e53ef87faebf77f827822c"}, + {file = "rapidfuzz-3.10.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:886882367dbc985f5736356105798f2ae6e794e671fc605476cbe2e73838a9bb"}, + {file = "rapidfuzz-3.10.0-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b33e13e537e3afd1627d421a142a12bbbe601543558a391a6fae593356842f6e"}, + {file = "rapidfuzz-3.10.0-cp313-cp313-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:094c26116d55bf9c53abd840d08422f20da78ec4c4723e5024322321caedca48"}, + {file = "rapidfuzz-3.10.0-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:545fc04f2d592e4350f59deb0818886c1b444ffba3bec535b4fbb97191aaf769"}, + {file = "rapidfuzz-3.10.0-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:916a6abf3632e592b937c3d04c00a6efadd8fd30539cdcd4e6e4d92be7ca5d90"}, + {file = "rapidfuzz-3.10.0-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:fb6ec40cef63b1922083d33bfef2f91fc0b0bc07b5b09bfee0b0f1717d558292"}, + {file = "rapidfuzz-3.10.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:c77a7330dd15c7eb5fd3631dc646fc96327f98db8181138766bd14d3e905f0ba"}, + {file = "rapidfuzz-3.10.0-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:949b5e9eeaa4ecb4c7e9c2a4689dddce60929dd1ff9c76a889cdbabe8bbf2171"}, + {file = "rapidfuzz-3.10.0-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:b5363932a5aab67010ae1a6205c567d1ef256fb333bc23c27582481606be480c"}, + {file = "rapidfuzz-3.10.0-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:5dd6eec15b13329abe66cc241b484002ecb0e17d694491c944a22410a6a9e5e2"}, + {file = "rapidfuzz-3.10.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = 
"sha256:79e7f98525b60b3c14524e0a4e1fedf7654657b6e02eb25f1be897ab097706f3"}, + {file = "rapidfuzz-3.10.0-cp313-cp313-win32.whl", hash = "sha256:d29d1b9857c65f8cb3a29270732e1591b9bacf89de9d13fa764f79f07d8f1fd2"}, + {file = "rapidfuzz-3.10.0-cp313-cp313-win_amd64.whl", hash = "sha256:fa9720e56663cc3649d62b4b5f3145e94b8f5611e8a8e1b46507777249d46aad"}, + {file = "rapidfuzz-3.10.0-cp313-cp313-win_arm64.whl", hash = "sha256:eda4c661e68dddd56c8fbfe1ca35e40dd2afd973f7ebb1605f4d151edc63dff8"}, + {file = "rapidfuzz-3.10.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:cffbc50e0767396ed483900900dd58ce4351bc0d40e64bced8694bd41864cc71"}, + {file = "rapidfuzz-3.10.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:c038b9939da3035afb6cb2f465f18163e8f070aba0482923ecff9443def67178"}, + {file = "rapidfuzz-3.10.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ca366c2e2a54e2f663f4529b189fdeb6e14d419b1c78b754ec1744f3c01070d4"}, + {file = "rapidfuzz-3.10.0-cp39-cp39-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:7c4c82b1689b23b1b5e6a603164ed2be41b6f6de292a698b98ba2381e889eb9d"}, + {file = "rapidfuzz-3.10.0-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:98f6ebe28831a482981ecfeedc8237047878424ad0c1add2c7f366ba44a20452"}, + {file = "rapidfuzz-3.10.0-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:4bd1a7676ee2a4c8e2f7f2550bece994f9f89e58afb96088964145a83af7408b"}, + {file = "rapidfuzz-3.10.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ec9139baa3f85b65adc700eafa03ed04995ca8533dd56c924f0e458ffec044ab"}, + {file = "rapidfuzz-3.10.0-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:26de93e6495078b6af4c4d93a42ca067b16cc0e95699526c82ab7d1025b4d3bf"}, + {file = "rapidfuzz-3.10.0-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:f3a0bda83c18195c361b5500377d0767749f128564ca95b42c8849fd475bb327"}, + {file = "rapidfuzz-3.10.0-cp39-cp39-musllinux_1_2_ppc64le.whl", hash = "sha256:63e4c175cbce8c3adc22dca5e6154588ae673f6c55374d156f3dac732c88d7de"}, + {file = "rapidfuzz-3.10.0-cp39-cp39-musllinux_1_2_s390x.whl", hash = "sha256:4dd3d8443970eaa02ab5ae45ce584b061f2799cd9f7e875190e2617440c1f9d4"}, + {file = "rapidfuzz-3.10.0-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:e5ddb2388610799fc46abe389600625058f2a73867e63e20107c5ad5ffa57c47"}, + {file = "rapidfuzz-3.10.0-cp39-cp39-win32.whl", hash = "sha256:2e9be5d05cd960914024412b5406fb75a82f8562f45912ff86255acbfdbfb78e"}, + {file = "rapidfuzz-3.10.0-cp39-cp39-win_amd64.whl", hash = "sha256:47aca565a39c9a6067927871973ca827023e8b65ba6c5747f4c228c8d7ddc04f"}, + {file = "rapidfuzz-3.10.0-cp39-cp39-win_arm64.whl", hash = "sha256:b0732343cdc4273b5921268026dd7266f75466eb21873cb7635a200d9d9c3fac"}, + {file = "rapidfuzz-3.10.0-pp310-pypy310_pp73-macosx_10_15_x86_64.whl", hash = "sha256:f744b5eb1469bf92dd143d36570d2bdbbdc88fe5cb0b5405e53dd34f479cbd8a"}, + {file = "rapidfuzz-3.10.0-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:b67cc21a14327a0eb0f47bc3d7e59ec08031c7c55220ece672f9476e7a8068d3"}, + {file = "rapidfuzz-3.10.0-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2fe5783676f0afba4a522c80b15e99dbf4e393c149ab610308a8ef1f04c6bcc8"}, + {file = "rapidfuzz-3.10.0-pp310-pypy310_pp73-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d4688862f957c8629d557d084f20b2d803f8738b6c4066802a0b1cc472e088d9"}, + {file = "rapidfuzz-3.10.0-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = 
"sha256:20bd153aacc244e4c907d772c703fea82754c4db14f8aa64d75ff81b7b8ab92d"}, + {file = "rapidfuzz-3.10.0-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:50484d563f8bfa723c74c944b0bb15b9e054db9c889348c8c307abcbee75ab92"}, + {file = "rapidfuzz-3.10.0-pp39-pypy39_pp73-macosx_10_15_x86_64.whl", hash = "sha256:5897242d455461f2c5b82d7397b29341fd11e85bf3608a522177071044784ee8"}, + {file = "rapidfuzz-3.10.0-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:116c71a81e046ba56551d8ab68067ca7034d94b617545316d460a452c5c3c289"}, + {file = "rapidfuzz-3.10.0-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f0a547e4350d1fa32624d3eab51eff8cf329f4cae110b4ea0402486b1da8be40"}, + {file = "rapidfuzz-3.10.0-pp39-pypy39_pp73-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:399b9b79ccfcf50ca3bad7692bc098bb8eade88d7d5e15773b7f866c91156d0c"}, + {file = "rapidfuzz-3.10.0-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7947a425d1be3e744707ee58c6cb318b93a56e08f080722dcc0347e0b7a1bb9a"}, + {file = "rapidfuzz-3.10.0-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:94c48b4a2a4b1d22246f48e2b11cae01ec7d23f0c9123f8bb822839ad79d0a88"}, + {file = "rapidfuzz-3.10.0.tar.gz", hash = "sha256:6b62af27e65bb39276a66533655a2fa3c60a487b03935721c45b7809527979be"}, ] [package.extras] -full = ["numpy"] - -[[package]] -name = "readabilipy" -version = "0.2.0" -description = "Python wrapper for Mozilla's Readability.js" -optional = false -python-versions = ">=3.6.0" -files = [ - {file = "readabilipy-0.2.0-py3-none-any.whl", hash = "sha256:0050853cd6ab012ac75bb4d8f06427feb7dc32054da65060da44654d049802d0"}, - {file = "readabilipy-0.2.0.tar.gz", hash = "sha256:098bf347b19f362042fb6c08864ad776588bf844ac2261fb230f7f9c250fdae5"}, -] - -[package.dependencies] -beautifulsoup4 = ">=4.7.1" -html5lib = "*" -lxml = "*" -regex = "*" - -[package.extras] -dev = ["coveralls", "m2r", "pycodestyle", "pyflakes", "pylint", "pytest", "pytest-benchmark", "pytest-cov", "sphinx"] -docs = ["m2r", "sphinx"] -test = ["coveralls", "pycodestyle", "pyflakes", "pylint", "pytest", "pytest-benchmark", "pytest-cov"] +all = ["numpy"] [[package]] name = "redis" @@ -7827,26 +6647,6 @@ files = [ {file = "regex-2024.9.11.tar.gz", hash = "sha256:6c188c307e8433bcb63dc1915022deb553b4203a70722fc542c363bf120a01fd"}, ] -[[package]] -name = "replicate" -version = "0.22.0" -description = "Python client for Replicate" -optional = false -python-versions = ">=3.8" -files = [ - {file = "replicate-0.22.0-py3-none-any.whl", hash = "sha256:a11e20e9589981a96bee6f3817494b5cc29735a108c71aff4515a81863ad9996"}, - {file = "replicate-0.22.0.tar.gz", hash = "sha256:cab48c15ede619d5aa7d023a241626d504c70ea2b7db5792ebfb5ae9fa373cbc"}, -] - -[package.dependencies] -httpx = ">=0.21.0,<1" -packaging = "*" -pydantic = ">1" -typing-extensions = ">=4.5.0" - -[package.extras] -dev = ["pylint", "pyright", "pytest", "pytest-asyncio", "pytest-recording", "respx", "ruff (>=0.1.3)"] - [[package]] name = "requests" version = "2.31.0" @@ -7868,20 +6668,6 @@ urllib3 = ">=1.21.1,<3" socks = ["PySocks (>=1.5.6,!=1.5.7)"] use-chardet-on-py3 = ["chardet (>=3.0.2,<6)"] -[[package]] -name = "requests-file" -version = "2.1.0" -description = "File transport adapter for Requests" -optional = false -python-versions = "*" -files = [ - {file = "requests_file-2.1.0-py2.py3-none-any.whl", hash = "sha256:cf270de5a4c5874e84599fc5778303d496c10ae5e870bfa378818f35d21bda5c"}, - {file = "requests_file-2.1.0.tar.gz", hash = 
"sha256:0f549a3f3b0699415ac04d167e9cb39bccfb730cb832b4d20be3d9867356e658"}, -] - -[package.dependencies] -requests = ">=1.0.0" - [[package]] name = "requests-oauthlib" version = "2.0.0" @@ -7900,20 +6686,6 @@ requests = ">=2.0.0" [package.extras] rsa = ["oauthlib[signedtoken] (>=3.0.0)"] -[[package]] -name = "requests-toolbelt" -version = "1.0.0" -description = "A utility belt for advanced users of python-requests" -optional = false -python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*" -files = [ - {file = "requests-toolbelt-1.0.0.tar.gz", hash = "sha256:7681a0a3d047012b5bdc0ee37d7f8f07ebe76ab08caeccfc3921ce23c88d5bc6"}, - {file = "requests_toolbelt-1.0.0-py2.py3-none-any.whl", hash = "sha256:cccfdd665f0a24fcf4726e690f65639d272bb0637b9b92dfd91a5568ccf6bd06"}, -] - -[package.dependencies] -requests = ">=2.0.1,<3.0.0" - [[package]] name = "resend" version = "0.7.2" @@ -8293,13 +7065,13 @@ test = ["accelerate (>=0.24.1,<=0.27.0)", "apache-airflow (==2.9.3)", "apache-ai [[package]] name = "sagemaker-core" -version = "1.0.4" +version = "1.0.9" description = "An python package for sagemaker core functionalities" optional = false python-versions = ">=3.8" files = [ - {file = "sagemaker_core-1.0.4-py3-none-any.whl", hash = "sha256:bf71d988dbda03a3cd1557524f2fab4f19d89e54bd38fc7f05bbbcf580715f95"}, - {file = "sagemaker_core-1.0.4.tar.gz", hash = "sha256:203f4eb9d0d2a0e6ba80d79ba8c28b8ea27c94d04f6d9ff01c2fd55b95615c78"}, + {file = "sagemaker_core-1.0.9-py3-none-any.whl", hash = "sha256:7a22c46cf93594f8d44e3523d4ba98407911f3530af68a8ffdde5082d3b26fa3"}, + {file = "sagemaker_core-1.0.9.tar.gz", hash = "sha256:664115faf797412553fb81b97a4777e78e51dfd4454c32edb2c8371bf203c535"}, ] [package.dependencies] @@ -8473,34 +7245,24 @@ tornado = ["tornado (>=5)"] [[package]] name = "setuptools" -version = "74.1.2" +version = "75.1.0" description = "Easily download, build, install, upgrade, and uninstall Python packages" optional = false python-versions = ">=3.8" files = [ - {file = "setuptools-74.1.2-py3-none-any.whl", hash = "sha256:5f4c08aa4d3ebcb57a50c33b1b07e94315d7fc7230f7115e47fc99776c8ce308"}, - {file = "setuptools-74.1.2.tar.gz", hash = "sha256:95b40ed940a1c67eb70fc099094bd6e99c6ee7c23aa2306f4d2697ba7916f9c6"}, + {file = "setuptools-75.1.0-py3-none-any.whl", hash = "sha256:35ab7fd3bcd95e6b7fd704e4a1539513edad446c097797f2985e0e4b960772f2"}, + {file = "setuptools-75.1.0.tar.gz", hash = "sha256:d59a21b17a275fb872a9c3dae73963160ae079f1049ed956880cd7c09b120538"}, ] [package.extras] check = ["pytest-checkdocs (>=2.4)", "pytest-ruff (>=0.2.1)", "ruff (>=0.5.2)"] -core = ["importlib-metadata (>=6)", "importlib-resources (>=5.10.2)", "jaraco.text (>=3.7)", "more-itertools (>=8.8)", "packaging (>=24)", "platformdirs (>=2.6.2)", "tomli (>=2.0.1)", "wheel (>=0.43.0)"] +core = ["importlib-metadata (>=6)", "importlib-resources (>=5.10.2)", "jaraco.collections", "jaraco.functools", "jaraco.text (>=3.7)", "more-itertools", "more-itertools (>=8.8)", "packaging", "packaging (>=24)", "platformdirs (>=2.6.2)", "tomli (>=2.0.1)", "wheel (>=0.43.0)"] cover = ["pytest-cov"] doc = ["furo", "jaraco.packaging (>=9.3)", "jaraco.tidelift (>=1.4)", "pygments-github-lexers (==0.0.5)", "pyproject-hooks (!=1.1)", "rst.linker (>=1.9)", "sphinx (>=3.5)", "sphinx-favicon", "sphinx-inline-tabs", "sphinx-lint", "sphinx-notfound-page (>=1,<2)", "sphinx-reredirects", "sphinxcontrib-towncrier", "towncrier (<24.7)"] enabler = ["pytest-enabler (>=2.2)"] test = ["build[virtualenv] (>=1.0.3)", "filelock (>=3.4.0)", "ini2toml[lite] 
(>=0.14)", "jaraco.develop (>=7.21)", "jaraco.envs (>=2.2)", "jaraco.path (>=3.2.0)", "jaraco.test", "packaging (>=23.2)", "pip (>=19.1)", "pyproject-hooks (!=1.1)", "pytest (>=6,!=8.1.*)", "pytest-home (>=0.5)", "pytest-perf", "pytest-subprocess", "pytest-timeout", "pytest-xdist (>=3)", "tomli-w (>=1.0.0)", "virtualenv (>=13.0.0)", "wheel (>=0.44.0)"] type = ["importlib-metadata (>=7.0.2)", "jaraco.develop (>=7.21)", "mypy (==1.11.*)", "pytest-mypy"] -[[package]] -name = "sgmllib3k" -version = "1.0.0" -description = "Py3k port of sgmllib." -optional = false -python-versions = "*" -files = [ - {file = "sgmllib3k-1.0.0.tar.gz", hash = "sha256:7868fb1c8bfa764c1ac563d3cf369c381d1325d36124933a726f29fcdaa812e9"}, -] - [[package]] name = "shapely" version = "2.0.6" @@ -8570,23 +7332,6 @@ files = [ {file = "shellingham-1.5.4.tar.gz", hash = "sha256:8dbca0739d487e5bd35ab3ca4b36e11c4078f3a234bfce294b0a0291363404de"}, ] -[[package]] -name = "simple-websocket" -version = "1.0.0" -description = "Simple WebSocket server and client for Python" -optional = false -python-versions = ">=3.6" -files = [ - {file = "simple-websocket-1.0.0.tar.gz", hash = "sha256:17d2c72f4a2bd85174a97e3e4c88b01c40c3f81b7b648b0cc3ce1305968928c8"}, - {file = "simple_websocket-1.0.0-py3-none-any.whl", hash = "sha256:1d5bf585e415eaa2083e2bcf02a3ecf91f9712e7b3e6b9fa0b461ad04e0837bc"}, -] - -[package.dependencies] -wsproto = "*" - -[package.extras] -docs = ["sphinx"] - [[package]] name = "six" version = "1.16.0" @@ -8644,60 +7389,60 @@ files = [ [[package]] name = "sqlalchemy" -version = "2.0.34" +version = "2.0.35" description = "Database Abstraction Library" optional = false python-versions = ">=3.7" files = [ - {file = "SQLAlchemy-2.0.34-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:95d0b2cf8791ab5fb9e3aa3d9a79a0d5d51f55b6357eecf532a120ba3b5524db"}, - {file = "SQLAlchemy-2.0.34-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:243f92596f4fd4c8bd30ab8e8dd5965afe226363d75cab2468f2c707f64cd83b"}, - {file = "SQLAlchemy-2.0.34-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9ea54f7300553af0a2a7235e9b85f4204e1fc21848f917a3213b0e0818de9a24"}, - {file = "SQLAlchemy-2.0.34-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:173f5f122d2e1bff8fbd9f7811b7942bead1f5e9f371cdf9e670b327e6703ebd"}, - {file = "SQLAlchemy-2.0.34-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:196958cde924a00488e3e83ff917be3b73cd4ed8352bbc0f2989333176d1c54d"}, - {file = "SQLAlchemy-2.0.34-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:bd90c221ed4e60ac9d476db967f436cfcecbd4ef744537c0f2d5291439848768"}, - {file = "SQLAlchemy-2.0.34-cp310-cp310-win32.whl", hash = "sha256:3166dfff2d16fe9be3241ee60ece6fcb01cf8e74dd7c5e0b64f8e19fab44911b"}, - {file = "SQLAlchemy-2.0.34-cp310-cp310-win_amd64.whl", hash = "sha256:6831a78bbd3c40f909b3e5233f87341f12d0b34a58f14115c9e94b4cdaf726d3"}, - {file = "SQLAlchemy-2.0.34-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:c7db3db284a0edaebe87f8f6642c2b2c27ed85c3e70064b84d1c9e4ec06d5d84"}, - {file = "SQLAlchemy-2.0.34-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:430093fce0efc7941d911d34f75a70084f12f6ca5c15d19595c18753edb7c33b"}, - {file = "SQLAlchemy-2.0.34-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:79cb400c360c7c210097b147c16a9e4c14688a6402445ac848f296ade6283bbc"}, - {file = "SQLAlchemy-2.0.34-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = 
"sha256:fb1b30f31a36c7f3fee848391ff77eebdd3af5750bf95fbf9b8b5323edfdb4ec"}, - {file = "SQLAlchemy-2.0.34-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:8fddde2368e777ea2a4891a3fb4341e910a056be0bb15303bf1b92f073b80c02"}, - {file = "SQLAlchemy-2.0.34-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:80bd73ea335203b125cf1d8e50fef06be709619eb6ab9e7b891ea34b5baa2287"}, - {file = "SQLAlchemy-2.0.34-cp311-cp311-win32.whl", hash = "sha256:6daeb8382d0df526372abd9cb795c992e18eed25ef2c43afe518c73f8cccb721"}, - {file = "SQLAlchemy-2.0.34-cp311-cp311-win_amd64.whl", hash = "sha256:5bc08e75ed11693ecb648b7a0a4ed80da6d10845e44be0c98c03f2f880b68ff4"}, - {file = "SQLAlchemy-2.0.34-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:53e68b091492c8ed2bd0141e00ad3089bcc6bf0e6ec4142ad6505b4afe64163e"}, - {file = "SQLAlchemy-2.0.34-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:bcd18441a49499bf5528deaa9dee1f5c01ca491fc2791b13604e8f972877f812"}, - {file = "SQLAlchemy-2.0.34-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:165bbe0b376541092bf49542bd9827b048357f4623486096fc9aaa6d4e7c59a2"}, - {file = "SQLAlchemy-2.0.34-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c3330415cd387d2b88600e8e26b510d0370db9b7eaf984354a43e19c40df2e2b"}, - {file = "SQLAlchemy-2.0.34-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:97b850f73f8abbffb66ccbab6e55a195a0eb655e5dc74624d15cff4bfb35bd74"}, - {file = "SQLAlchemy-2.0.34-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:7cee4c6917857fd6121ed84f56d1dc78eb1d0e87f845ab5a568aba73e78adf83"}, - {file = "SQLAlchemy-2.0.34-cp312-cp312-win32.whl", hash = "sha256:fbb034f565ecbe6c530dff948239377ba859420d146d5f62f0271407ffb8c580"}, - {file = "SQLAlchemy-2.0.34-cp312-cp312-win_amd64.whl", hash = "sha256:707c8f44931a4facd4149b52b75b80544a8d824162602b8cd2fe788207307f9a"}, - {file = "SQLAlchemy-2.0.34-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:24af3dc43568f3780b7e1e57c49b41d98b2d940c1fd2e62d65d3928b6f95f021"}, - {file = "SQLAlchemy-2.0.34-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e60ed6ef0a35c6b76b7640fe452d0e47acc832ccbb8475de549a5cc5f90c2c06"}, - {file = "SQLAlchemy-2.0.34-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:413c85cd0177c23e32dee6898c67a5f49296640041d98fddb2c40888fe4daa2e"}, - {file = "SQLAlchemy-2.0.34-cp37-cp37m-musllinux_1_2_aarch64.whl", hash = "sha256:25691f4adfb9d5e796fd48bf1432272f95f4bbe5f89c475a788f31232ea6afba"}, - {file = "SQLAlchemy-2.0.34-cp37-cp37m-musllinux_1_2_x86_64.whl", hash = "sha256:526ce723265643dbc4c7efb54f56648cc30e7abe20f387d763364b3ce7506c82"}, - {file = "SQLAlchemy-2.0.34-cp37-cp37m-win32.whl", hash = "sha256:13be2cc683b76977a700948411a94c67ad8faf542fa7da2a4b167f2244781cf3"}, - {file = "SQLAlchemy-2.0.34-cp37-cp37m-win_amd64.whl", hash = "sha256:e54ef33ea80d464c3dcfe881eb00ad5921b60f8115ea1a30d781653edc2fd6a2"}, - {file = "SQLAlchemy-2.0.34-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:43f28005141165edd11fbbf1541c920bd29e167b8bbc1fb410d4fe2269c1667a"}, - {file = "SQLAlchemy-2.0.34-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:b68094b165a9e930aedef90725a8fcfafe9ef95370cbb54abc0464062dbf808f"}, - {file = "SQLAlchemy-2.0.34-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6a1e03db964e9d32f112bae36f0cc1dcd1988d096cfd75d6a588a3c3def9ab2b"}, - {file = "SQLAlchemy-2.0.34-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = 
"sha256:203d46bddeaa7982f9c3cc693e5bc93db476ab5de9d4b4640d5c99ff219bee8c"}, - {file = "SQLAlchemy-2.0.34-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:ae92bebca3b1e6bd203494e5ef919a60fb6dfe4d9a47ed2453211d3bd451b9f5"}, - {file = "SQLAlchemy-2.0.34-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:9661268415f450c95f72f0ac1217cc6f10256f860eed85c2ae32e75b60278ad8"}, - {file = "SQLAlchemy-2.0.34-cp38-cp38-win32.whl", hash = "sha256:895184dfef8708e15f7516bd930bda7e50ead069280d2ce09ba11781b630a434"}, - {file = "SQLAlchemy-2.0.34-cp38-cp38-win_amd64.whl", hash = "sha256:6e7cde3a2221aa89247944cafb1b26616380e30c63e37ed19ff0bba5e968688d"}, - {file = "SQLAlchemy-2.0.34-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:dbcdf987f3aceef9763b6d7b1fd3e4ee210ddd26cac421d78b3c206d07b2700b"}, - {file = "SQLAlchemy-2.0.34-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:ce119fc4ce0d64124d37f66a6f2a584fddc3c5001755f8a49f1ca0a177ef9796"}, - {file = "SQLAlchemy-2.0.34-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a17d8fac6df9835d8e2b4c5523666e7051d0897a93756518a1fe101c7f47f2f0"}, - {file = "SQLAlchemy-2.0.34-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9ebc11c54c6ecdd07bb4efbfa1554538982f5432dfb8456958b6d46b9f834bb7"}, - {file = "SQLAlchemy-2.0.34-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:2e6965346fc1491a566e019a4a1d3dfc081ce7ac1a736536367ca305da6472a8"}, - {file = "SQLAlchemy-2.0.34-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:220574e78ad986aea8e81ac68821e47ea9202b7e44f251b7ed8c66d9ae3f4278"}, - {file = "SQLAlchemy-2.0.34-cp39-cp39-win32.whl", hash = "sha256:b75b00083e7fe6621ce13cfce9d4469c4774e55e8e9d38c305b37f13cf1e874c"}, - {file = "SQLAlchemy-2.0.34-cp39-cp39-win_amd64.whl", hash = "sha256:c29d03e0adf3cc1a8c3ec62d176824972ae29b67a66cbb18daff3062acc6faa8"}, - {file = "SQLAlchemy-2.0.34-py3-none-any.whl", hash = "sha256:7286c353ee6475613d8beff83167374006c6b3e3f0e6491bfe8ca610eb1dec0f"}, - {file = "sqlalchemy-2.0.34.tar.gz", hash = "sha256:10d8f36990dd929690666679b0f42235c159a7051534adb135728ee52828dd22"}, + {file = "SQLAlchemy-2.0.35-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:67219632be22f14750f0d1c70e62f204ba69d28f62fd6432ba05ab295853de9b"}, + {file = "SQLAlchemy-2.0.35-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:4668bd8faf7e5b71c0319407b608f278f279668f358857dbfd10ef1954ac9f90"}, + {file = "SQLAlchemy-2.0.35-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:cb8bea573863762bbf45d1e13f87c2d2fd32cee2dbd50d050f83f87429c9e1ea"}, + {file = "SQLAlchemy-2.0.35-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f552023710d4b93d8fb29a91fadf97de89c5926c6bd758897875435f2a939f33"}, + {file = "SQLAlchemy-2.0.35-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:016b2e665f778f13d3c438651dd4de244214b527a275e0acf1d44c05bc6026a9"}, + {file = "SQLAlchemy-2.0.35-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:7befc148de64b6060937231cbff8d01ccf0bfd75aa26383ffdf8d82b12ec04ff"}, + {file = "SQLAlchemy-2.0.35-cp310-cp310-win32.whl", hash = "sha256:22b83aed390e3099584b839b93f80a0f4a95ee7f48270c97c90acd40ee646f0b"}, + {file = "SQLAlchemy-2.0.35-cp310-cp310-win_amd64.whl", hash = "sha256:a29762cd3d116585278ffb2e5b8cc311fb095ea278b96feef28d0b423154858e"}, + {file = "SQLAlchemy-2.0.35-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:e21f66748ab725ade40fa7af8ec8b5019c68ab00b929f6643e1b1af461eddb60"}, + {file = "SQLAlchemy-2.0.35-cp311-cp311-macosx_11_0_arm64.whl", hash = 
"sha256:8a6219108a15fc6d24de499d0d515c7235c617b2540d97116b663dade1a54d62"}, + {file = "SQLAlchemy-2.0.35-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:042622a5306c23b972192283f4e22372da3b8ddf5f7aac1cc5d9c9b222ab3ff6"}, + {file = "SQLAlchemy-2.0.35-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:627dee0c280eea91aed87b20a1f849e9ae2fe719d52cbf847c0e0ea34464b3f7"}, + {file = "SQLAlchemy-2.0.35-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:4fdcd72a789c1c31ed242fd8c1bcd9ea186a98ee8e5408a50e610edfef980d71"}, + {file = "SQLAlchemy-2.0.35-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:89b64cd8898a3a6f642db4eb7b26d1b28a497d4022eccd7717ca066823e9fb01"}, + {file = "SQLAlchemy-2.0.35-cp311-cp311-win32.whl", hash = "sha256:6a93c5a0dfe8d34951e8a6f499a9479ffb9258123551fa007fc708ae2ac2bc5e"}, + {file = "SQLAlchemy-2.0.35-cp311-cp311-win_amd64.whl", hash = "sha256:c68fe3fcde03920c46697585620135b4ecfdfc1ed23e75cc2c2ae9f8502c10b8"}, + {file = "SQLAlchemy-2.0.35-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:eb60b026d8ad0c97917cb81d3662d0b39b8ff1335e3fabb24984c6acd0c900a2"}, + {file = "SQLAlchemy-2.0.35-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:6921ee01caf375363be5e9ae70d08ce7ca9d7e0e8983183080211a062d299468"}, + {file = "SQLAlchemy-2.0.35-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8cdf1a0dbe5ced887a9b127da4ffd7354e9c1a3b9bb330dce84df6b70ccb3a8d"}, + {file = "SQLAlchemy-2.0.35-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:93a71c8601e823236ac0e5d087e4f397874a421017b3318fd92c0b14acf2b6db"}, + {file = "SQLAlchemy-2.0.35-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:e04b622bb8a88f10e439084486f2f6349bf4d50605ac3e445869c7ea5cf0fa8c"}, + {file = "SQLAlchemy-2.0.35-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:1b56961e2d31389aaadf4906d453859f35302b4eb818d34a26fab72596076bb8"}, + {file = "SQLAlchemy-2.0.35-cp312-cp312-win32.whl", hash = "sha256:0f9f3f9a3763b9c4deb8c5d09c4cc52ffe49f9876af41cc1b2ad0138878453cf"}, + {file = "SQLAlchemy-2.0.35-cp312-cp312-win_amd64.whl", hash = "sha256:25b0f63e7fcc2a6290cb5f7f5b4fc4047843504983a28856ce9b35d8f7de03cc"}, + {file = "SQLAlchemy-2.0.35-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:f021d334f2ca692523aaf7bbf7592ceff70c8594fad853416a81d66b35e3abf9"}, + {file = "SQLAlchemy-2.0.35-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:05c3f58cf91683102f2f0265c0db3bd3892e9eedabe059720492dbaa4f922da1"}, + {file = "SQLAlchemy-2.0.35-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:032d979ce77a6c2432653322ba4cbeabf5a6837f704d16fa38b5a05d8e21fa00"}, + {file = "SQLAlchemy-2.0.35-cp37-cp37m-musllinux_1_2_aarch64.whl", hash = "sha256:2e795c2f7d7249b75bb5f479b432a51b59041580d20599d4e112b5f2046437a3"}, + {file = "SQLAlchemy-2.0.35-cp37-cp37m-musllinux_1_2_x86_64.whl", hash = "sha256:cc32b2990fc34380ec2f6195f33a76b6cdaa9eecf09f0c9404b74fc120aef36f"}, + {file = "SQLAlchemy-2.0.35-cp37-cp37m-win32.whl", hash = "sha256:9509c4123491d0e63fb5e16199e09f8e262066e58903e84615c301dde8fa2e87"}, + {file = "SQLAlchemy-2.0.35-cp37-cp37m-win_amd64.whl", hash = "sha256:3655af10ebcc0f1e4e06c5900bb33e080d6a1fa4228f502121f28a3b1753cde5"}, + {file = "SQLAlchemy-2.0.35-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:4c31943b61ed8fdd63dfd12ccc919f2bf95eefca133767db6fbbd15da62078ec"}, + {file = "SQLAlchemy-2.0.35-cp38-cp38-macosx_11_0_arm64.whl", hash = 
"sha256:a62dd5d7cc8626a3634208df458c5fe4f21200d96a74d122c83bc2015b333bc1"}, + {file = "SQLAlchemy-2.0.35-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0630774b0977804fba4b6bbea6852ab56c14965a2b0c7fc7282c5f7d90a1ae72"}, + {file = "SQLAlchemy-2.0.35-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8d625eddf7efeba2abfd9c014a22c0f6b3796e0ffb48f5d5ab106568ef01ff5a"}, + {file = "SQLAlchemy-2.0.35-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:ada603db10bb865bbe591939de854faf2c60f43c9b763e90f653224138f910d9"}, + {file = "SQLAlchemy-2.0.35-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:c41411e192f8d3ea39ea70e0fae48762cd11a2244e03751a98bd3c0ca9a4e936"}, + {file = "SQLAlchemy-2.0.35-cp38-cp38-win32.whl", hash = "sha256:d299797d75cd747e7797b1b41817111406b8b10a4f88b6e8fe5b5e59598b43b0"}, + {file = "SQLAlchemy-2.0.35-cp38-cp38-win_amd64.whl", hash = "sha256:0375a141e1c0878103eb3d719eb6d5aa444b490c96f3fedab8471c7f6ffe70ee"}, + {file = "SQLAlchemy-2.0.35-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:ccae5de2a0140d8be6838c331604f91d6fafd0735dbdcee1ac78fc8fbaba76b4"}, + {file = "SQLAlchemy-2.0.35-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:2a275a806f73e849e1c309ac11108ea1a14cd7058577aba962cd7190e27c9e3c"}, + {file = "SQLAlchemy-2.0.35-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:732e026240cdd1c1b2e3ac515c7a23820430ed94292ce33806a95869c46bd139"}, + {file = "SQLAlchemy-2.0.35-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:890da8cd1941fa3dab28c5bac3b9da8502e7e366f895b3b8e500896f12f94d11"}, + {file = "SQLAlchemy-2.0.35-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:c0d8326269dbf944b9201911b0d9f3dc524d64779a07518199a58384c3d37a44"}, + {file = "SQLAlchemy-2.0.35-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:b76d63495b0508ab9fc23f8152bac63205d2a704cd009a2b0722f4c8e0cba8e0"}, + {file = "SQLAlchemy-2.0.35-cp39-cp39-win32.whl", hash = "sha256:69683e02e8a9de37f17985905a5eca18ad651bf592314b4d3d799029797d0eb3"}, + {file = "SQLAlchemy-2.0.35-cp39-cp39-win_amd64.whl", hash = "sha256:aee110e4ef3c528f3abbc3c2018c121e708938adeeff9006428dd7c8555e9b3f"}, + {file = "SQLAlchemy-2.0.35-py3-none-any.whl", hash = "sha256:2ab3f0336c0387662ce6221ad30ab3a5e6499aab01b9790879b6578fd9b8faa1"}, + {file = "sqlalchemy-2.0.35.tar.gz", hash = "sha256:e11d7ea4d24f0a262bccf9a7cd6284c976c5369dac21db237cff59586045ab9f"}, ] [package.dependencies] @@ -8729,30 +7474,15 @@ postgresql-psycopgbinary = ["psycopg[binary] (>=3.0.7)"] pymysql = ["pymysql"] sqlcipher = ["sqlcipher3_binary"] -[[package]] -name = "sqlparse" -version = "0.5.1" -description = "A non-validating SQL parser." -optional = false -python-versions = ">=3.8" -files = [ - {file = "sqlparse-0.5.1-py3-none-any.whl", hash = "sha256:773dcbf9a5ab44a090f3441e2180efe2560220203dc2f8c0b0fa141e18b505e4"}, - {file = "sqlparse-0.5.1.tar.gz", hash = "sha256:bb6b4df465655ef332548e24f08e205afc81b9ab86cb1c45657a7ff173a3a00e"}, -] - -[package.extras] -dev = ["build", "hatch"] -doc = ["sphinx"] - [[package]] name = "starlette" -version = "0.38.5" +version = "0.38.6" description = "The little ASGI library that shines." 
optional = false python-versions = ">=3.8" files = [ - {file = "starlette-0.38.5-py3-none-any.whl", hash = "sha256:632f420a9d13e3ee2a6f18f437b0a9f1faecb0bc42e1942aa2ea0e379a4c4206"}, - {file = "starlette-0.38.5.tar.gz", hash = "sha256:04a92830a9b6eb1442c766199d62260c3d4dc9c4f9188360626b1e0273cb7077"}, + {file = "starlette-0.38.6-py3-none-any.whl", hash = "sha256:4517a1409e2e73ee4951214ba012052b9e16f60e90d73cfb06192c19203bbb05"}, + {file = "starlette-0.38.6.tar.gz", hash = "sha256:863a1588f5574e70a821dadefb41e4881ea451a47a3cd1b4df359d4ffefe5ead"}, ] [package.dependencies] @@ -8761,29 +7491,15 @@ anyio = ">=3.4.0,<5" [package.extras] full = ["httpx (>=0.22.0)", "itsdangerous", "jinja2", "python-multipart (>=0.0.7)", "pyyaml"] -[[package]] -name = "strictyaml" -version = "1.7.3" -description = "Strict, typed YAML parser" -optional = false -python-versions = ">=3.7.0" -files = [ - {file = "strictyaml-1.7.3-py3-none-any.whl", hash = "sha256:fb5c8a4edb43bebb765959e420f9b3978d7f1af88c80606c03fb420888f5d1c7"}, - {file = "strictyaml-1.7.3.tar.gz", hash = "sha256:22f854a5fcab42b5ddba8030a0e4be51ca89af0267961c8d6cfa86395586c407"}, -] - -[package.dependencies] -python-dateutil = ">=2.6.0" - [[package]] name = "sympy" -version = "1.13.2" +version = "1.13.3" description = "Computer algebra system (CAS) in Python" optional = false python-versions = ">=3.8" files = [ - {file = "sympy-1.13.2-py3-none-any.whl", hash = "sha256:c51d75517712f1aed280d4ce58506a4a88d635d6b5dd48b39102a7ae1f3fcfe9"}, - {file = "sympy-1.13.2.tar.gz", hash = "sha256:401449d84d07be9d0c7a46a64bd54fe097667d5e7181bfe67ec777be9e01cb13"}, + {file = "sympy-1.13.3-py3-none-any.whl", hash = "sha256:54612cf55a62755ee71824ce692986f23c88ffa77207b30c1368eda4a7060f73"}, + {file = "sympy-1.13.3.tar.gz", hash = "sha256:b27fd2c6530e0ab39e275fc9b683895367e51d5da91baa8d3d64db2565fec4d9"}, ] [package.dependencies] @@ -8849,13 +7565,13 @@ test = ["pytest", "tornado (>=4.5)", "typeguard"] [[package]] name = "tencentcloud-sdk-python-common" -version = "3.0.1231" +version = "3.0.1242" description = "Tencent Cloud Common SDK for Python" optional = false python-versions = "*" files = [ - {file = "tencentcloud-sdk-python-common-3.0.1231.tar.gz", hash = "sha256:22aa281ca2eac511e1615b2953da7c4a0bec87cf93a05a7a15dbb61b23a215ee"}, - {file = "tencentcloud_sdk_python_common-3.0.1231-py2.py3-none-any.whl", hash = "sha256:bd0f7c4df4b156ec35c8731afa1f498043c7e1cd5d2feb595ee441fdb45a061e"}, + {file = "tencentcloud-sdk-python-common-3.0.1242.tar.gz", hash = "sha256:fb9cd993f5d1378932b495e85ba4f29b0b04b4c30d174a6ba98d8995096fe8f0"}, + {file = "tencentcloud_sdk_python_common-3.0.1242-py2.py3-none-any.whl", hash = "sha256:a106d926e772e9a89717b72595e595636ab09907f3c56a3ff99ba2d0444e0da4"}, ] [package.dependencies] @@ -8863,17 +7579,17 @@ requests = ">=2.16.0" [[package]] name = "tencentcloud-sdk-python-hunyuan" -version = "3.0.1231" +version = "3.0.1242" description = "Tencent Cloud Hunyuan SDK for Python" optional = false python-versions = "*" files = [ - {file = "tencentcloud-sdk-python-hunyuan-3.0.1231.tar.gz", hash = "sha256:6da12f418f14305b3a6b7bb29b6d95bf4038a6b66b81c0e03b8dafc4f6df99ca"}, - {file = "tencentcloud_sdk_python_hunyuan-3.0.1231-py2.py3-none-any.whl", hash = "sha256:21ba28f69c34c15e20900be3f2c06376fcaf7e58265f939833c55631f2348792"}, + {file = "tencentcloud-sdk-python-hunyuan-3.0.1242.tar.gz", hash = "sha256:4d0daf9349ac9274887d08514b988e30101ab2a659c034590ce5920458585840"}, + {file = "tencentcloud_sdk_python_hunyuan-3.0.1242-py2.py3-none-any.whl", hash = 
"sha256:1989f13b5f365ff1c6b3c3fb9c6d2fda71dc63cf62b98f26f1dc449d89547852"}, ] [package.dependencies] -tencentcloud-sdk-python-common = "3.0.1231" +tencentcloud-sdk-python-common = "3.0.1242" [[package]] name = "threadpoolctl" @@ -8955,37 +7671,6 @@ requests = ">=2.26.0" [package.extras] blobfile = ["blobfile (>=2)"] -[[package]] -name = "tinysegmenter" -version = "0.3" -description = "Very compact Japanese tokenizer" -optional = false -python-versions = "*" -files = [ - {file = "tinysegmenter-0.3.tar.gz", hash = "sha256:ed1f6d2e806a4758a73be589754384cbadadc7e1a414c81a166fc9adf2d40c6d"}, -] - -[[package]] -name = "tldextract" -version = "5.1.2" -description = "Accurately separates a URL's subdomain, domain, and public suffix, using the Public Suffix List (PSL). By default, this includes the public ICANN TLDs and their exceptions. You can optionally support the Public Suffix List's private domains as well." -optional = false -python-versions = ">=3.8" -files = [ - {file = "tldextract-5.1.2-py3-none-any.whl", hash = "sha256:4dfc4c277b6b97fa053899fcdb892d2dc27295851ab5fac4e07797b6a21b2e46"}, - {file = "tldextract-5.1.2.tar.gz", hash = "sha256:c9e17f756f05afb5abac04fe8f766e7e70f9fe387adb1859f0f52408ee060200"}, -] - -[package.dependencies] -filelock = ">=3.0.8" -idna = "*" -requests = ">=2.1.0" -requests-file = ">=1.4" - -[package.extras] -release = ["build", "twine"] -testing = ["black", "mypy", "pytest", "pytest-gitignore", "pytest-mock", "responses", "ruff", "syrupy", "tox", "types-filelock", "types-requests"] - [[package]] name = "tokenizers" version = "0.15.2" @@ -9172,91 +7857,6 @@ notebook = ["ipywidgets (>=6)"] slack = ["slack-sdk"] telegram = ["requests"] -[[package]] -name = "transformers" -version = "4.35.2" -description = "State-of-the-art Machine Learning for JAX, PyTorch and TensorFlow" -optional = false -python-versions = ">=3.8.0" -files = [ - {file = "transformers-4.35.2-py3-none-any.whl", hash = "sha256:9dfa76f8692379544ead84d98f537be01cd1070de75c74efb13abcbc938fbe2f"}, - {file = "transformers-4.35.2.tar.gz", hash = "sha256:2d125e197d77b0cdb6c9201df9fa7e2101493272e448b9fba9341c695bee2f52"}, -] - -[package.dependencies] -filelock = "*" -huggingface-hub = ">=0.16.4,<1.0" -numpy = ">=1.17" -packaging = ">=20.0" -pyyaml = ">=5.1" -regex = "!=2019.12.17" -requests = "*" -safetensors = ">=0.3.1" -tokenizers = ">=0.14,<0.19" -tqdm = ">=4.27" - -[package.extras] -accelerate = ["accelerate (>=0.20.3)"] -agents = ["Pillow (<10.0.0)", "accelerate (>=0.20.3)", "datasets (!=2.5.0)", "diffusers", "opencv-python", "sentencepiece (>=0.1.91,!=0.1.92)", "torch (>=1.10,!=1.12.0)"] -all = ["Pillow (<10.0.0)", "accelerate (>=0.20.3)", "av (==9.2.0)", "codecarbon (==1.2.0)", "decord (==0.6.0)", "flax (>=0.4.1,<=0.7.0)", "jax (>=0.4.1,<=0.4.13)", "jaxlib (>=0.4.1,<=0.4.13)", "kenlm", "keras-nlp (>=0.3.1)", "librosa", "onnxconverter-common", "optax (>=0.0.8,<=0.1.4)", "optuna", "phonemizer", "protobuf", "pyctcdecode (>=0.4.0)", "ray[tune]", "sentencepiece (>=0.1.91,!=0.1.92)", "sigopt", "tensorflow (>=2.6,<2.15)", "tensorflow-text (<2.15)", "tf2onnx", "timm", "tokenizers (>=0.14,<0.19)", "torch (>=1.10,!=1.12.0)", "torchaudio", "torchvision"] -audio = ["kenlm", "librosa", "phonemizer", "pyctcdecode (>=0.4.0)"] -codecarbon = ["codecarbon (==1.2.0)"] -deepspeed = ["accelerate (>=0.20.3)", "deepspeed (>=0.9.3)"] -deepspeed-testing = ["GitPython (<3.1.19)", "accelerate (>=0.20.3)", "beautifulsoup4", "black (>=23.1,<24.0)", "cookiecutter (==1.7.3)", "datasets (!=2.5.0)", "deepspeed (>=0.9.3)", "dill 
(<0.3.5)", "evaluate (>=0.2.0)", "faiss-cpu", "hf-doc-builder (>=0.3.0)", "nltk", "optuna", "parameterized", "protobuf", "psutil", "pytest (>=7.2.0)", "pytest-timeout", "pytest-xdist", "rjieba", "rouge-score (!=0.0.7,!=0.0.8,!=0.1,!=0.1.1)", "sacrebleu (>=1.4.12,<2.0.0)", "sacremoses", "sentencepiece (>=0.1.91,!=0.1.92)", "tensorboard", "timeout-decorator"] -dev = ["GitPython (<3.1.19)", "Pillow (<10.0.0)", "accelerate (>=0.20.3)", "av (==9.2.0)", "beautifulsoup4", "black (>=23.1,<24.0)", "codecarbon (==1.2.0)", "cookiecutter (==1.7.3)", "datasets (!=2.5.0)", "decord (==0.6.0)", "dill (<0.3.5)", "evaluate (>=0.2.0)", "faiss-cpu", "flax (>=0.4.1,<=0.7.0)", "fugashi (>=1.0)", "hf-doc-builder", "hf-doc-builder (>=0.3.0)", "ipadic (>=1.0.0,<2.0)", "isort (>=5.5.4)", "jax (>=0.4.1,<=0.4.13)", "jaxlib (>=0.4.1,<=0.4.13)", "kenlm", "keras-nlp (>=0.3.1)", "librosa", "nltk", "onnxconverter-common", "optax (>=0.0.8,<=0.1.4)", "optuna", "parameterized", "phonemizer", "protobuf", "psutil", "pyctcdecode (>=0.4.0)", "pytest (>=7.2.0)", "pytest-timeout", "pytest-xdist", "ray[tune]", "rhoknp (>=1.1.0,<1.3.1)", "rjieba", "rouge-score (!=0.0.7,!=0.0.8,!=0.1,!=0.1.1)", "ruff (>=0.0.241,<=0.0.259)", "sacrebleu (>=1.4.12,<2.0.0)", "sacremoses", "scikit-learn", "sentencepiece (>=0.1.91,!=0.1.92)", "sigopt", "sudachidict-core (>=20220729)", "sudachipy (>=0.6.6)", "tensorboard", "tensorflow (>=2.6,<2.15)", "tensorflow-text (<2.15)", "tf2onnx", "timeout-decorator", "timm", "tokenizers (>=0.14,<0.19)", "torch (>=1.10,!=1.12.0)", "torchaudio", "torchvision", "unidic (>=1.0.2)", "unidic-lite (>=1.0.7)", "urllib3 (<2.0.0)"] -dev-tensorflow = ["GitPython (<3.1.19)", "Pillow (<10.0.0)", "beautifulsoup4", "black (>=23.1,<24.0)", "cookiecutter (==1.7.3)", "datasets (!=2.5.0)", "dill (<0.3.5)", "evaluate (>=0.2.0)", "faiss-cpu", "hf-doc-builder", "hf-doc-builder (>=0.3.0)", "isort (>=5.5.4)", "kenlm", "keras-nlp (>=0.3.1)", "librosa", "nltk", "onnxconverter-common", "onnxruntime (>=1.4.0)", "onnxruntime-tools (>=1.4.2)", "parameterized", "phonemizer", "protobuf", "psutil", "pyctcdecode (>=0.4.0)", "pytest (>=7.2.0)", "pytest-timeout", "pytest-xdist", "rjieba", "rouge-score (!=0.0.7,!=0.0.8,!=0.1,!=0.1.1)", "ruff (>=0.0.241,<=0.0.259)", "sacrebleu (>=1.4.12,<2.0.0)", "sacremoses", "scikit-learn", "sentencepiece (>=0.1.91,!=0.1.92)", "tensorboard", "tensorflow (>=2.6,<2.15)", "tensorflow-text (<2.15)", "tf2onnx", "timeout-decorator", "tokenizers (>=0.14,<0.19)", "urllib3 (<2.0.0)"] -dev-torch = ["GitPython (<3.1.19)", "Pillow (<10.0.0)", "accelerate (>=0.20.3)", "beautifulsoup4", "black (>=23.1,<24.0)", "codecarbon (==1.2.0)", "cookiecutter (==1.7.3)", "datasets (!=2.5.0)", "dill (<0.3.5)", "evaluate (>=0.2.0)", "faiss-cpu", "fugashi (>=1.0)", "hf-doc-builder", "hf-doc-builder (>=0.3.0)", "ipadic (>=1.0.0,<2.0)", "isort (>=5.5.4)", "kenlm", "librosa", "nltk", "onnxruntime (>=1.4.0)", "onnxruntime-tools (>=1.4.2)", "optuna", "parameterized", "phonemizer", "protobuf", "psutil", "pyctcdecode (>=0.4.0)", "pytest (>=7.2.0)", "pytest-timeout", "pytest-xdist", "ray[tune]", "rhoknp (>=1.1.0,<1.3.1)", "rjieba", "rouge-score (!=0.0.7,!=0.0.8,!=0.1,!=0.1.1)", "ruff (>=0.0.241,<=0.0.259)", "sacrebleu (>=1.4.12,<2.0.0)", "sacremoses", "scikit-learn", "sentencepiece (>=0.1.91,!=0.1.92)", "sigopt", "sudachidict-core (>=20220729)", "sudachipy (>=0.6.6)", "tensorboard", "timeout-decorator", "timm", "tokenizers (>=0.14,<0.19)", "torch (>=1.10,!=1.12.0)", "torchaudio", "torchvision", "unidic (>=1.0.2)", "unidic-lite (>=1.0.7)", "urllib3 
(<2.0.0)"] -docs = ["Pillow (<10.0.0)", "accelerate (>=0.20.3)", "av (==9.2.0)", "codecarbon (==1.2.0)", "decord (==0.6.0)", "flax (>=0.4.1,<=0.7.0)", "hf-doc-builder", "jax (>=0.4.1,<=0.4.13)", "jaxlib (>=0.4.1,<=0.4.13)", "kenlm", "keras-nlp (>=0.3.1)", "librosa", "onnxconverter-common", "optax (>=0.0.8,<=0.1.4)", "optuna", "phonemizer", "protobuf", "pyctcdecode (>=0.4.0)", "ray[tune]", "sentencepiece (>=0.1.91,!=0.1.92)", "sigopt", "tensorflow (>=2.6,<2.15)", "tensorflow-text (<2.15)", "tf2onnx", "timm", "tokenizers (>=0.14,<0.19)", "torch (>=1.10,!=1.12.0)", "torchaudio", "torchvision"] -docs-specific = ["hf-doc-builder"] -flax = ["flax (>=0.4.1,<=0.7.0)", "jax (>=0.4.1,<=0.4.13)", "jaxlib (>=0.4.1,<=0.4.13)", "optax (>=0.0.8,<=0.1.4)"] -flax-speech = ["kenlm", "librosa", "phonemizer", "pyctcdecode (>=0.4.0)"] -ftfy = ["ftfy"] -integrations = ["optuna", "ray[tune]", "sigopt"] -ja = ["fugashi (>=1.0)", "ipadic (>=1.0.0,<2.0)", "rhoknp (>=1.1.0,<1.3.1)", "sudachidict-core (>=20220729)", "sudachipy (>=0.6.6)", "unidic (>=1.0.2)", "unidic-lite (>=1.0.7)"] -modelcreation = ["cookiecutter (==1.7.3)"] -natten = ["natten (>=0.14.6)"] -onnx = ["onnxconverter-common", "onnxruntime (>=1.4.0)", "onnxruntime-tools (>=1.4.2)", "tf2onnx"] -onnxruntime = ["onnxruntime (>=1.4.0)", "onnxruntime-tools (>=1.4.2)"] -optuna = ["optuna"] -quality = ["GitPython (<3.1.19)", "black (>=23.1,<24.0)", "datasets (!=2.5.0)", "hf-doc-builder (>=0.3.0)", "isort (>=5.5.4)", "ruff (>=0.0.241,<=0.0.259)", "urllib3 (<2.0.0)"] -ray = ["ray[tune]"] -retrieval = ["datasets (!=2.5.0)", "faiss-cpu"] -sagemaker = ["sagemaker (>=2.31.0)"] -sentencepiece = ["protobuf", "sentencepiece (>=0.1.91,!=0.1.92)"] -serving = ["fastapi", "pydantic (<2)", "starlette", "uvicorn"] -sigopt = ["sigopt"] -sklearn = ["scikit-learn"] -speech = ["kenlm", "librosa", "phonemizer", "pyctcdecode (>=0.4.0)", "torchaudio"] -testing = ["GitPython (<3.1.19)", "beautifulsoup4", "black (>=23.1,<24.0)", "cookiecutter (==1.7.3)", "datasets (!=2.5.0)", "dill (<0.3.5)", "evaluate (>=0.2.0)", "faiss-cpu", "hf-doc-builder (>=0.3.0)", "nltk", "parameterized", "protobuf", "psutil", "pytest (>=7.2.0)", "pytest-timeout", "pytest-xdist", "rjieba", "rouge-score (!=0.0.7,!=0.0.8,!=0.1,!=0.1.1)", "sacrebleu (>=1.4.12,<2.0.0)", "sacremoses", "tensorboard", "timeout-decorator"] -tf = ["keras-nlp (>=0.3.1)", "onnxconverter-common", "tensorflow (>=2.6,<2.15)", "tensorflow-text (<2.15)", "tf2onnx"] -tf-cpu = ["keras-nlp (>=0.3.1)", "onnxconverter-common", "tensorflow-cpu (>=2.6,<2.15)", "tensorflow-text (<2.15)", "tf2onnx"] -tf-speech = ["kenlm", "librosa", "phonemizer", "pyctcdecode (>=0.4.0)"] -timm = ["timm"] -tokenizers = ["tokenizers (>=0.14,<0.19)"] -torch = ["accelerate (>=0.20.3)", "torch (>=1.10,!=1.12.0)"] -torch-speech = ["kenlm", "librosa", "phonemizer", "pyctcdecode (>=0.4.0)", "torchaudio"] -torch-vision = ["Pillow (<10.0.0)", "torchvision"] -torchhub = ["filelock", "huggingface-hub (>=0.16.4,<1.0)", "importlib-metadata", "numpy (>=1.17)", "packaging (>=20.0)", "protobuf", "regex (!=2019.12.17)", "requests", "sentencepiece (>=0.1.91,!=0.1.92)", "tokenizers (>=0.14,<0.19)", "torch (>=1.10,!=1.12.0)", "tqdm (>=4.27)"] -video = ["av (==9.2.0)", "decord (==0.6.0)"] -vision = ["Pillow (<10.0.0)"] - -[[package]] -name = "twilio" -version = "9.0.5" -description = "Twilio API client and TwiML generator" -optional = false -python-versions = ">=3.7.0" -files = [ - {file = "twilio-9.0.5-py2.py3-none-any.whl", hash = 
"sha256:5e09e910b9368f50f23cb3c3dd5ba77164d80a81e9d97db955cbac322deb2a4e"}, - {file = "twilio-9.0.5.tar.gz", hash = "sha256:e9b5727943584d25d618fe502f0100fc5283215f31c863f80b5c64581b4702b0"}, -] - -[package.dependencies] -aiohttp = ">=3.8.4" -aiohttp-retry = ">=2.8.3" -PyJWT = ">=2.0.0,<3.0.0" -requests = ">=2.0.0" - [[package]] name = "typer" version = "0.12.5" @@ -9274,20 +7874,6 @@ rich = ">=10.11.0" shellingham = ">=1.3.0" typing-extensions = ">=3.7.4.3" -[[package]] -name = "types-requests" -version = "2.32.0.20240914" -description = "Typing stubs for requests" -optional = false -python-versions = ">=3.8" -files = [ - {file = "types-requests-2.32.0.20240914.tar.gz", hash = "sha256:2850e178db3919d9bf809e434eef65ba49d0e7e33ac92d588f4a5e295fffd405"}, - {file = "types_requests-2.32.0.20240914-py3-none-any.whl", hash = "sha256:59c2f673eb55f32a99b2894faf6020e1a9f4a402ad0f192bfee0b64469054310"}, -] - -[package.dependencies] -urllib3 = ">=2" - [[package]] name = "typing-extensions" version = "4.12.2" @@ -9316,32 +7902,15 @@ typing-extensions = ">=3.7.4" [[package]] name = "tzdata" -version = "2024.1" +version = "2024.2" description = "Provider of IANA time zone data" optional = false python-versions = ">=2" files = [ - {file = "tzdata-2024.1-py2.py3-none-any.whl", hash = "sha256:9068bc196136463f5245e51efda838afa15aaeca9903f49050dfa2679db4d252"}, - {file = "tzdata-2024.1.tar.gz", hash = "sha256:2674120f8d891909751c38abcdfd386ac0a5a1127954fbc332af6b5ceae07efd"}, + {file = "tzdata-2024.2-py2.py3-none-any.whl", hash = "sha256:a48093786cdcde33cad18c2555e8532f34422074448fbc874186f0abd79565cd"}, + {file = "tzdata-2024.2.tar.gz", hash = "sha256:7d85cc416e9382e69095b7bdf4afd9e3880418a2413feec7069d533d6b4e31cc"}, ] -[[package]] -name = "tzlocal" -version = "5.2" -description = "tzinfo object for the local timezone" -optional = false -python-versions = ">=3.8" -files = [ - {file = "tzlocal-5.2-py3-none-any.whl", hash = "sha256:49816ef2fe65ea8ac19d19aa7a1ae0551c834303d5014c6d5a62e4cbda8047b8"}, - {file = "tzlocal-5.2.tar.gz", hash = "sha256:8d399205578f1a9342816409cc1e46a93ebd5755e39ea2d85334bea911bf0e6e"}, -] - -[package.dependencies] -tzdata = {version = "*", markers = "platform_system == \"Windows\""} - -[package.extras] -devenv = ["check-manifest", "pytest (>=4.3)", "pytest-cov", "pytest-mock (>=3.3)", "zest.releaser"] - [[package]] name = "ujson" version = "5.10.0" @@ -9542,13 +8111,13 @@ zstd = ["zstandard (>=0.18.0)"] [[package]] name = "uvicorn" -version = "0.30.6" +version = "0.31.0" description = "The lightning-fast ASGI server." 
optional = false python-versions = ">=3.8" files = [ - {file = "uvicorn-0.30.6-py3-none-any.whl", hash = "sha256:65fd46fe3fda5bdc1b03b94eb634923ff18cd35b2f084813ea79d1f103f711b5"}, - {file = "uvicorn-0.30.6.tar.gz", hash = "sha256:4b15decdda1e72be08209e860a1e10e92439ad5b97cf44cc945fcbee66fc5788"}, + {file = "uvicorn-0.31.0-py3-none-any.whl", hash = "sha256:cac7be4dd4d891c363cd942160a7b02e69150dcbc7a36be04d5f4af4b17c8ced"}, + {file = "uvicorn-0.31.0.tar.gz", hash = "sha256:13bc21373d103859f68fe739608e2eb054a816dea79189bc3ca08ea89a275906"}, ] [package.dependencies] @@ -9621,57 +8190,6 @@ files = [ {file = "validators-0.21.0.tar.gz", hash = "sha256:245b98ab778ed9352a7269c6a8f6c2a839bed5b2a7e3e60273ce399d247dd4b3"}, ] -[[package]] -name = "vanna" -version = "0.5.5" -description = "Generate SQL queries from natural language" -optional = false -python-versions = ">=3.9" -files = [ - {file = "vanna-0.5.5-py3-none-any.whl", hash = "sha256:e1a308b7127b9e98c2579c0e4178fc1475d891c498e4a0667cffa10df8891e73"}, - {file = "vanna-0.5.5.tar.gz", hash = "sha256:7d9bf188a635bb75e4f8db15f0e6dbe72a426784779485f087b2df0ce175e664"}, -] - -[package.dependencies] -clickhouse_driver = {version = "*", optional = true, markers = "extra == \"clickhouse\""} -db-dtypes = {version = "*", optional = true, markers = "extra == \"postgres\""} -duckdb = {version = "*", optional = true, markers = "extra == \"duckdb\""} -flask = "*" -flask-sock = "*" -kaleido = "*" -pandas = "*" -plotly = "*" -psycopg2-binary = {version = "*", optional = true, markers = "extra == \"postgres\""} -PyMySQL = {version = "*", optional = true, markers = "extra == \"mysql\""} -requests = "*" -sqlalchemy = "*" -sqlparse = "*" -tabulate = "*" - -[package.extras] -all = ["PyMySQL", "anthropic", "chromadb", "db-dtypes", "duckdb", "fastembed", "google-cloud-aiplatform", "google-cloud-bigquery", "google-generativeai", "httpx", "marqo", "mistralai", "ollama", "openai", "opensearch-dsl", "opensearch-py", "pinecone-client", "psycopg2-binary", "qdrant-client", "snowflake-connector-python", "transformers", "zhipuai"] -anthropic = ["anthropic"] -bigquery = ["google-cloud-bigquery"] -chromadb = ["chromadb"] -clickhouse = ["clickhouse_driver"] -duckdb = ["duckdb"] -gemini = ["google-generativeai"] -google = ["google-cloud-aiplatform", "google-generativeai"] -hf = ["transformers"] -marqo = ["marqo"] -mistralai = ["mistralai"] -mysql = ["PyMySQL"] -ollama = ["httpx", "ollama"] -openai = ["openai"] -opensearch = ["opensearch-dsl", "opensearch-py"] -pinecone = ["fastembed", "pinecone-client"] -postgres = ["db-dtypes", "psycopg2-binary"] -qdrant = ["fastembed", "qdrant-client"] -snowflake = ["snowflake-connector-python"] -test = ["tox"] -vllm = ["vllm"] -zhipuai = ["zhipuai"] - [[package]] name = "vine" version = "5.1.0" @@ -9685,12 +8203,12 @@ files = [ [[package]] name = "volcengine-python-sdk" -version = "1.0.101" +version = "1.0.103" description = "Volcengine SDK for Python" optional = false python-versions = "*" files = [ - {file = "volcengine-python-sdk-1.0.101.tar.gz", hash = "sha256:1b76e71a6dcf3d5be1b2c058e7d281359e6cca2cc920ffe2567d3115beea1d02"}, + {file = "volcengine-python-sdk-1.0.103.tar.gz", hash = "sha256:49fa8572802724972e1cb47a7e692b184b055f41b09099358c1a0fad1d146af5"}, ] [package.dependencies] @@ -9831,126 +8349,115 @@ validators = ">=0.18.2,<=0.21.0" [package.extras] grpc = ["grpcio", "grpcio-tools"] -[[package]] -name = "webencodings" -version = "0.5.1" -description = "Character encoding aliases for legacy web content" -optional = false 
-python-versions = "*" -files = [ - {file = "webencodings-0.5.1-py2.py3-none-any.whl", hash = "sha256:a0af1213f3c2226497a97e2b3aa01a7e4bee4f403f95be16fc9acd2947514a78"}, - {file = "webencodings-0.5.1.tar.gz", hash = "sha256:b36a1c245f2d304965eb4e0a82848379241dc04b865afcc4aab16748587e1923"}, -] - [[package]] name = "websocket-client" -version = "1.7.0" +version = "1.8.0" description = "WebSocket client for Python with low level API options" optional = false python-versions = ">=3.8" files = [ - {file = "websocket-client-1.7.0.tar.gz", hash = "sha256:10e511ea3a8c744631d3bd77e61eb17ed09304c413ad42cf6ddfa4c7787e8fe6"}, - {file = "websocket_client-1.7.0-py3-none-any.whl", hash = "sha256:f4c3d22fec12a2461427a29957ff07d35098ee2d976d3ba244e688b8b4057588"}, + {file = "websocket_client-1.8.0-py3-none-any.whl", hash = "sha256:17b44cc997f5c498e809b22cdf2d9c7a9e71c02c8cc2b6c56e7c2d1239bfa526"}, + {file = "websocket_client-1.8.0.tar.gz", hash = "sha256:3239df9f44da632f96012472805d40a23281a991027ce11d2f45a6f24ac4c3da"}, ] [package.extras] -docs = ["Sphinx (>=6.0)", "sphinx-rtd-theme (>=1.1.0)"] +docs = ["Sphinx (>=6.0)", "myst-parser (>=2.0.0)", "sphinx-rtd-theme (>=1.1.0)"] optional = ["python-socks", "wsaccel"] test = ["websockets"] [[package]] name = "websockets" -version = "13.0.1" +version = "13.1" description = "An implementation of the WebSocket Protocol (RFC 6455 & 7692)" optional = false python-versions = ">=3.8" files = [ - {file = "websockets-13.0.1-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:1841c9082a3ba4a05ea824cf6d99570a6a2d8849ef0db16e9c826acb28089e8f"}, - {file = "websockets-13.0.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:c5870b4a11b77e4caa3937142b650fbbc0914a3e07a0cf3131f35c0587489c1c"}, - {file = "websockets-13.0.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:f1d3d1f2eb79fe7b0fb02e599b2bf76a7619c79300fc55f0b5e2d382881d4f7f"}, - {file = "websockets-13.0.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:15c7d62ee071fa94a2fc52c2b472fed4af258d43f9030479d9c4a2de885fd543"}, - {file = "websockets-13.0.1-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:6724b554b70d6195ba19650fef5759ef11346f946c07dbbe390e039bcaa7cc3d"}, - {file = "websockets-13.0.1-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:56a952fa2ae57a42ba7951e6b2605e08a24801a4931b5644dfc68939e041bc7f"}, - {file = "websockets-13.0.1-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:17118647c0ea14796364299e942c330d72acc4b248e07e639d34b75067b3cdd8"}, - {file = "websockets-13.0.1-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:64a11aae1de4c178fa653b07d90f2fb1a2ed31919a5ea2361a38760192e1858b"}, - {file = "websockets-13.0.1-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:0617fd0b1d14309c7eab6ba5deae8a7179959861846cbc5cb528a7531c249448"}, - {file = "websockets-13.0.1-cp310-cp310-win32.whl", hash = "sha256:11f9976ecbc530248cf162e359a92f37b7b282de88d1d194f2167b5e7ad80ce3"}, - {file = "websockets-13.0.1-cp310-cp310-win_amd64.whl", hash = "sha256:c3c493d0e5141ec055a7d6809a28ac2b88d5b878bb22df8c621ebe79a61123d0"}, - {file = "websockets-13.0.1-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:699ba9dd6a926f82a277063603fc8d586b89f4cb128efc353b749b641fcddda7"}, - {file = "websockets-13.0.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:cf2fae6d85e5dc384bf846f8243ddaa9197f3a1a70044f59399af001fd1f51d4"}, - {file = 
"websockets-13.0.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:52aed6ef21a0f1a2a5e310fb5c42d7555e9c5855476bbd7173c3aa3d8a0302f2"}, - {file = "websockets-13.0.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8eb2b9a318542153674c6e377eb8cb9ca0fc011c04475110d3477862f15d29f0"}, - {file = "websockets-13.0.1-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:5df891c86fe68b2c38da55b7aea7095beca105933c697d719f3f45f4220a5e0e"}, - {file = "websockets-13.0.1-cp311-cp311-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:fac2d146ff30d9dd2fcf917e5d147db037a5c573f0446c564f16f1f94cf87462"}, - {file = "websockets-13.0.1-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:b8ac5b46fd798bbbf2ac6620e0437c36a202b08e1f827832c4bf050da081b501"}, - {file = "websockets-13.0.1-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:46af561eba6f9b0848b2c9d2427086cabadf14e0abdd9fde9d72d447df268418"}, - {file = "websockets-13.0.1-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:b5a06d7f60bc2fc378a333978470dfc4e1415ee52f5f0fce4f7853eb10c1e9df"}, - {file = "websockets-13.0.1-cp311-cp311-win32.whl", hash = "sha256:556e70e4f69be1082e6ef26dcb70efcd08d1850f5d6c5f4f2bcb4e397e68f01f"}, - {file = "websockets-13.0.1-cp311-cp311-win_amd64.whl", hash = "sha256:67494e95d6565bf395476e9d040037ff69c8b3fa356a886b21d8422ad86ae075"}, - {file = "websockets-13.0.1-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:f9c9e258e3d5efe199ec23903f5da0eeaad58cf6fccb3547b74fd4750e5ac47a"}, - {file = "websockets-13.0.1-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:6b41a1b3b561f1cba8321fb32987552a024a8f67f0d05f06fcf29f0090a1b956"}, - {file = "websockets-13.0.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:f73e676a46b0fe9426612ce8caeca54c9073191a77c3e9d5c94697aef99296af"}, - {file = "websockets-13.0.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1f613289f4a94142f914aafad6c6c87903de78eae1e140fa769a7385fb232fdf"}, - {file = "websockets-13.0.1-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:0f52504023b1480d458adf496dc1c9e9811df4ba4752f0bc1f89ae92f4f07d0c"}, - {file = "websockets-13.0.1-cp312-cp312-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:139add0f98206cb74109faf3611b7783ceafc928529c62b389917a037d4cfdf4"}, - {file = "websockets-13.0.1-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:47236c13be337ef36546004ce8c5580f4b1150d9538b27bf8a5ad8edf23ccfab"}, - {file = "websockets-13.0.1-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:c44ca9ade59b2e376612df34e837013e2b273e6c92d7ed6636d0556b6f4db93d"}, - {file = "websockets-13.0.1-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:9bbc525f4be3e51b89b2a700f5746c2a6907d2e2ef4513a8daafc98198b92237"}, - {file = "websockets-13.0.1-cp312-cp312-win32.whl", hash = "sha256:3624fd8664f2577cf8de996db3250662e259bfbc870dd8ebdcf5d7c6ac0b5185"}, - {file = "websockets-13.0.1-cp312-cp312-win_amd64.whl", hash = "sha256:0513c727fb8adffa6d9bf4a4463b2bade0186cbd8c3604ae5540fae18a90cb99"}, - {file = "websockets-13.0.1-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:1ee4cc030a4bdab482a37462dbf3ffb7e09334d01dd37d1063be1136a0d825fa"}, - {file = "websockets-13.0.1-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:dbb0b697cc0655719522406c059eae233abaa3243821cfdfab1215d02ac10231"}, - {file = 
"websockets-13.0.1-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:acbebec8cb3d4df6e2488fbf34702cbc37fc39ac7abf9449392cefb3305562e9"}, - {file = "websockets-13.0.1-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:63848cdb6fcc0bf09d4a155464c46c64ffdb5807ede4fb251da2c2692559ce75"}, - {file = "websockets-13.0.1-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:872afa52a9f4c414d6955c365b6588bc4401272c629ff8321a55f44e3f62b553"}, - {file = "websockets-13.0.1-cp313-cp313-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:05e70fec7c54aad4d71eae8e8cab50525e899791fc389ec6f77b95312e4e9920"}, - {file = "websockets-13.0.1-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:e82db3756ccb66266504f5a3de05ac6b32f287faacff72462612120074103329"}, - {file = "websockets-13.0.1-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:4e85f46ce287f5c52438bb3703d86162263afccf034a5ef13dbe4318e98d86e7"}, - {file = "websockets-13.0.1-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:f3fea72e4e6edb983908f0db373ae0732b275628901d909c382aae3b592589f2"}, - {file = "websockets-13.0.1-cp313-cp313-win32.whl", hash = "sha256:254ecf35572fca01a9f789a1d0f543898e222f7b69ecd7d5381d8d8047627bdb"}, - {file = "websockets-13.0.1-cp313-cp313-win_amd64.whl", hash = "sha256:ca48914cdd9f2ccd94deab5bcb5ac98025a5ddce98881e5cce762854a5de330b"}, - {file = "websockets-13.0.1-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:b74593e9acf18ea5469c3edaa6b27fa7ecf97b30e9dabd5a94c4c940637ab96e"}, - {file = "websockets-13.0.1-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:132511bfd42e77d152c919147078460c88a795af16b50e42a0bd14f0ad71ddd2"}, - {file = "websockets-13.0.1-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:165bedf13556f985a2aa064309baa01462aa79bf6112fbd068ae38993a0e1f1b"}, - {file = "websockets-13.0.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e801ca2f448850685417d723ec70298feff3ce4ff687c6f20922c7474b4746ae"}, - {file = "websockets-13.0.1-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:30d3a1f041360f029765d8704eae606781e673e8918e6b2c792e0775de51352f"}, - {file = "websockets-13.0.1-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:67648f5e50231b5a7f6d83b32f9c525e319f0ddc841be0de64f24928cd75a603"}, - {file = "websockets-13.0.1-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:4f0426d51c8f0926a4879390f53c7f5a855e42d68df95fff6032c82c888b5f36"}, - {file = "websockets-13.0.1-cp38-cp38-musllinux_1_2_i686.whl", hash = "sha256:ef48e4137e8799998a343706531e656fdec6797b80efd029117edacb74b0a10a"}, - {file = "websockets-13.0.1-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:249aab278810bee585cd0d4de2f08cfd67eed4fc75bde623be163798ed4db2eb"}, - {file = "websockets-13.0.1-cp38-cp38-win32.whl", hash = "sha256:06c0a667e466fcb56a0886d924b5f29a7f0886199102f0a0e1c60a02a3751cb4"}, - {file = "websockets-13.0.1-cp38-cp38-win_amd64.whl", hash = "sha256:1f3cf6d6ec1142412d4535adabc6bd72a63f5f148c43fe559f06298bc21953c9"}, - {file = "websockets-13.0.1-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:1fa082ea38d5de51dd409434edc27c0dcbd5fed2b09b9be982deb6f0508d25bc"}, - {file = "websockets-13.0.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:4a365bcb7be554e6e1f9f3ed64016e67e2fa03d7b027a33e436aecf194febb63"}, - {file = 
"websockets-13.0.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:10a0dc7242215d794fb1918f69c6bb235f1f627aaf19e77f05336d147fce7c37"}, - {file = "websockets-13.0.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:59197afd478545b1f73367620407b0083303569c5f2d043afe5363676f2697c9"}, - {file = "websockets-13.0.1-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:7d20516990d8ad557b5abeb48127b8b779b0b7e6771a265fa3e91767596d7d97"}, - {file = "websockets-13.0.1-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a1a2e272d067030048e1fe41aa1ec8cfbbaabce733b3d634304fa2b19e5c897f"}, - {file = "websockets-13.0.1-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:ad327ac80ba7ee61da85383ca8822ff808ab5ada0e4a030d66703cc025b021c4"}, - {file = "websockets-13.0.1-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:518f90e6dd089d34eaade01101fd8a990921c3ba18ebbe9b0165b46ebff947f0"}, - {file = "websockets-13.0.1-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:68264802399aed6fe9652e89761031acc734fc4c653137a5911c2bfa995d6d6d"}, - {file = "websockets-13.0.1-cp39-cp39-win32.whl", hash = "sha256:a5dc0c42ded1557cc7c3f0240b24129aefbad88af4f09346164349391dea8e58"}, - {file = "websockets-13.0.1-cp39-cp39-win_amd64.whl", hash = "sha256:b448a0690ef43db5ef31b3a0d9aea79043882b4632cfc3eaab20105edecf6097"}, - {file = "websockets-13.0.1-pp310-pypy310_pp73-macosx_10_15_x86_64.whl", hash = "sha256:faef9ec6354fe4f9a2c0bbb52fb1ff852effc897e2a4501e25eb3a47cb0a4f89"}, - {file = "websockets-13.0.1-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:03d3f9ba172e0a53e37fa4e636b86cc60c3ab2cfee4935e66ed1d7acaa4625ad"}, - {file = "websockets-13.0.1-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d450f5a7a35662a9b91a64aefa852f0c0308ee256122f5218a42f1d13577d71e"}, - {file = "websockets-13.0.1-pp310-pypy310_pp73-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:3f55b36d17ac50aa8a171b771e15fbe1561217510c8768af3d546f56c7576cdc"}, - {file = "websockets-13.0.1-pp310-pypy310_pp73-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:14b9c006cac63772b31abbcd3e3abb6228233eec966bf062e89e7fa7ae0b7333"}, - {file = "websockets-13.0.1-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:b79915a1179a91f6c5f04ece1e592e2e8a6bd245a0e45d12fd56b2b59e559a32"}, - {file = "websockets-13.0.1-pp38-pypy38_pp73-macosx_10_9_x86_64.whl", hash = "sha256:f40de079779acbcdbb6ed4c65af9f018f8b77c5ec4e17a4b737c05c2db554491"}, - {file = "websockets-13.0.1-pp38-pypy38_pp73-macosx_11_0_arm64.whl", hash = "sha256:80e4ba642fc87fa532bac07e5ed7e19d56940b6af6a8c61d4429be48718a380f"}, - {file = "websockets-13.0.1-pp38-pypy38_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2a02b0161c43cc9e0232711eff846569fad6ec836a7acab16b3cf97b2344c060"}, - {file = "websockets-13.0.1-pp38-pypy38_pp73-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:6aa74a45d4cdc028561a7d6ab3272c8b3018e23723100b12e58be9dfa5a24491"}, - {file = "websockets-13.0.1-pp38-pypy38_pp73-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:00fd961943b6c10ee6f0b1130753e50ac5dcd906130dcd77b0003c3ab797d026"}, - {file = "websockets-13.0.1-pp38-pypy38_pp73-win_amd64.whl", hash = 
"sha256:d93572720d781331fb10d3da9ca1067817d84ad1e7c31466e9f5e59965618096"}, - {file = "websockets-13.0.1-pp39-pypy39_pp73-macosx_10_15_x86_64.whl", hash = "sha256:71e6e5a3a3728886caee9ab8752e8113670936a193284be9d6ad2176a137f376"}, - {file = "websockets-13.0.1-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:c4a6343e3b0714e80da0b0893543bf9a5b5fa71b846ae640e56e9abc6fbc4c83"}, - {file = "websockets-13.0.1-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1a678532018e435396e37422a95e3ab87f75028ac79570ad11f5bf23cd2a7d8c"}, - {file = "websockets-13.0.1-pp39-pypy39_pp73-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d6716c087e4aa0b9260c4e579bb82e068f84faddb9bfba9906cb87726fa2e870"}, - {file = "websockets-13.0.1-pp39-pypy39_pp73-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e33505534f3f673270dd67f81e73550b11de5b538c56fe04435d63c02c3f26b5"}, - {file = "websockets-13.0.1-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:acab3539a027a85d568c2573291e864333ec9d912675107d6efceb7e2be5d980"}, - {file = "websockets-13.0.1-py3-none-any.whl", hash = "sha256:b80f0c51681c517604152eb6a572f5a9378f877763231fddb883ba2f968e8817"}, - {file = "websockets-13.0.1.tar.gz", hash = "sha256:4d6ece65099411cfd9a48d13701d7438d9c34f479046b34c50ff60bb8834e43e"}, + {file = "websockets-13.1-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:f48c749857f8fb598fb890a75f540e3221d0976ed0bf879cf3c7eef34151acee"}, + {file = "websockets-13.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:c7e72ce6bda6fb9409cc1e8164dd41d7c91466fb599eb047cfda72fe758a34a7"}, + {file = "websockets-13.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:f779498eeec470295a2b1a5d97aa1bc9814ecd25e1eb637bd9d1c73a327387f6"}, + {file = "websockets-13.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4676df3fe46956fbb0437d8800cd5f2b6d41143b6e7e842e60554398432cf29b"}, + {file = "websockets-13.1-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:a7affedeb43a70351bb811dadf49493c9cfd1ed94c9c70095fd177e9cc1541fa"}, + {file = "websockets-13.1-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1971e62d2caa443e57588e1d82d15f663b29ff9dfe7446d9964a4b6f12c1e700"}, + {file = "websockets-13.1-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:5f2e75431f8dc4a47f31565a6e1355fb4f2ecaa99d6b89737527ea917066e26c"}, + {file = "websockets-13.1-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:58cf7e75dbf7e566088b07e36ea2e3e2bd5676e22216e4cad108d4df4a7402a0"}, + {file = "websockets-13.1-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:c90d6dec6be2c7d03378a574de87af9b1efea77d0c52a8301dd831ece938452f"}, + {file = "websockets-13.1-cp310-cp310-win32.whl", hash = "sha256:730f42125ccb14602f455155084f978bd9e8e57e89b569b4d7f0f0c17a448ffe"}, + {file = "websockets-13.1-cp310-cp310-win_amd64.whl", hash = "sha256:5993260f483d05a9737073be197371940c01b257cc45ae3f1d5d7adb371b266a"}, + {file = "websockets-13.1-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:61fc0dfcda609cda0fc9fe7977694c0c59cf9d749fbb17f4e9483929e3c48a19"}, + {file = "websockets-13.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:ceec59f59d092c5007e815def4ebb80c2de330e9588e101cf8bd94c143ec78a5"}, + {file = "websockets-13.1-cp311-cp311-macosx_11_0_arm64.whl", hash = 
"sha256:c1dca61c6db1166c48b95198c0b7d9c990b30c756fc2923cc66f68d17dc558fd"}, + {file = "websockets-13.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:308e20f22c2c77f3f39caca508e765f8725020b84aa963474e18c59accbf4c02"}, + {file = "websockets-13.1-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:62d516c325e6540e8a57b94abefc3459d7dab8ce52ac75c96cad5549e187e3a7"}, + {file = "websockets-13.1-cp311-cp311-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:87c6e35319b46b99e168eb98472d6c7d8634ee37750d7693656dc766395df096"}, + {file = "websockets-13.1-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:5f9fee94ebafbc3117c30be1844ed01a3b177bb6e39088bc6b2fa1dc15572084"}, + {file = "websockets-13.1-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:7c1e90228c2f5cdde263253fa5db63e6653f1c00e7ec64108065a0b9713fa1b3"}, + {file = "websockets-13.1-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:6548f29b0e401eea2b967b2fdc1c7c7b5ebb3eeb470ed23a54cd45ef078a0db9"}, + {file = "websockets-13.1-cp311-cp311-win32.whl", hash = "sha256:c11d4d16e133f6df8916cc5b7e3e96ee4c44c936717d684a94f48f82edb7c92f"}, + {file = "websockets-13.1-cp311-cp311-win_amd64.whl", hash = "sha256:d04f13a1d75cb2b8382bdc16ae6fa58c97337253826dfe136195b7f89f661557"}, + {file = "websockets-13.1-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:9d75baf00138f80b48f1eac72ad1535aac0b6461265a0bcad391fc5aba875cfc"}, + {file = "websockets-13.1-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:9b6f347deb3dcfbfde1c20baa21c2ac0751afaa73e64e5b693bb2b848efeaa49"}, + {file = "websockets-13.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:de58647e3f9c42f13f90ac7e5f58900c80a39019848c5547bc691693098ae1bd"}, + {file = "websockets-13.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a1b54689e38d1279a51d11e3467dd2f3a50f5f2e879012ce8f2d6943f00e83f0"}, + {file = "websockets-13.1-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:cf1781ef73c073e6b0f90af841aaf98501f975d306bbf6221683dd594ccc52b6"}, + {file = "websockets-13.1-cp312-cp312-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8d23b88b9388ed85c6faf0e74d8dec4f4d3baf3ecf20a65a47b836d56260d4b9"}, + {file = "websockets-13.1-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:3c78383585f47ccb0fcf186dcb8a43f5438bd7d8f47d69e0b56f71bf431a0a68"}, + {file = "websockets-13.1-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:d6d300f8ec35c24025ceb9b9019ae9040c1ab2f01cddc2bcc0b518af31c75c14"}, + {file = "websockets-13.1-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:a9dcaf8b0cc72a392760bb8755922c03e17a5a54e08cca58e8b74f6902b433cf"}, + {file = "websockets-13.1-cp312-cp312-win32.whl", hash = "sha256:2f85cf4f2a1ba8f602298a853cec8526c2ca42a9a4b947ec236eaedb8f2dc80c"}, + {file = "websockets-13.1-cp312-cp312-win_amd64.whl", hash = "sha256:38377f8b0cdeee97c552d20cf1865695fcd56aba155ad1b4ca8779a5b6ef4ac3"}, + {file = "websockets-13.1-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:a9ab1e71d3d2e54a0aa646ab6d4eebfaa5f416fe78dfe4da2839525dc5d765c6"}, + {file = "websockets-13.1-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:b9d7439d7fab4dce00570bb906875734df13d9faa4b48e261c440a5fec6d9708"}, + {file = "websockets-13.1-cp313-cp313-macosx_11_0_arm64.whl", hash = 
"sha256:327b74e915cf13c5931334c61e1a41040e365d380f812513a255aa804b183418"}, + {file = "websockets-13.1-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:325b1ccdbf5e5725fdcb1b0e9ad4d2545056479d0eee392c291c1bf76206435a"}, + {file = "websockets-13.1-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:346bee67a65f189e0e33f520f253d5147ab76ae42493804319b5716e46dddf0f"}, + {file = "websockets-13.1-cp313-cp313-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:91a0fa841646320ec0d3accdff5b757b06e2e5c86ba32af2e0815c96c7a603c5"}, + {file = "websockets-13.1-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:18503d2c5f3943e93819238bf20df71982d193f73dcecd26c94514f417f6b135"}, + {file = "websockets-13.1-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:a9cd1af7e18e5221d2878378fbc287a14cd527fdd5939ed56a18df8a31136bb2"}, + {file = "websockets-13.1-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:70c5be9f416aa72aab7a2a76c90ae0a4fe2755c1816c153c1a2bcc3333ce4ce6"}, + {file = "websockets-13.1-cp313-cp313-win32.whl", hash = "sha256:624459daabeb310d3815b276c1adef475b3e6804abaf2d9d2c061c319f7f187d"}, + {file = "websockets-13.1-cp313-cp313-win_amd64.whl", hash = "sha256:c518e84bb59c2baae725accd355c8dc517b4a3ed8db88b4bc93c78dae2974bf2"}, + {file = "websockets-13.1-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:c7934fd0e920e70468e676fe7f1b7261c1efa0d6c037c6722278ca0228ad9d0d"}, + {file = "websockets-13.1-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:149e622dc48c10ccc3d2760e5f36753db9cacf3ad7bc7bbbfd7d9c819e286f23"}, + {file = "websockets-13.1-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:a569eb1b05d72f9bce2ebd28a1ce2054311b66677fcd46cf36204ad23acead8c"}, + {file = "websockets-13.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:95df24ca1e1bd93bbca51d94dd049a984609687cb2fb08a7f2c56ac84e9816ea"}, + {file = "websockets-13.1-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d8dbb1bf0c0a4ae8b40bdc9be7f644e2f3fb4e8a9aca7145bfa510d4a374eeb7"}, + {file = "websockets-13.1-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:035233b7531fb92a76beefcbf479504db8c72eb3bff41da55aecce3a0f729e54"}, + {file = "websockets-13.1-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:e4450fc83a3df53dec45922b576e91e94f5578d06436871dce3a6be38e40f5db"}, + {file = "websockets-13.1-cp38-cp38-musllinux_1_2_i686.whl", hash = "sha256:463e1c6ec853202dd3657f156123d6b4dad0c546ea2e2e38be2b3f7c5b8e7295"}, + {file = "websockets-13.1-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:6d6855bbe70119872c05107e38fbc7f96b1d8cb047d95c2c50869a46c65a8e96"}, + {file = "websockets-13.1-cp38-cp38-win32.whl", hash = "sha256:204e5107f43095012b00f1451374693267adbb832d29966a01ecc4ce1db26faf"}, + {file = "websockets-13.1-cp38-cp38-win_amd64.whl", hash = "sha256:485307243237328c022bc908b90e4457d0daa8b5cf4b3723fd3c4a8012fce4c6"}, + {file = "websockets-13.1-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:9b37c184f8b976f0c0a231a5f3d6efe10807d41ccbe4488df8c74174805eea7d"}, + {file = "websockets-13.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:163e7277e1a0bd9fb3c8842a71661ad19c6aa7bb3d6678dc7f89b17fbcc4aeb7"}, + {file = "websockets-13.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:4b889dbd1342820cc210ba44307cf75ae5f2f96226c0038094455a96e64fb07a"}, + {file = 
"websockets-13.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:586a356928692c1fed0eca68b4d1c2cbbd1ca2acf2ac7e7ebd3b9052582deefa"}, + {file = "websockets-13.1-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:7bd6abf1e070a6b72bfeb71049d6ad286852e285f146682bf30d0296f5fbadfa"}, + {file = "websockets-13.1-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6d2aad13a200e5934f5a6767492fb07151e1de1d6079c003ab31e1823733ae79"}, + {file = "websockets-13.1-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:df01aea34b6e9e33572c35cd16bae5a47785e7d5c8cb2b54b2acdb9678315a17"}, + {file = "websockets-13.1-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:e54affdeb21026329fb0744ad187cf812f7d3c2aa702a5edb562b325191fcab6"}, + {file = "websockets-13.1-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:9ef8aa8bdbac47f4968a5d66462a2a0935d044bf35c0e5a8af152d58516dbeb5"}, + {file = "websockets-13.1-cp39-cp39-win32.whl", hash = "sha256:deeb929efe52bed518f6eb2ddc00cc496366a14c726005726ad62c2dd9017a3c"}, + {file = "websockets-13.1-cp39-cp39-win_amd64.whl", hash = "sha256:7c65ffa900e7cc958cd088b9a9157a8141c991f8c53d11087e6fb7277a03f81d"}, + {file = "websockets-13.1-pp310-pypy310_pp73-macosx_10_15_x86_64.whl", hash = "sha256:5dd6da9bec02735931fccec99d97c29f47cc61f644264eb995ad6c0c27667238"}, + {file = "websockets-13.1-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:2510c09d8e8df777177ee3d40cd35450dc169a81e747455cc4197e63f7e7bfe5"}, + {file = "websockets-13.1-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f1c3cf67185543730888b20682fb186fc8d0fa6f07ccc3ef4390831ab4b388d9"}, + {file = "websockets-13.1-pp310-pypy310_pp73-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:bcc03c8b72267e97b49149e4863d57c2d77f13fae12066622dc78fe322490fe6"}, + {file = "websockets-13.1-pp310-pypy310_pp73-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:004280a140f220c812e65f36944a9ca92d766b6cc4560be652a0a3883a79ed8a"}, + {file = "websockets-13.1-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:e2620453c075abeb0daa949a292e19f56de518988e079c36478bacf9546ced23"}, + {file = "websockets-13.1-pp38-pypy38_pp73-macosx_10_9_x86_64.whl", hash = "sha256:9156c45750b37337f7b0b00e6248991a047be4aa44554c9886fe6bdd605aab3b"}, + {file = "websockets-13.1-pp38-pypy38_pp73-macosx_11_0_arm64.whl", hash = "sha256:80c421e07973a89fbdd93e6f2003c17d20b69010458d3a8e37fb47874bd67d51"}, + {file = "websockets-13.1-pp38-pypy38_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:82d0ba76371769d6a4e56f7e83bb8e81846d17a6190971e38b5de108bde9b0d7"}, + {file = "websockets-13.1-pp38-pypy38_pp73-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:e9875a0143f07d74dc5e1ded1c4581f0d9f7ab86c78994e2ed9e95050073c94d"}, + {file = "websockets-13.1-pp38-pypy38_pp73-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a11e38ad8922c7961447f35c7b17bffa15de4d17c70abd07bfbe12d6faa3e027"}, + {file = "websockets-13.1-pp38-pypy38_pp73-win_amd64.whl", hash = "sha256:4059f790b6ae8768471cddb65d3c4fe4792b0ab48e154c9f0a04cefaabcd5978"}, + {file = "websockets-13.1-pp39-pypy39_pp73-macosx_10_15_x86_64.whl", hash = "sha256:25c35bf84bf7c7369d247f0b8cfa157f989862c49104c5cf85cb5436a641d93e"}, + {file = 
"websockets-13.1-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:83f91d8a9bb404b8c2c41a707ac7f7f75b9442a0a876df295de27251a856ad09"}, + {file = "websockets-13.1-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:7a43cfdcddd07f4ca2b1afb459824dd3c6d53a51410636a2c7fc97b9a8cf4842"}, + {file = "websockets-13.1-pp39-pypy39_pp73-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:48a2ef1381632a2f0cb4efeff34efa97901c9fbc118e01951ad7cfc10601a9bb"}, + {file = "websockets-13.1-pp39-pypy39_pp73-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:459bf774c754c35dbb487360b12c5727adab887f1622b8aed5755880a21c4a20"}, + {file = "websockets-13.1-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:95858ca14a9f6fa8413d29e0a585b31b278388aa775b8a81fa24830123874678"}, + {file = "websockets-13.1-py3-none-any.whl", hash = "sha256:a9a396a6ad26130cdae92ae10c36af09d9bfe6cafe69670fd3b6da9b07b4044f"}, + {file = "websockets-13.1.tar.gz", hash = "sha256:a3b3366087c1bc0a2795111edcadddb8b3b59509d5db5d7ea3fdd69f954a8878"}, ] [[package]] @@ -9970,20 +8477,6 @@ MarkupSafe = ">=2.1.1" [package.extras] watchdog = ["watchdog (>=2.3)"] -[[package]] -name = "wikipedia" -version = "1.4.0" -description = "Wikipedia API for Python" -optional = false -python-versions = "*" -files = [ - {file = "wikipedia-1.4.0.tar.gz", hash = "sha256:db0fad1829fdd441b1852306e9856398204dc0786d2996dd2e0c8bb8e26133b2"}, -] - -[package.dependencies] -beautifulsoup4 = "*" -requests = ">=2.0.0,<3.0.0" - [[package]] name = "win32-setctime" version = "1.1.0" @@ -10077,39 +8570,6 @@ files = [ {file = "wrapt-1.16.0.tar.gz", hash = "sha256:5f370f952971e7d17c7d1ead40e49f32345a7f7a5373571ef44d800d06b1899d"}, ] -[[package]] -name = "wsproto" -version = "1.2.0" -description = "WebSockets state-machine based protocol implementation" -optional = false -python-versions = ">=3.7.0" -files = [ - {file = "wsproto-1.2.0-py3-none-any.whl", hash = "sha256:b9acddd652b585d75b20477888c56642fdade28bdfd3579aa24a4d2c037dd736"}, - {file = "wsproto-1.2.0.tar.gz", hash = "sha256:ad565f26ecb92588a3e43bc3d96164de84cd9902482b130d0ddbaa9664a85065"}, -] - -[package.dependencies] -h11 = ">=0.9.0,<1" - -[[package]] -name = "xinference-client" -version = "0.15.2" -description = "Client for Xinference" -optional = false -python-versions = "*" -files = [ - {file = "xinference-client-0.15.2.tar.gz", hash = "sha256:5c2259bb133148d1cc9bd2b8ec6eb8b5bbeba7f11d6252959f4e6cd79baa53ed"}, - {file = "xinference_client-0.15.2-py3-none-any.whl", hash = "sha256:b6275adab695e75e75a33e21e0ad212488fc2d5a4d0f693d544c0e78469abbe3"}, -] - -[package.dependencies] -pydantic = "*" -requests = "*" -typing-extensions = "*" - -[package.extras] -dev = ["black", "cython (>=0.29)", "flake8 (>=3.8.0)", "ipython (>=6.5.0)", "pytest (>=3.5.0)", "pytest-asyncio (>=0.14.0)", "pytest-cov (>=2.5.0)", "pytest-forked (>=1.0)", "pytest-mock (>=3.11.1)", "pytest-timeout (>=1.2.0)"] - [[package]] name = "xlrd" version = "2.0.1" @@ -10253,51 +8713,6 @@ files = [ idna = ">=2.0" multidict = ">=4.0" -[[package]] -name = "yfinance" -version = "0.2.43" -description = "Download market data from Yahoo! 
Finance API" -optional = false -python-versions = "*" -files = [ - {file = "yfinance-0.2.43-py2.py3-none-any.whl", hash = "sha256:11b4f5515b17450bd3bdcdc26b299aeeaea7ff9cb63d0fa0a865f460c0c7618f"}, - {file = "yfinance-0.2.43.tar.gz", hash = "sha256:32404597f325a2a2c2708aceb8d552088dd26891ac0e6018f6c5f3f2f61055f0"}, -] - -[package.dependencies] -beautifulsoup4 = ">=4.11.1" -frozendict = ">=2.3.4" -html5lib = ">=1.1" -lxml = ">=4.9.1" -multitasking = ">=0.0.7" -numpy = ">=1.16.5" -pandas = ">=1.3.0" -peewee = ">=3.16.2" -platformdirs = ">=2.0.0" -pytz = ">=2022.5" -requests = ">=2.31" - -[package.extras] -nospam = ["requests-cache (>=1.0)", "requests-ratelimiter (>=0.3.1)"] -repair = ["scipy (>=1.6.3)"] - -[[package]] -name = "zhipuai" -version = "1.0.7" -description = "A SDK library for accessing big model apis from ZhipuAI" -optional = false -python-versions = ">=3.6" -files = [ - {file = "zhipuai-1.0.7-py3-none-any.whl", hash = "sha256:360c01b8c2698f366061452e86d5a36a5ff68a576ea33940da98e4806f232530"}, - {file = "zhipuai-1.0.7.tar.gz", hash = "sha256:b80f699543d83cce8648acf1ce32bc2725d1c1c443baffa5882abc2cc704d581"}, -] - -[package.dependencies] -cachetools = "*" -dataclasses = "*" -PyJWT = "*" -requests = "*" - [[package]] name = "zipp" version = "3.20.2" @@ -10501,4 +8916,4 @@ cffi = ["cffi (>=1.11)"] [metadata] lock-version = "2.0" python-versions = ">=3.10,<3.13" -content-hash = "c4580c22e2b220c8c80dbc3f765060a09e14874ed29b690c13a533bf0365e789" +content-hash = "46120eb2caaf416a798cfe425674e3dcf83a9c7f1b1273e7703ca32ebea21ffd" diff --git a/api/pyproject.toml b/api/pyproject.toml index e737761f3b..b5501ff228 100644 --- a/api/pyproject.toml +++ b/api/pyproject.toml @@ -134,7 +134,6 @@ package-mode = false ############################################################ [tool.poetry.dependencies] -anthropic = "~0.23.1" authlib = "1.3.1" azure-identity = "1.16.1" azure-storage-blob = "12.13.0" @@ -145,10 +144,8 @@ bs4 = "~0.0.1" cachetools = "~5.3.0" celery = "~5.3.6" chardet = "~5.1.0" -cohere = "~5.2.4" cos-python-sdk-v5 = "1.9.30" esdk-obs-python = "3.24.6.1" -dashscope = { version = "~1.17.0", extras = ["tokenizer"] } flask = "~3.0.1" flask-compress = "~1.14" flask-cors = "~4.0.0" @@ -169,13 +166,12 @@ google-generativeai = "0.8.1" googleapis-common-protos = "1.63.0" gunicorn = "~22.0.0" httpx = { version = "~0.27.0", extras = ["socks"] } -huggingface-hub = "~0.16.4" jieba = "0.42.1" langfuse = "^2.48.0" langsmith = "^0.1.77" mailchimp-transactional = "~1.0.50" markdown = "~3.5.1" -novita-client = "^0.5.7" +nltk = "3.8.1" numpy = "~1.26.4" openai = "~1.29.0" openpyxl = "~3.1.5" @@ -192,9 +188,7 @@ python = ">=3.10,<3.13" python-docx = "~1.1.0" python-dotenv = "1.0.0" pyyaml = "~6.0.1" -readabilipy = "0.2.0" redis = { version = "~5.0.3", extras = ["hiredis"] } -replicate = "~0.22.0" resend = "~0.7.0" scikit-learn = "^1.5.1" sentry-sdk = { version = "~1.44.1", extras = ["flask"] } @@ -202,21 +196,15 @@ sqlalchemy = "~2.0.29" tencentcloud-sdk-python-hunyuan = "~3.0.1158" tiktoken = "~0.7.0" tokenizers = "~0.15.0" -transformers = "~4.35.0" unstructured = { version = "~0.10.27", extras = ["docx", "epub", "md", "msg", "ppt", "pptx"] } -websocket-client = "~1.7.0" werkzeug = "~3.0.1" -xinference-client = "0.15.2" yarl = "~1.9.4" -zhipuai = "1.0.7" # Before adding new dependency, consider place it in alphabet order (a-z) and suitable group. 
############################################################ # Related transparent dependencies with pinned version # required by main implementations ############################################################ -azure-ai-ml = "^1.19.0" -azure-ai-inference = "^1.0.0b3" volcengine-python-sdk = {extras = ["ark"], version = "^1.0.98"} oci = "^2.133.0" tos = "^2.7.1" @@ -231,20 +219,7 @@ safetensors = "~0.4.3" ############################################################ [tool.poetry.group.tool.dependencies] -arxiv = "2.1.0" -cloudscraper = "1.2.71" -matplotlib = "~3.8.2" -newspaper3k = "0.2.8" -duckduckgo-search = "^6.2.6" -jsonpath-ng = "1.6.1" -numexpr = "~2.9.0" -opensearch-py = "2.4.0" qrcode = "~7.4.2" -twilio = "~9.0.4" -vanna = { version = "0.5.5", extras = ["postgres", "mysql", "clickhouse", "duckdb"] } -wikipedia = "1.4.0" -yfinance = "~0.2.40" -nltk = "3.8.1" ############################################################ # VDB dependencies required by vector store clients ############################################################ diff --git a/api/tests/integration_tests/model_runtime/__mock/anthropic.py b/api/tests/integration_tests/model_runtime/__mock/anthropic.py deleted file mode 100644 index 79a3dc0394..0000000000 --- a/api/tests/integration_tests/model_runtime/__mock/anthropic.py +++ /dev/null @@ -1,98 +0,0 @@ -import os -from collections.abc import Iterable -from typing import Any, Literal, Union - -import anthropic -import pytest -from _pytest.monkeypatch import MonkeyPatch -from anthropic import Anthropic, Stream -from anthropic.resources import Messages -from anthropic.types import ( - ContentBlock, - ContentBlockDeltaEvent, - Message, - MessageDeltaEvent, - MessageDeltaUsage, - MessageParam, - MessageStartEvent, - MessageStopEvent, - MessageStreamEvent, - TextDelta, - Usage, -) -from anthropic.types.message_delta_event import Delta - -MOCK = os.getenv("MOCK_SWITCH", "false") == "true" - - -class MockAnthropicClass: - @staticmethod - def mocked_anthropic_chat_create_sync(model: str) -> Message: - return Message( - id="msg-123", - type="message", - role="assistant", - content=[ContentBlock(text="hello, I'm a chatbot from anthropic", type="text")], - model=model, - stop_reason="stop_sequence", - usage=Usage(input_tokens=1, output_tokens=1), - ) - - @staticmethod - def mocked_anthropic_chat_create_stream(model: str) -> Stream[MessageStreamEvent]: - full_response_text = "hello, I'm a chatbot from anthropic" - - yield MessageStartEvent( - type="message_start", - message=Message( - id="msg-123", - content=[], - role="assistant", - model=model, - stop_reason=None, - type="message", - usage=Usage(input_tokens=1, output_tokens=1), - ), - ) - - index = 0 - for i in range(0, len(full_response_text)): - yield ContentBlockDeltaEvent( - type="content_block_delta", delta=TextDelta(text=full_response_text[i], type="text_delta"), index=index - ) - - index += 1 - - yield MessageDeltaEvent( - type="message_delta", delta=Delta(stop_reason="stop_sequence"), usage=MessageDeltaUsage(output_tokens=1) - ) - - yield MessageStopEvent(type="message_stop") - - def mocked_anthropic( - self: Messages, - *, - max_tokens: int, - messages: Iterable[MessageParam], - model: str, - stream: Literal[True], - **kwargs: Any, - ) -> Union[Message, Stream[MessageStreamEvent]]: - if len(self._client.api_key) < 18: - raise anthropic.AuthenticationError("Invalid API key") - - if stream: - return MockAnthropicClass.mocked_anthropic_chat_create_stream(model=model) - else: - return 
MockAnthropicClass.mocked_anthropic_chat_create_sync(model=model) - - -@pytest.fixture -def setup_anthropic_mock(request, monkeypatch: MonkeyPatch): - if MOCK: - monkeypatch.setattr(Messages, "create", MockAnthropicClass.mocked_anthropic) - - yield - - if MOCK: - monkeypatch.undo() diff --git a/api/tests/integration_tests/model_runtime/__mock/fishaudio.py b/api/tests/integration_tests/model_runtime/__mock/fishaudio.py deleted file mode 100644 index bec3babeaf..0000000000 --- a/api/tests/integration_tests/model_runtime/__mock/fishaudio.py +++ /dev/null @@ -1,82 +0,0 @@ -import os -from collections.abc import Callable -from typing import Literal - -import httpx -import pytest -from _pytest.monkeypatch import MonkeyPatch - - -def mock_get(*args, **kwargs): - if kwargs.get("headers", {}).get("Authorization") != "Bearer test": - raise httpx.HTTPStatusError( - "Invalid API key", - request=httpx.Request("GET", ""), - response=httpx.Response(401), - ) - - return httpx.Response( - 200, - json={ - "items": [ - {"title": "Model 1", "_id": "model1"}, - {"title": "Model 2", "_id": "model2"}, - ] - }, - request=httpx.Request("GET", ""), - ) - - -def mock_stream(*args, **kwargs): - class MockStreamResponse: - def __init__(self): - self.status_code = 200 - - def __enter__(self): - return self - - def __exit__(self, exc_type, exc_val, exc_tb): - pass - - def iter_bytes(self): - yield b"Mocked audio data" - - return MockStreamResponse() - - -def mock_fishaudio( - monkeypatch: MonkeyPatch, - methods: list[Literal["list-models", "tts"]], -) -> Callable[[], None]: - """ - mock fishaudio module - - :param monkeypatch: pytest monkeypatch fixture - :return: unpatch function - """ - - def unpatch() -> None: - monkeypatch.undo() - - if "list-models" in methods: - monkeypatch.setattr(httpx, "get", mock_get) - - if "tts" in methods: - monkeypatch.setattr(httpx, "stream", mock_stream) - - return unpatch - - -MOCK = os.getenv("MOCK_SWITCH", "false").lower() == "true" - - -@pytest.fixture -def setup_fishaudio_mock(request, monkeypatch): - methods = request.param if hasattr(request, "param") else [] - if MOCK: - unpatch = mock_fishaudio(monkeypatch, methods=methods) - - yield - - if MOCK: - unpatch() diff --git a/api/tests/integration_tests/model_runtime/__mock/google.py b/api/tests/integration_tests/model_runtime/__mock/google.py deleted file mode 100644 index 402bd9c2c2..0000000000 --- a/api/tests/integration_tests/model_runtime/__mock/google.py +++ /dev/null @@ -1,116 +0,0 @@ -from collections.abc import Generator - -import google.generativeai.types.generation_types as generation_config_types -import pytest -from _pytest.monkeypatch import MonkeyPatch -from google.ai import generativelanguage as glm -from google.ai.generativelanguage_v1beta.types import content as gag_content -from google.generativeai import GenerativeModel -from google.generativeai.client import _ClientManager, configure -from google.generativeai.types import GenerateContentResponse, content_types, safety_types -from google.generativeai.types.generation_types import BaseGenerateContentResponse - -current_api_key = "" - - -class MockGoogleResponseClass: - _done = False - - def __iter__(self): - full_response_text = "it's google!" 
- - for i in range(0, len(full_response_text) + 1, 1): - if i == len(full_response_text): - self._done = True - yield GenerateContentResponse( - done=True, iterator=None, result=glm.GenerateContentResponse({}), chunks=[] - ) - else: - yield GenerateContentResponse( - done=False, iterator=None, result=glm.GenerateContentResponse({}), chunks=[] - ) - - -class MockGoogleResponseCandidateClass: - finish_reason = "stop" - - @property - def content(self) -> gag_content.Content: - return gag_content.Content(parts=[gag_content.Part(text="it's google!")]) - - -class MockGoogleClass: - @staticmethod - def generate_content_sync() -> GenerateContentResponse: - return GenerateContentResponse(done=True, iterator=None, result=glm.GenerateContentResponse({}), chunks=[]) - - @staticmethod - def generate_content_stream() -> Generator[GenerateContentResponse, None, None]: - return MockGoogleResponseClass() - - def generate_content( - self: GenerativeModel, - contents: content_types.ContentsType, - *, - generation_config: generation_config_types.GenerationConfigType | None = None, - safety_settings: safety_types.SafetySettingOptions | None = None, - stream: bool = False, - **kwargs, - ) -> GenerateContentResponse: - global current_api_key - - if len(current_api_key) < 16: - raise Exception("Invalid API key") - - if stream: - return MockGoogleClass.generate_content_stream() - - return MockGoogleClass.generate_content_sync() - - @property - def generative_response_text(self) -> str: - return "it's google!" - - @property - def generative_response_candidates(self) -> list[MockGoogleResponseCandidateClass]: - return [MockGoogleResponseCandidateClass()] - - def make_client(self: _ClientManager, name: str): - global current_api_key - - if name.endswith("_async"): - name = name.split("_")[0] - cls = getattr(glm, name.title() + "ServiceAsyncClient") - else: - cls = getattr(glm, name.title() + "ServiceClient") - - # Attempt to configure using defaults. 
- if not self.client_config: - configure() - - client_options = self.client_config.get("client_options", None) - if client_options: - current_api_key = client_options.api_key - - def nop(self, *args, **kwargs): - pass - - original_init = cls.__init__ - cls.__init__ = nop - client: glm.GenerativeServiceClient = cls(**self.client_config) - cls.__init__ = original_init - - if not self.default_metadata: - return client - - -@pytest.fixture -def setup_google_mock(request, monkeypatch: MonkeyPatch): - monkeypatch.setattr(BaseGenerateContentResponse, "text", MockGoogleClass.generative_response_text) - monkeypatch.setattr(BaseGenerateContentResponse, "candidates", MockGoogleClass.generative_response_candidates) - monkeypatch.setattr(GenerativeModel, "generate_content", MockGoogleClass.generate_content) - monkeypatch.setattr(_ClientManager, "make_client", MockGoogleClass.make_client) - - yield - - monkeypatch.undo() diff --git a/api/tests/integration_tests/model_runtime/__mock/huggingface.py b/api/tests/integration_tests/model_runtime/__mock/huggingface.py deleted file mode 100644 index 97038ef596..0000000000 --- a/api/tests/integration_tests/model_runtime/__mock/huggingface.py +++ /dev/null @@ -1,20 +0,0 @@ -import os - -import pytest -from _pytest.monkeypatch import MonkeyPatch -from huggingface_hub import InferenceClient - -from tests.integration_tests.model_runtime.__mock.huggingface_chat import MockHuggingfaceChatClass - -MOCK = os.getenv("MOCK_SWITCH", "false").lower() == "true" - - -@pytest.fixture -def setup_huggingface_mock(request, monkeypatch: MonkeyPatch): - if MOCK: - monkeypatch.setattr(InferenceClient, "text_generation", MockHuggingfaceChatClass.text_generation) - - yield - - if MOCK: - monkeypatch.undo() diff --git a/api/tests/integration_tests/model_runtime/__mock/huggingface_chat.py b/api/tests/integration_tests/model_runtime/__mock/huggingface_chat.py deleted file mode 100644 index 9ee76c935c..0000000000 --- a/api/tests/integration_tests/model_runtime/__mock/huggingface_chat.py +++ /dev/null @@ -1,56 +0,0 @@ -import re -from collections.abc import Generator -from typing import Any, Literal, Optional, Union - -from _pytest.monkeypatch import MonkeyPatch -from huggingface_hub import InferenceClient -from huggingface_hub.inference._text_generation import ( - Details, - StreamDetails, - TextGenerationResponse, - TextGenerationStreamResponse, - Token, -) -from huggingface_hub.utils import BadRequestError - - -class MockHuggingfaceChatClass: - @staticmethod - def generate_create_sync(model: str) -> TextGenerationResponse: - response = TextGenerationResponse( - generated_text="You can call me Miku Miku o~e~o~", - details=Details( - finish_reason="length", - generated_tokens=6, - tokens=[Token(id=0, text="You", logprob=0.0, special=False) for i in range(0, 6)], - ), - ) - - return response - - @staticmethod - def generate_create_stream(model: str) -> Generator[TextGenerationStreamResponse, None, None]: - full_text = "You can call me Miku Miku o~e~o~" - - for i in range(0, len(full_text)): - response = TextGenerationStreamResponse( - token=Token(id=i, text=full_text[i], logprob=0.0, special=False), - ) - response.generated_text = full_text[i] - response.details = StreamDetails(finish_reason="stop_sequence", generated_tokens=1) - - yield response - - def text_generation( - self: InferenceClient, prompt: str, *, stream: Literal[False] = ..., model: Optional[str] = None, **kwargs: Any - ) -> Union[TextGenerationResponse, Generator[TextGenerationStreamResponse, None, None]]: - # check if key 
is valid - if not re.match(r"Bearer\shf\-[a-zA-Z0-9]{16,}", self.headers["authorization"]): - raise BadRequestError("Invalid API key") - - if model is None: - raise BadRequestError("Invalid model") - - if stream: - return MockHuggingfaceChatClass.generate_create_stream(model) - return MockHuggingfaceChatClass.generate_create_sync(model) diff --git a/api/tests/integration_tests/model_runtime/__mock/huggingface_tei.py b/api/tests/integration_tests/model_runtime/__mock/huggingface_tei.py deleted file mode 100644 index b9a721c803..0000000000 --- a/api/tests/integration_tests/model_runtime/__mock/huggingface_tei.py +++ /dev/null @@ -1,94 +0,0 @@ -from core.model_runtime.model_providers.huggingface_tei.tei_helper import TeiModelExtraParameter - - -class MockTEIClass: - @staticmethod - def get_tei_extra_parameter(server_url: str, model_name: str) -> TeiModelExtraParameter: - # During mock, we don't have a real server to query, so we just return a dummy value - if "rerank" in model_name: - model_type = "reranker" - else: - model_type = "embedding" - - return TeiModelExtraParameter(model_type=model_type, max_input_length=512, max_client_batch_size=1) - - @staticmethod - def invoke_tokenize(server_url: str, texts: list[str]) -> list[list[dict]]: - # Use space as token separator, and split the text into tokens - tokenized_texts = [] - for text in texts: - tokens = text.split(" ") - current_index = 0 - tokenized_text = [] - for idx, token in enumerate(tokens): - s_token = { - "id": idx, - "text": token, - "special": False, - "start": current_index, - "stop": current_index + len(token), - } - current_index += len(token) + 1 - tokenized_text.append(s_token) - tokenized_texts.append(tokenized_text) - return tokenized_texts - - @staticmethod - def invoke_embeddings(server_url: str, texts: list[str]) -> dict: - # { - # "object": "list", - # "data": [ - # { - # "object": "embedding", - # "embedding": [...], - # "index": 0 - # } - # ], - # "model": "MODEL_NAME", - # "usage": { - # "prompt_tokens": 3, - # "total_tokens": 3 - # } - # } - embeddings = [] - for idx in range(len(texts)): - embedding = [0.1] * 768 - embeddings.append( - { - "object": "embedding", - "embedding": embedding, - "index": idx, - } - ) - return { - "object": "list", - "data": embeddings, - "model": "MODEL_NAME", - "usage": { - "prompt_tokens": sum(len(text.split(" ")) for text in texts), - "total_tokens": sum(len(text.split(" ")) for text in texts), - }, - } - - @staticmethod - def invoke_rerank(server_url: str, query: str, texts: list[str]) -> list[dict]: - # Example response: - # [ - # { - # "index": 0, - # "text": "Deep Learning is ...", - # "score": 0.9950755 - # } - # ] - reranked_docs = [] - for idx, text in enumerate(texts): - reranked_docs.append( - { - "index": idx, - "text": text, - "score": 0.9, - } - ) - # For mock, only return the first document - break - return reranked_docs diff --git a/api/tests/integration_tests/model_runtime/__mock/nomic_embeddings.py b/api/tests/integration_tests/model_runtime/__mock/nomic_embeddings.py deleted file mode 100644 index 281e866e45..0000000000 --- a/api/tests/integration_tests/model_runtime/__mock/nomic_embeddings.py +++ /dev/null @@ -1,59 +0,0 @@ -import os -from collections.abc import Callable -from typing import Any, Literal, Union - -import pytest - -# import monkeypatch -from _pytest.monkeypatch import MonkeyPatch -from nomic import embed - - -def create_embedding(texts: list[str], model: str, **kwargs: Any) -> dict: - texts_len = len(texts) - - foo_embedding_sample = 0.123456 - - 
combined = { - "embeddings": [[foo_embedding_sample for _ in range(768)] for _ in range(texts_len)], - "usage": {"prompt_tokens": texts_len, "total_tokens": texts_len}, - "model": model, - "inference_mode": "remote", - } - - return combined - - -def mock_nomic( - monkeypatch: MonkeyPatch, - methods: list[Literal["text_embedding"]], -) -> Callable[[], None]: - """ - mock nomic module - - :param monkeypatch: pytest monkeypatch fixture - :param methods: nomic methods to mock - :return: unpatch function - """ - - def unpatch() -> None: - monkeypatch.undo() - - if "text_embedding" in methods: - monkeypatch.setattr(embed, "text", create_embedding) - - return unpatch - - -MOCK = os.getenv("MOCK_SWITCH", "false").lower() == "true" - - -@pytest.fixture -def setup_nomic_mock(request, monkeypatch): - methods = request.param if hasattr(request, "param") else [] - if MOCK: - unpatch = mock_nomic(monkeypatch, methods=methods) - - yield - - if MOCK: - unpatch() diff --git a/api/tests/integration_tests/model_runtime/__mock/openai.py b/api/tests/integration_tests/model_runtime/__mock/openai.py index 6637f4f212..22d099739a 100644 --- a/api/tests/integration_tests/model_runtime/__mock/openai.py +++ b/api/tests/integration_tests/model_runtime/__mock/openai.py @@ -6,19 +6,9 @@ import pytest # import monkeypatch from _pytest.monkeypatch import MonkeyPatch -from openai.resources.audio.transcriptions import Transcriptions -from openai.resources.chat import Completions as ChatCompletions -from openai.resources.completions import Completions -from openai.resources.embeddings import Embeddings -from openai.resources.models import Models from openai.resources.moderations import Moderations -from tests.integration_tests.model_runtime.__mock.openai_chat import MockChatClass -from tests.integration_tests.model_runtime.__mock.openai_completion import MockCompletionsClass -from tests.integration_tests.model_runtime.__mock.openai_embeddings import MockEmbeddingsClass from tests.integration_tests.model_runtime.__mock.openai_moderation import MockModerationClass -from tests.integration_tests.model_runtime.__mock.openai_remote import MockModelClass -from tests.integration_tests.model_runtime.__mock.openai_speech2text import MockSpeech2TextClass def mock_openai( @@ -35,24 +25,9 @@ def unpatch() -> None: monkeypatch.undo() - if "completion" in methods: - monkeypatch.setattr(Completions, "create", MockCompletionsClass.completion_create) - - if "chat" in methods: - monkeypatch.setattr(ChatCompletions, "create", MockChatClass.chat_create) - - if "remote" in methods: - monkeypatch.setattr(Models, "list", MockModelClass.list) - if "moderation" in methods: monkeypatch.setattr(Moderations, "create", MockModerationClass.moderation_create) - if "speech2text" in methods: - monkeypatch.setattr(Transcriptions, "create", MockSpeech2TextClass.speech2text_create) - - if "text_embedding" in methods: - monkeypatch.setattr(Embeddings, "create", MockEmbeddingsClass.create_embeddings) - return unpatch diff --git a/api/tests/integration_tests/model_runtime/__mock/openai_chat.py b/api/tests/integration_tests/model_runtime/__mock/openai_chat.py deleted file mode 100644 index 439f7d56e9..0000000000 --- a/api/tests/integration_tests/model_runtime/__mock/openai_chat.py +++ /dev/null @@ -1,269 +0,0 @@ -import re -from collections.abc import Generator -from json import dumps, loads -from time import time - -# import monkeypatch -from typing import Any, Literal, Optional, Union - -from openai import AzureOpenAI, OpenAI -from openai._types import NOT_GIVEN, NotGiven -from 
openai.resources.chat.completions import Completions -from openai.types import Completion as CompletionMessage -from openai.types.chat import ( - ChatCompletion, - ChatCompletionChunk, - ChatCompletionMessageParam, - ChatCompletionMessageToolCall, - ChatCompletionToolChoiceOptionParam, - ChatCompletionToolParam, - completion_create_params, -) -from openai.types.chat.chat_completion import ChatCompletion as _ChatCompletion -from openai.types.chat.chat_completion import Choice as _ChatCompletionChoice -from openai.types.chat.chat_completion_chunk import ( - Choice, - ChoiceDelta, - ChoiceDeltaFunctionCall, - ChoiceDeltaToolCall, - ChoiceDeltaToolCallFunction, -) -from openai.types.chat.chat_completion_message import ChatCompletionMessage, FunctionCall -from openai.types.chat.chat_completion_message_tool_call import Function -from openai.types.completion_usage import CompletionUsage - -from core.model_runtime.errors.invoke import InvokeAuthorizationError - - -class MockChatClass: - @staticmethod - def generate_function_call( - functions: list[completion_create_params.Function] | NotGiven = NOT_GIVEN, - ) -> Optional[FunctionCall]: - if not functions or len(functions) == 0: - return None - function: completion_create_params.Function = functions[0] - function_name = function["name"] - function_description = function["description"] - function_parameters = function["parameters"] - function_parameters_type = function_parameters["type"] - if function_parameters_type != "object": - return None - function_parameters_properties = function_parameters["properties"] - function_parameters_required = function_parameters["required"] - parameters = {} - for parameter_name, parameter in function_parameters_properties.items(): - if parameter_name not in function_parameters_required: - continue - parameter_type = parameter["type"] - if parameter_type == "string": - if "enum" in parameter: - if len(parameter["enum"]) == 0: - continue - parameters[parameter_name] = parameter["enum"][0] - else: - parameters[parameter_name] = "kawaii" - elif parameter_type == "integer": - parameters[parameter_name] = 114514 - elif parameter_type == "number": - parameters[parameter_name] = 1919810.0 - elif parameter_type == "boolean": - parameters[parameter_name] = True - - return FunctionCall(name=function_name, arguments=dumps(parameters)) - - @staticmethod - def generate_tool_calls(tools=NOT_GIVEN) -> Optional[list[ChatCompletionMessageToolCall]]: - list_tool_calls = [] - if not tools or len(tools) == 0: - return None - tool = tools[0] - - if "type" in tool and tool["type"] != "function": - return None - - function = tool["function"] - - function_call = MockChatClass.generate_function_call(functions=[function]) - if function_call is None: - return None - - list_tool_calls.append( - ChatCompletionMessageToolCall( - id="sakurajima-mai", - function=Function( - name=function_call.name, - arguments=function_call.arguments, - ), - type="function", - ) - ) - - return list_tool_calls - - @staticmethod - def mocked_openai_chat_create_sync( - model: str, - functions: list[completion_create_params.Function] | NotGiven = NOT_GIVEN, - tools: list[ChatCompletionToolParam] | NotGiven = NOT_GIVEN, - ) -> CompletionMessage: - tool_calls = [] - function_call = MockChatClass.generate_function_call(functions=functions) - if not function_call: - tool_calls = MockChatClass.generate_tool_calls(tools=tools) - - return _ChatCompletion( - id="cmpl-3QJQa5jXJ5Z5X", - choices=[ - _ChatCompletionChoice( - finish_reason="content_filter", - index=0, -
message=ChatCompletionMessage( - content="elaina", role="assistant", function_call=function_call, tool_calls=tool_calls - ), - ) - ], - created=int(time()), - model=model, - object="chat.completion", - system_fingerprint="", - usage=CompletionUsage( - prompt_tokens=2, - completion_tokens=1, - total_tokens=3, - ), - ) - - @staticmethod - def mocked_openai_chat_create_stream( - model: str, - functions: list[completion_create_params.Function] | NotGiven = NOT_GIVEN, - tools: list[ChatCompletionToolParam] | NotGiven = NOT_GIVEN, - ) -> Generator[ChatCompletionChunk, None, None]: - tool_calls = [] - function_call = MockChatClass.generate_function_call(functions=functions) - if not function_call: - tool_calls = MockChatClass.generate_tool_calls(tools=tools) - - full_text = "Hello, world!\n\n```python\nprint('Hello, world!')\n```" - for i in range(0, len(full_text) + 1): - if i == len(full_text): - yield ChatCompletionChunk( - id="cmpl-3QJQa5jXJ5Z5X", - choices=[ - Choice( - delta=ChoiceDelta( - content="", - function_call=ChoiceDeltaFunctionCall( - name=function_call.name, - arguments=function_call.arguments, - ) - if function_call - else None, - role="assistant", - tool_calls=[ - ChoiceDeltaToolCall( - index=0, - id="misaka-mikoto", - function=ChoiceDeltaToolCallFunction( - name=tool_calls[0].function.name, - arguments=tool_calls[0].function.arguments, - ), - type="function", - ) - ] - if tool_calls and len(tool_calls) > 0 - else None, - ), - finish_reason="function_call", - index=0, - ) - ], - created=int(time()), - model=model, - object="chat.completion.chunk", - system_fingerprint="", - usage=CompletionUsage( - prompt_tokens=2, - completion_tokens=17, - total_tokens=19, - ), - ) - else: - yield ChatCompletionChunk( - id="cmpl-3QJQa5jXJ5Z5X", - choices=[ - Choice( - delta=ChoiceDelta( - content=full_text[i], - role="assistant", - ), - finish_reason="content_filter", - index=0, - ) - ], - created=int(time()), - model=model, - object="chat.completion.chunk", - system_fingerprint="", - ) - - def chat_create( - self: Completions, - *, - messages: list[ChatCompletionMessageParam], - model: Union[ - str, - Literal[ - "gpt-4-1106-preview", - "gpt-4-vision-preview", - "gpt-4", - "gpt-4-0314", - "gpt-4-0613", - "gpt-4-32k", - "gpt-4-32k-0314", - "gpt-4-32k-0613", - "gpt-3.5-turbo-1106", - "gpt-3.5-turbo", - "gpt-3.5-turbo-16k", - "gpt-3.5-turbo-0301", - "gpt-3.5-turbo-0613", - "gpt-3.5-turbo-16k-0613", - ], - ], - functions: list[completion_create_params.Function] | NotGiven = NOT_GIVEN, - response_format: completion_create_params.ResponseFormat | NotGiven = NOT_GIVEN, - stream: Optional[Literal[False]] | NotGiven = NOT_GIVEN, - tools: list[ChatCompletionToolParam] | NotGiven = NOT_GIVEN, - **kwargs: Any, - ): - openai_models = [ - "gpt-4-1106-preview", - "gpt-4-vision-preview", - "gpt-4", - "gpt-4-0314", - "gpt-4-0613", - "gpt-4-32k", - "gpt-4-32k-0314", - "gpt-4-32k-0613", - "gpt-3.5-turbo-1106", - "gpt-3.5-turbo", - "gpt-3.5-turbo-16k", - "gpt-3.5-turbo-0301", - "gpt-3.5-turbo-0613", - "gpt-3.5-turbo-16k-0613", - ] - azure_openai_models = ["gpt35", "gpt-4v", "gpt-35-turbo"] - if not re.match(r"^(https?):\/\/[^\s\/$.?#].[^\s]*$", str(self._client.base_url)): - raise InvokeAuthorizationError("Invalid base url") - if model in openai_models + azure_openai_models: - if not re.match(r"sk-[a-zA-Z0-9]{24,}$", self._client.api_key) and type(self._client) == OpenAI: - # sometimes a provider using an OpenAI-compatible API has no API key, or uses a different key format, - # so we only check the API key when the model is in 
openai_models - raise InvokeAuthorizationError("Invalid api key") - if len(self._client.api_key) < 18 and type(self._client) == AzureOpenAI: - raise InvokeAuthorizationError("Invalid api key") - if stream: - return MockChatClass.mocked_openai_chat_create_stream(model=model, functions=functions, tools=tools) - - return MockChatClass.mocked_openai_chat_create_sync(model=model, functions=functions, tools=tools) diff --git a/api/tests/integration_tests/model_runtime/__mock/openai_completion.py b/api/tests/integration_tests/model_runtime/__mock/openai_completion.py deleted file mode 100644 index 14223668e0..0000000000 --- a/api/tests/integration_tests/model_runtime/__mock/openai_completion.py +++ /dev/null @@ -1,130 +0,0 @@ -import re -from collections.abc import Generator -from time import time - -# import monkeypatch -from typing import Any, Literal, Optional, Union - -from openai import AzureOpenAI, BadRequestError, OpenAI -from openai._types import NOT_GIVEN, NotGiven -from openai.resources.completions import Completions -from openai.types import Completion as CompletionMessage -from openai.types.completion import CompletionChoice -from openai.types.completion_usage import CompletionUsage - -from core.model_runtime.errors.invoke import InvokeAuthorizationError - - -class MockCompletionsClass: - @staticmethod - def mocked_openai_completion_create_sync(model: str) -> CompletionMessage: - return CompletionMessage( - id="cmpl-3QJQa5jXJ5Z5X", - object="text_completion", - created=int(time()), - model=model, - system_fingerprint="", - choices=[ - CompletionChoice( - text="mock", - index=0, - logprobs=None, - finish_reason="stop", - ) - ], - usage=CompletionUsage( - prompt_tokens=2, - completion_tokens=1, - total_tokens=3, - ), - ) - - @staticmethod - def mocked_openai_completion_create_stream(model: str) -> Generator[CompletionMessage, None, None]: - full_text = "Hello, world!\n\n```python\nprint('Hello, world!')\n```" - for i in range(0, len(full_text) + 1): - if i == len(full_text): - yield CompletionMessage( - id="cmpl-3QJQa5jXJ5Z5X", - object="text_completion", - created=int(time()), - model=model, - system_fingerprint="", - choices=[ - CompletionChoice( - text="", - index=0, - logprobs=None, - finish_reason="stop", - ) - ], - usage=CompletionUsage( - prompt_tokens=2, - completion_tokens=17, - total_tokens=19, - ), - ) - else: - yield CompletionMessage( - id="cmpl-3QJQa5jXJ5Z5X", - object="text_completion", - created=int(time()), - model=model, - system_fingerprint="", - choices=[ - CompletionChoice(text=full_text[i], index=0, logprobs=None, finish_reason="content_filter") - ], - ) - - def completion_create( - self: Completions, - *, - model: Union[ - str, - Literal[ - "babbage-002", - "davinci-002", - "gpt-3.5-turbo-instruct", - "text-davinci-003", - "text-davinci-002", - "text-davinci-001", - "code-davinci-002", - "text-curie-001", - "text-babbage-001", - "text-ada-001", - ], - ], - prompt: Union[str, list[str], list[int], list[list[int]], None], - stream: Optional[Literal[False]] | NotGiven = NOT_GIVEN, - **kwargs: Any, - ): - openai_models = [ - "babbage-002", - "davinci-002", - "gpt-3.5-turbo-instruct", - "text-davinci-003", - "text-davinci-002", - "text-davinci-001", - "code-davinci-002", - "text-curie-001", - "text-babbage-001", - "text-ada-001", - ] - azure_openai_models = ["gpt-35-turbo-instruct"] - - if not re.match(r"^(https?):\/\/[^\s\/$.?#].[^\s]*$", str(self._client.base_url)): - raise InvokeAuthorizationError("Invalid base url") - if model in openai_models + 
azure_openai_models: - if not re.match(r"sk-[a-zA-Z0-9]{24,}$", self._client.api_key) and type(self._client) == OpenAI: - # sometimes a provider using an OpenAI-compatible API has no API key, or uses a different key format, - # so we only check the API key when the model is in openai_models - raise InvokeAuthorizationError("Invalid api key") - if len(self._client.api_key) < 18 and type(self._client) == AzureOpenAI: - raise InvokeAuthorizationError("Invalid api key") - - if not prompt: - raise BadRequestError("Invalid prompt") - if stream: - return MockCompletionsClass.mocked_openai_completion_create_stream(model=model) - - return MockCompletionsClass.mocked_openai_completion_create_sync(model=model) diff --git a/api/tests/integration_tests/model_runtime/__mock/openai_embeddings.py b/api/tests/integration_tests/model_runtime/__mock/openai_embeddings.py deleted file mode 100644 index e27b9891f5..0000000000 --- a/api/tests/integration_tests/model_runtime/__mock/openai_embeddings.py +++ /dev/null @@ -1,58 +0,0 @@ -import re -from typing import Any, Literal, Union - -from openai import OpenAI -from openai._types import NOT_GIVEN, NotGiven -from openai.resources.embeddings import Embeddings -from openai.types.create_embedding_response import CreateEmbeddingResponse, Usage -from openai.types.embedding import Embedding - -from core.model_runtime.errors.invoke import InvokeAuthorizationError - - -class MockEmbeddingsClass: - def create_embeddings( - self: Embeddings, - *, - input: Union[str, list[str], list[int], list[list[int]]], - model: Union[str, Literal["text-embedding-ada-002"]], - encoding_format: Literal["float", "base64"] | NotGiven = NOT_GIVEN, - **kwargs: Any, - ) -> CreateEmbeddingResponse: - if isinstance(input, str): - input = [input] - - if not re.match(r"^(https?):\/\/[^\s\/$.?#].[^\s]*$", str(self._client.base_url)): - raise InvokeAuthorizationError("Invalid base url") - - if len(self._client.api_key) < 18: - raise InvokeAuthorizationError("Invalid API key") - - if encoding_format == "float": - return CreateEmbeddingResponse( - data=[ - Embedding(embedding=[0.23333 for _ in range(233)], index=i, object="embedding") - for i in range(len(input)) - ], - model=model, - object="list", - # marked: usage of embeddings should equal the number of testcase - usage=Usage(prompt_tokens=2, total_tokens=2), - ) - - embeddings = 
"VEfNvMLUnrwFleO8hcj9vEE/yrzyjOA84E1MvNfoCrxjrI+8sZUKvNgrBT17uY07gJ/IvNvhHLrUemc8KXXGumalIT3YKwU7ZsnbPMhATrwTt6u8JEwRPNMmCjxGREW7TRKvu6/MG7zAyDU8wXLkuuMDZDsXsL28zHzaOw0IArzOiMO8LtASvPKM4Dul5l+80V0bPGVDZ7wYNrI89ucsvJZdYztzRm+8P8ysOyGbc7zrdgK9sdiEPKQ8sbulKdq7KIgdvKIMDj25dNc8k0AXPBn/oLzrdgK8IXe5uz0Dvrt50V68tTjLO4ZOcjoG9x29oGfZufiwmzwMDXy8EL6ZPHvdx7nKjzE8+LCbPG22hTs3EZq7TM+0POrRzTxVZo084wPkO8Nak7z8cpw8pDwxvA2T8LvBC7C72fltvC8Atjp3fYE8JHDLvEYgC7xAdls8YiabPPkEeTzPUbK8gOLCPEBSIbyt5Oy8CpreusNakzywUhA824vLPHRlr7zAhTs7IZtzvHd9AT2xY/O6ok8IvOihqrql5l88K4EvuknWorvYKwW9iXkbvGMTRLw5qPG7onPCPLgNIzwAbK67ftbZPMxYILvAyDW9TLB0vIid1buzCKi7u+d0u8iDSLxNVam8PZyJPNxnETvVANw8Oi5mu9nVszzl65I7DIKNvLGVirxsMJE7tPXQu2PvCT1zRm87p1l9uyRMkbsdfqe8U52ePHRlr7wt9Mw8/C8ivTu02rwJFGq8tpoFPWnC7blWumq7sfy+vG1zCzy9Nlg8iv+PuvxT3DuLU228kVhoOkmTqDrv1kg8ocmTu1WpBzsKml48DzglvI8ECzxwTd27I+pWvIWkQ7xUR007GqlPPBFEDrzGECu865q8PI7BkDwNxYc8tgG6ullMSLsIajs84lk1PNLjD70mv648ZmInO2tnIjzvb5Q8o5KCPLo9xrwKMyq9QqGEvI8ECzxO2508ATUdPRAlTry5kxc8KVGMPJyBHjxIUC476KGqvIU9DzwX87c88PUIParrWrzdlzS/G3K+uzEw2TxB2BU86AhfPAMiRj2dK808a85WPPCft7xU4Bg95Q9NPDxZjzwrpek7yNkZvHa0EjyQ0nM6Nq9fuyjvUbsRq8I7CAMHO3VSWLyuauE7U1qkvPkEeTxs7ZY7B6FMO48Eizy75/S7ieBPvB07rTxmyVu8onPCO5rc6Tu7XIa7oEMfPYngT7u24vk7/+W5PE8eGDxJ1iI9t4cuvBGHiLyH1GY7jfghu+oUSDwa7Mk7iXmbuut2grrq8I2563v8uyofdTxRTrs44lm1vMeWnzukf6s7r4khvEKhhDyhyZO8G5Z4Oy56wTz4sBs81Zknuz3fg7wnJuO74n1vvASEADu98128gUl3vBtyvrtZCU47yep8u5FYaDx2G0e8a85WO5cmUjz3kds8qgqbPCUaerx50d67WKIZPI7BkDua3Om74vKAvL3zXbzXpRA9CI51vLo9xryKzXg7tXtFO9RWLTwnJuM854LqPEIs8zuO5cq8d8V1u9P0cjrQ++C8cGwdPDdUlLoOGeW8auEtu8Z337nlzFK8aRg/vFCkDD0nRSM879bIvKUFID1iStU8EL6ZvLufgLtKgNE7KVEMvJOnSzwahRU895HbvJiIjLvc8n88bmC0PPLP2rywM9C7jTscOoS3mjy/Znu7dhvHuu5Q1Dyq61o6CI71u09hkry0jhw8gb6IPI8EC7uoVAM8gs9rvGM3fjx2G8e81FYtu/ojubyYRRK72Riuu83elDtNNmk70/TyuzUFsbvgKZI7onNCvAehzLumr8679R6+urr6SztX2So8Bl5SOwSEgLv5NpA8LwC2PGPvibzJ6vw7H2tQvOtXwrzXpRC8j0z/uxwcbTy2vr+8VWYNu+t2ArwKmt68NKN2O3XrIzw9A747UU47vaavzjwU+qW8YBqyvE02aTyEt5o8cCmjOxtyPrxs7ZY775NOu+SJWLxMJQY8/bWWu6IMDrzSSsQ7GSPbPLlQnbpVzcE7Pka4PJ96sLycxJg8v/9GPO2HZTyeW3C8Vpawtx2iYTwWBg87/qI/OviwGzxyWcY7M9WNPIA4FD32C2e8tNGWPJ43trxCoYS8FGHavItTbbu7n4C80NemPLm30Ty1OMu7vG1pvG3aPztBP0o75Q/NPJhFEj2V9i683PL/O97+aLz6iu27cdPRum/mKLwvVgc89fqDu3LA+jvm2Ls8mVZ1PIuFBD3ZGK47Cpreut7+aLziWTU8XSEgPMvSKzzO73e5040+vBlmVTxS1K+8mQ4BPZZ8o7w8FpW6OR0DPSSPCz21Vwu99fqDOjMYiDy7XAY8oYaZO+aVwTyX49c84OaXOqdZfTunEQk7B8AMvMDs7zo/D6e8OP5CvN9gIzwNCII8FefOPE026TpzIjU8XsvOO+J9b7rkIiQ8is34O+e0AbxBpv67hcj9uiPq1jtCoQQ8JfY/u86nAz0Wkf28LnrBPJlW9Tt8P4K7BbSjO9grhbyAOJS8G3K+vJLe3LzXpZA7NQUxPJs+JDz6vAS8QHZbvYNVYDrj3yk88PWIPOJ97zuSIVc8ZUPnPMqPsbx2cZi7QfzPOxYGDz2hqtO6H2tQO543NjyFPY+7JRUAOt0wgDyJeZu8MpKTu6AApTtg1ze82JI5vKllZjvrV0I7HX6nu7vndDxg1ze8jwQLu1ZTNjuJvBU7BXGpvAP+C7xJk6g8j2u/vBABlLzlqBi8M9WNutRWLTx0zGM9sHbKPLoZDDtmyVu8tpqFOvPumjyuRqe87lBUvFU0drxs7Za8ejMZOzJPGbyC7qu863v8PDPVjTxJ1iI7Ca01PLuAQLuNHFy7At9LOwP+i7tYxlO80NemO9elkDx45LU8h9TmuzxZjzz/5bk8p84OurvndLwAkGi7XL9luCSzRTwMgg08vrxMPKIwyDwdomG8K6VpPGPvCTxkmTi7M/lHPGxUSzxwKSM8wQuwvOqtkzrLFSa8SbdivAMixjw2r9+7xWt2vAyCDT1NEi87B8CMvG1zi7xpwm27MrbNO9R6Z7xJt+K7jNnhu9ZiFrve/ug55CKkvCwHJLqsOr47+ortvPwvIr2v8NW8YmmVOE+FTLywUhA8MTBZvMiDyLtx8hG8OEE9vMDsbzroCF88DelBOobnPbx+b6U8sbnEOywr3ro93wO9dMzjup2xwbwnRaO7cRZMu8Z337vS44+7VpYwvFWphzxKgNE8L1aHPLPFLbunzo66zFggPN+jHbs7tFo8nW7HO9JKRLyoeD28Fm1DPGZip7u5dNe7KMsXvFnlkzxQpAw7MrZNPHpX0zwSyoK7ayQovPR0Dz3gClK8/juLPDjaCLvqrZO7a4vcO9HEzzvife88KKzXvDmocbwpMkw7t2huvaIMjjznguo7Gy/EOzxZjzoLuZ48qi5VvCjLFzuDmNo654LquyrXgDy7XAa8e7mNvJ7QAb0Rq8K7ojBIvBN0MTuOfha8GoUVveb89bxMsHS8jV9WPPKM4LyAOJS8me9AvZv7qbsbcr47tu
L5uaXmXzweKNa7rkYnPINV4Lxcv+W8tVcLvI8oxbzvbxS7oYaZu9+jHT0cHO08c7uAPCSzRTywUhA85xu2u+wBcTuJvJU8PBYVusTghzsnAim8acJtPFQE0zzFIwI9C7meO1DIRry7XAY8MKpkPJZd47suN0e5JTm6u6BDn7zfx1e8AJDoOr9CQbwaQps7x/1TPLTRFryqLtU8JybjPIXI/Tz6I7k6mVb1PMWKNryd1fs8Ok0mPHt2kzy9Ep48TTZpvPS3ibwGOpi8Ns4fPBqFlbr3Kqc8+QR5vHLA+rt7uY289YXyPI6iULxL4gu8Tv/XuycCKbwCnFG8C7kevVG1b7zIXw68GoWVO4rNeDnrM4i8MxgIPUNLs7zSoJW86ScfO+rRzbs6Cqw8NxGautP0cjw0wjY8CGq7vAkU6rxKgNG5+uA+vJXXbrwKM6o86vCNOu+yjjoQAZS8xATCOQVxKbynzo68wxcZvMhATjzS4488ArsRvNEaobwRh4i7t4euvAvd2DwnAik8UtQvvBFEDrz4sJs79gtnvOknnzy+vEy8D3sfPLH8vjzmLo28KVGMvOtXwjvpapm8HBxtPH3K8Lu753Q8/l9FvLvn9DomoG48fET8u9zy/7wMpke8zmQJu3oU2TzlD828KteAPAwNfLu+mBI5ldduPNZDVjq+vEy8eEvqvDHJpLwUPaC6qi7VPABsLjwFcSm72sJcu+bYO7v41NW8RiALvYB7DjzL0is7qLs3us1FSbzaf2K8MnNTuxABFDzF8Wo838fXvOBNzDzre3w8afQEvQE1nbulBaC78zEVvG5B9LzH/VM82Riuuwu5nrwsByQ8Y6yPvHXro7yQ0nM8nStNPJkyOzwnJmM80m7+O1VmjTzqrZM8dhvHOyAQBbz3baG8KTJMPOlqmbxsVEs8Pq3suy56QbzUVq08X3CDvAE1nTwUHuA7hue9vF8tCbvwOAO6F7A9ugd9kryqLtW7auEtu9ONPryPa7+8o9r2O570OzyFpEO8ntCBPOqtk7sykhO7lC1AOw2TcLswhiq6vx4HvP5fRbwuesG7Mk8ZvA4Z5TlfcAM9DrIwPL//xrzMm5q8JEwRPHBsnbxL4gu8jyjFu99gozrkZZ483GeRPLuAwDuYiIw8iv8PvK5Gpzx+b6W87Yflu3NGbzyE+hQ8a4tcPItT7bsoy5e8L1YHvWQyBDwrga86kPEzvBQ9oDxtl0W8lwKYvGpIYrxQ5wY8AJDovOLyALyw3f489JjJvMdTpTkKMyo8V9mqvH3K8LpyNYy8JHDLOixu2LpQ54Y8Q0uzu8LUnrs0wrY84vIAveihqjwfihA8DIKNvLDd/jywM1C7FB7gOxsLirxAUqE7sulnvH3K8DkAkGg8jsGQvO+TzrynWf287CCxvK4Drbwg8UQ8JRr6vFEqAbskjwu76q2TPNP0cjopDhK8dVJYvFIXKrxLn5G8AK8oPAb3HbxbOXE8Bvedun5Q5ThHyjk8QdiVvBXDlLw0o/Y7aLGKupkOgTxKPdc81kNWPtUAXLxUR827X1FDPf47izxsEVE8akhiPIhaWzxYX5+7hT0PPSrXgLxQC0E8i4WEvKUp2jtCLHM8DcWHO768zLxnK5a89R6+vH9czrorpem73h0pvAnwr7yKzXi8gDgUPf47Czq9zyO8728UOf34EDy6PUY76OSkvKZIGr2ZDgE8gzEmPG3av7v77Ce7/oP/O3MiNTtas/w8x1OlO/D1CDvDfs27ll1jO2Ufrbv1hXK8WINZuxN0sbuxlYq8OYS3uia/rjyiTwi9O7TaO+/WyDyiDA49E7erO3fF9bj6I7k7qHi9O3SoKbyBSfc7drSSvGPvCT2pQay7t2huPGnC7byUCQY8CEaBu6rHoDhx8hE8/fgQvCjLl7zdeHS8x/3TO0Isc7tas3y8jwQLvUKhhDz+foU8fCDCPC+ZgTywD5Y7ZR8tOla66rtCCLm8gWg3vDoKrLxbWDE76SefPBkj2zrlqJi7pebfuv6Df7zWQ9a7lHA6PGDXtzzMv1Q8mtxpOwJ4lzxKGZ28mGnMPDw6z7yxY/O7m2Leu7juYjwvVge8zFigPGpIYjtWumo5xs2wOgyCjbxrZ6K8bbaFvKzTCbsks8W7C7mePIU9DzxQyEY8posUvAW0ozrHlh88CyBTPJRwursxySQ757SBuqcRCbwNCIK8EL6ZvIG+iLsIRgE8rF74vOJZtbuUcDq8r/DVPMpMt7sL3Vi8eWqquww/kzqj2vY5auGtu85kiTwMPxM66KGqvBIxNzuwUpA8v2b7u09C0rx7ms08NUirvFYQPLxKPdc68mimvP5fRTtoPPm7XuqOOgOJ+jxfLYm7u58AvXz8B72PR4W6ldfuuys+tbvYKwW7pkiaPLB2SjvKj7G875POvA6yML7qFEg9Eu68O6Up2rz77Kc84CmSPP6ivzz4sJu6/C+iOaUpWjwq14A84E3MOYB7Dr2d1Xu775NOvC6e+7spUYw8PzPhO5TGizt29ww9yNkZPY7lyrz020M7QRsQu3z8BzwkCZe79YXyO8jZmTzvGUM8HgQcO9kYrrzxBmy8hLeaPLYBOjz+oj88flBlO6GqUzuiMMi8fxlUvCr7ujz41NU8DA38PBeMAzx7uY28TTZpvFG1bzxtc4s89ucsPEereTwfipC82p4iPKtNFbzo5KQ7pcKlOW5gtDzO73c7B6FMOzRbgjxCXoo8v0JBOSl1RrwxDJ+7XWSaPD3Aw7sOsjA8tuJ5vKw6Pry5k5c8ZUNnvG/H6DyVTAA8Shkdvd7+aDvtpiW9qUGsPFTgmDwbcr68TTbpO1DnhryNX9a7mrivvIqpPjxsqhy81HrnOzv31Dvth+U6UtQvPBz4MrvtpqW84OYXvRz4sjxwkFe8zSGPuycCqbyFPY8818nKOw84JTy8bWk8USqBvBGHiLtosQo8BOs0u9skl7xQ54Y8uvrLPOknn7w705o8Jny0PAd9EjxhoKa8Iv2tu2M3/jtsVEs8DcUHPQSEADs3eE48GkKbupRR+rvdeHQ7Xy2JvO1jKz0xMFm8sWPzux07LbyrTZW7bdq/O6Pa9r0ahRW9CyDTOjSjdjyQ8bO8yaIIPfupLTz/CfQ7xndfvJs+JD0zPEK8KO/RvMpw8bwObzY7fm+lPJtiXrz5BHm8WmsIvKlBrLuDdKA7hWHJOgd9Ers0o/Y7nlvwu5NAl7u8BrW6utYRO2SZuDxyNYw8CppevAY6GDxVqQe9oGdZPFa6ary3RLS70NcmO2PQSb36ZrM86q2TPML42LwewaE8k2RRPDmocTsi/S29o/k2PHRlr7zjnC+8gHsOPUpcFzxtl8W6tuL5vHw/gry/2wy9yaIIvINV4Dx3fQG7ISFoPO7pnzwGXlK8HPiyPGAaMjzBC7A7MQyfu+eC6jyV1+67pDyxvBWkVLxrJKg754LqOScCKbwpUQy8KIgdOJDSc7zDfk08tLLWvNZDVjyh7c28ShmdvMnlgjs2NdS8ISHovP5+hbxGIIs8ayQouyKnXDzBcmS6zw44u
86IQ7yl5l+7cngGvWvOVrsEhIC7yNkZPJODkbuAn0g8XN6lPOaVwbuTgxG8OR2DPAb3HTzlqJi8nUoNvCAVf73Mmxo9afSEu4FotzveHSk8c0ZvOMFOqjwP9Sq87iwavIEBg7xIUK68IbozuozZ4btg17c7vx4Hvarr2rtp9IQ8Rt0QO+1jqzyeNzY8kNLzO8sVpry98108OCL9uyisV7vhr4Y8FgaPvLFjczw42og8gWg3vPX6gzsNk/C83GeRPCUVgDy0jpw7yNkZu2VD5zvh93o81h+cuw3Fhzyl5t+86Y7TvHa0EjyzCCi7WmsIPIy1Jzy00Ra6NUiru50rTTx50d47/HKcO2wwETw0f7y8sFIQvNxnkbzS4w855pVBu9FdGzx9yvC6TM80vFQjkzy/Zvs7BhtYPLjKKLqPa787A/6LOyiInbzooSq8728UPIFJ97wq+7q8R6v5u1tYMbwdomG6iSPKPAb3HTx3oTu7fGO8POqtk7ze/ug84wNkPMnq/DsB8iK9ogwOu6lBrDznguo8NQUxvHKcwDo28tm7yNmZPN1UurxCoYS80m7+Oy+9OzzGzTC836MdvCDNCrtaawi7dVLYPEfKuTxzRm88cCmjOyXSBbwGOpi879ZIO8dTJbtqnrO8NMI2vR1+J7xwTV087umfPFG17zsC30s8oYaZPKllZrzZGK47zss9vP21FryZywa9bbYFPVNapDt2G0e7E3SxPMUjgry5dNc895Hbu0H8z7ueN7a7OccxPFhfH7vC1B48n3owvEhQLrzu6Z+8HTutvEBSITw6Taa5g1XgPCzEqbxfLYk9OYQ3vBlm1bvPUTI8wIU7PIy1pzyFyP07gzGmO3NGb7yS3ty7O5CguyEhaLyWoF28pmxUOaZImrz+g/87mnU1vFbsgTxvo668PFmPO2KNTzy09VC8LG5YPHhL6rsvJPC7kTQuvEGCxDlhB9s6u58AvfCAd7z0t4k7kVjoOCkOkrxMjDq8iPOmPL0SnrxsMJG7OEG9vCUa+rvx4rE7cpxAPDCGqjukf6u8TEnAvNn57TweBBw7JdKFvIy1p7vIg8i7" # noqa: E501 - - data = [] - for i, text in enumerate(input): - obj = Embedding(embedding=[], index=i, object="embedding") - obj.embedding = embeddings - - data.append(obj) - - return CreateEmbeddingResponse( - data=data, - model=model, - object="list", - # marked: usage of embeddings should equal the number of testcase - usage=Usage(prompt_tokens=2, total_tokens=2), - ) diff --git a/api/tests/integration_tests/model_runtime/__mock/openai_remote.py b/api/tests/integration_tests/model_runtime/__mock/openai_remote.py deleted file mode 100644 index cb8f249543..0000000000 --- a/api/tests/integration_tests/model_runtime/__mock/openai_remote.py +++ /dev/null @@ -1,23 +0,0 @@ -from time import time - -from openai.resources.models import Models -from openai.types.model import Model - - -class MockModelClass: - """ - mock class for openai.models.Models - """ - - def list( - self, - **kwargs, - ) -> list[Model]: - return [ - Model( - id="ft:gpt-3.5-turbo-0613:personal::8GYJLPDQ", - created=int(time()), - object="model", - owned_by="organization:org-123", - ) - ] diff --git a/api/tests/integration_tests/model_runtime/__mock/openai_speech2text.py b/api/tests/integration_tests/model_runtime/__mock/openai_speech2text.py deleted file mode 100644 index a51dcab4be..0000000000 --- a/api/tests/integration_tests/model_runtime/__mock/openai_speech2text.py +++ /dev/null @@ -1,29 +0,0 @@ -import re -from typing import Any, Literal, Union - -from openai._types import NOT_GIVEN, FileTypes, NotGiven -from openai.resources.audio.transcriptions import Transcriptions -from openai.types.audio.transcription import Transcription - -from core.model_runtime.errors.invoke import InvokeAuthorizationError - - -class MockSpeech2TextClass: - def speech2text_create( - self: Transcriptions, - *, - file: FileTypes, - model: Union[str, Literal["whisper-1"]], - language: str | NotGiven = NOT_GIVEN, - prompt: str | NotGiven = NOT_GIVEN, - response_format: Literal["json", "text", "srt", "verbose_json", "vtt"] | NotGiven = NOT_GIVEN, - temperature: float | NotGiven = NOT_GIVEN, - **kwargs: Any, - ) -> Transcription: - if not re.match(r"^(https?):\/\/[^\s\/$.?#].[^\s]*$", str(self._client.base_url)): - raise InvokeAuthorizationError("Invalid base url") - - if len(self._client.api_key) < 18: - raise InvokeAuthorizationError("Invalid API key") - - return Transcription(text="1, 2, 3, 4, 5, 6, 7, 8, 9, 10") diff --git 
a/api/tests/integration_tests/model_runtime/__mock/xinference.py b/api/tests/integration_tests/model_runtime/__mock/xinference.py deleted file mode 100644 index 8deb50635f..0000000000 --- a/api/tests/integration_tests/model_runtime/__mock/xinference.py +++ /dev/null @@ -1,170 +0,0 @@ -import os -import re -from typing import Union - -import pytest -from _pytest.monkeypatch import MonkeyPatch -from requests import Response -from requests.exceptions import ConnectionError -from requests.sessions import Session -from xinference_client.client.restful.restful_client import ( - Client, - RESTfulChatModelHandle, - RESTfulEmbeddingModelHandle, - RESTfulGenerateModelHandle, - RESTfulRerankModelHandle, -) -from xinference_client.types import Embedding, EmbeddingData, EmbeddingUsage - - -class MockXinferenceClass: - def get_chat_model(self: Client, model_uid: str) -> Union[RESTfulGenerateModelHandle, RESTfulChatModelHandle]: - if not re.match(r"https?:\/\/[^\s\/$.?#].[^\s]*$", self.base_url): - raise RuntimeError("404 Not Found") - - if "generate" == model_uid: - return RESTfulGenerateModelHandle(model_uid, base_url=self.base_url, auth_headers={}) - if "chat" == model_uid: - return RESTfulChatModelHandle(model_uid, base_url=self.base_url, auth_headers={}) - if "embedding" == model_uid: - return RESTfulEmbeddingModelHandle(model_uid, base_url=self.base_url, auth_headers={}) - if "rerank" == model_uid: - return RESTfulRerankModelHandle(model_uid, base_url=self.base_url, auth_headers={}) - raise RuntimeError("404 Not Found") - - def get(self: Session, url: str, **kwargs): - response = Response() - if "v1/models/" in url: - # get model uid - model_uid = url.split("/")[-1] or "" - if not re.match( - r"[a-f0-9]{8}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{12}", model_uid - ) and model_uid not in {"generate", "chat", "embedding", "rerank"}: - response.status_code = 404 - response._content = b"{}" - return response - - # check if url is valid - if not re.match(r"^(https?):\/\/[^\s\/$.?#].[^\s]*$", url): - response.status_code = 404 - response._content = b"{}" - return response - - if model_uid in {"generate", "chat"}: - response.status_code = 200 - response._content = b"""{ - "model_type": "LLM", - "address": "127.0.0.1:43877", - "accelerators": [ - "0", - "1" - ], - "model_name": "chatglm3-6b", - "model_lang": [ - "en" - ], - "model_ability": [ - "generate", - "chat" - ], - "model_description": "latest chatglm3", - "model_format": "pytorch", - "model_size_in_billions": 7, - "quantization": "none", - "model_hub": "huggingface", - "revision": null, - "context_length": 2048, - "replica": 1 - }""" - return response - - elif model_uid == "embedding": - response.status_code = 200 - response._content = b"""{ - "model_type": "embedding", - "address": "127.0.0.1:43877", - "accelerators": [ - "0", - "1" - ], - "model_name": "bge", - "model_lang": [ - "en" - ], - "revision": null, - "max_tokens": 512 - }""" - return response - - elif "v1/cluster/auth" in url: - response.status_code = 200 - response._content = b"""{ - "auth": true - }""" - return response - - def _check_cluster_authenticated(self): - self._cluster_authed = True - - def rerank( - self: RESTfulRerankModelHandle, documents: list[str], query: str, top_n: int, return_documents: bool - ) -> dict: - # check if self._model_uid is a valid uuid - if ( - not re.match(r"[a-f0-9]{8}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{12}", self._model_uid) - and self._model_uid != "rerank" - ): - raise RuntimeError("404 Not Found") - - if not 
re.match(r"^(https?):\/\/[^\s\/$.?#].[^\s]*$", self._base_url): - raise RuntimeError("404 Not Found") - - if top_n is None: - top_n = 1 - - return { - "results": [ - {"index": i, "document": doc, "relevance_score": 0.9} for i, doc in enumerate(documents[:top_n]) - ] - } - - def create_embedding(self: RESTfulGenerateModelHandle, input: Union[str, list[str]], **kwargs) -> dict: - # check if self._model_uid is a valid uuid - if ( - not re.match(r"[a-f0-9]{8}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{12}", self._model_uid) - and self._model_uid != "embedding" - ): - raise RuntimeError("404 Not Found") - - if isinstance(input, str): - input = [input] - ipt_len = len(input) - - embedding = Embedding( - object="list", - model=self._model_uid, - data=[ - EmbeddingData(index=i, object="embedding", embedding=[1919.810 for _ in range(768)]) - for i in range(ipt_len) - ], - usage=EmbeddingUsage(prompt_tokens=ipt_len, total_tokens=ipt_len), - ) - - return embedding - - -MOCK = os.getenv("MOCK_SWITCH", "false").lower() == "true" - - -@pytest.fixture -def setup_xinference_mock(request, monkeypatch: MonkeyPatch): - if MOCK: - monkeypatch.setattr(Client, "get_model", MockXinferenceClass.get_chat_model) - monkeypatch.setattr(Client, "_check_cluster_authenticated", MockXinferenceClass._check_cluster_authenticated) - monkeypatch.setattr(Session, "get", MockXinferenceClass.get) - monkeypatch.setattr(RESTfulEmbeddingModelHandle, "create_embedding", MockXinferenceClass.create_embedding) - monkeypatch.setattr(RESTfulRerankModelHandle, "rerank", MockXinferenceClass.rerank) - yield - - if MOCK: - monkeypatch.undo() diff --git a/api/tests/integration_tests/model_runtime/anthropic/__init__.py b/api/tests/integration_tests/model_runtime/anthropic/__init__.py deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/api/tests/integration_tests/model_runtime/anthropic/test_llm.py b/api/tests/integration_tests/model_runtime/anthropic/test_llm.py deleted file mode 100644 index 8f7e9ec487..0000000000 --- a/api/tests/integration_tests/model_runtime/anthropic/test_llm.py +++ /dev/null @@ -1,92 +0,0 @@ -import os -from collections.abc import Generator - -import pytest - -from core.model_runtime.entities.llm_entities import LLMResult, LLMResultChunk, LLMResultChunkDelta -from core.model_runtime.entities.message_entities import AssistantPromptMessage, SystemPromptMessage, UserPromptMessage -from core.model_runtime.errors.validate import CredentialsValidateFailedError -from core.model_runtime.model_providers.anthropic.llm.llm import AnthropicLargeLanguageModel -from tests.integration_tests.model_runtime.__mock.anthropic import setup_anthropic_mock - - -@pytest.mark.parametrize("setup_anthropic_mock", [["none"]], indirect=True) -def test_validate_credentials(setup_anthropic_mock): - model = AnthropicLargeLanguageModel() - - with pytest.raises(CredentialsValidateFailedError): - model.validate_credentials(model="claude-instant-1.2", credentials={"anthropic_api_key": "invalid_key"}) - - model.validate_credentials( - model="claude-instant-1.2", credentials={"anthropic_api_key": os.environ.get("ANTHROPIC_API_KEY")} - ) - - -@pytest.mark.parametrize("setup_anthropic_mock", [["none"]], indirect=True) -def test_invoke_model(setup_anthropic_mock): - model = AnthropicLargeLanguageModel() - - response = model.invoke( - model="claude-instant-1.2", - credentials={ - "anthropic_api_key": os.environ.get("ANTHROPIC_API_KEY"), - "anthropic_api_url": os.environ.get("ANTHROPIC_API_URL"), - }, - prompt_messages=[ - SystemPromptMessage( - 
content="You are a helpful AI assistant.", - ), - UserPromptMessage(content="Hello World!"), - ], - model_parameters={"temperature": 0.0, "top_p": 1.0, "max_tokens": 10}, - stop=["How"], - stream=False, - user="abc-123", - ) - - assert isinstance(response, LLMResult) - assert len(response.message.content) > 0 - - -@pytest.mark.parametrize("setup_anthropic_mock", [["none"]], indirect=True) -def test_invoke_stream_model(setup_anthropic_mock): - model = AnthropicLargeLanguageModel() - - response = model.invoke( - model="claude-instant-1.2", - credentials={"anthropic_api_key": os.environ.get("ANTHROPIC_API_KEY")}, - prompt_messages=[ - SystemPromptMessage( - content="You are a helpful AI assistant.", - ), - UserPromptMessage(content="Hello World!"), - ], - model_parameters={"temperature": 0.0, "max_tokens": 100}, - stream=True, - user="abc-123", - ) - - assert isinstance(response, Generator) - - for chunk in response: - assert isinstance(chunk, LLMResultChunk) - assert isinstance(chunk.delta, LLMResultChunkDelta) - assert isinstance(chunk.delta.message, AssistantPromptMessage) - assert len(chunk.delta.message.content) > 0 if chunk.delta.finish_reason is None else True - - -def test_get_num_tokens(): - model = AnthropicLargeLanguageModel() - - num_tokens = model.get_num_tokens( - model="claude-instant-1.2", - credentials={"anthropic_api_key": os.environ.get("ANTHROPIC_API_KEY")}, - prompt_messages=[ - SystemPromptMessage( - content="You are a helpful AI assistant.", - ), - UserPromptMessage(content="Hello World!"), - ], - ) - - assert num_tokens == 18 diff --git a/api/tests/integration_tests/model_runtime/anthropic/test_provider.py b/api/tests/integration_tests/model_runtime/anthropic/test_provider.py deleted file mode 100644 index 6f1e50f431..0000000000 --- a/api/tests/integration_tests/model_runtime/anthropic/test_provider.py +++ /dev/null @@ -1,17 +0,0 @@ -import os - -import pytest - -from core.model_runtime.errors.validate import CredentialsValidateFailedError -from core.model_runtime.model_providers.anthropic.anthropic import AnthropicProvider -from tests.integration_tests.model_runtime.__mock.anthropic import setup_anthropic_mock - - -@pytest.mark.parametrize("setup_anthropic_mock", [["none"]], indirect=True) -def test_validate_provider_credentials(setup_anthropic_mock): - provider = AnthropicProvider() - - with pytest.raises(CredentialsValidateFailedError): - provider.validate_provider_credentials(credentials={}) - - provider.validate_provider_credentials(credentials={"anthropic_api_key": os.environ.get("ANTHROPIC_API_KEY")}) diff --git a/api/tests/integration_tests/model_runtime/assets/audio.mp3 b/api/tests/integration_tests/model_runtime/assets/audio.mp3 deleted file mode 100644 index 7c86e02e16..0000000000 Binary files a/api/tests/integration_tests/model_runtime/assets/audio.mp3 and /dev/null differ diff --git a/api/tests/integration_tests/model_runtime/azure_ai_studio/__init__.py b/api/tests/integration_tests/model_runtime/azure_ai_studio/__init__.py deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/api/tests/integration_tests/model_runtime/azure_ai_studio/test_llm.py b/api/tests/integration_tests/model_runtime/azure_ai_studio/test_llm.py deleted file mode 100644 index 8655b43d8f..0000000000 --- a/api/tests/integration_tests/model_runtime/azure_ai_studio/test_llm.py +++ /dev/null @@ -1,113 +0,0 @@ -import os -from collections.abc import Generator - -import pytest - -from core.model_runtime.entities.llm_entities import LLMResult, LLMResultChunk, LLMResultChunkDelta 
-from core.model_runtime.entities.message_entities import ( - AssistantPromptMessage, - ImagePromptMessageContent, - PromptMessageTool, - SystemPromptMessage, - TextPromptMessageContent, - UserPromptMessage, -) -from core.model_runtime.errors.validate import CredentialsValidateFailedError -from core.model_runtime.model_providers.azure_ai_studio.llm.llm import AzureAIStudioLargeLanguageModel -from tests.integration_tests.model_runtime.__mock.azure_ai_studio import setup_azure_ai_studio_mock - - -@pytest.mark.parametrize("setup_azure_ai_studio_mock", [["chat"]], indirect=True) -def test_validate_credentials(setup_azure_ai_studio_mock): - model = AzureAIStudioLargeLanguageModel() - - with pytest.raises(CredentialsValidateFailedError): - model.validate_credentials( - model="gpt-35-turbo", - credentials={"api_key": "invalid_key", "api_base": os.getenv("AZURE_AI_STUDIO_API_BASE")}, - ) - - model.validate_credentials( - model="gpt-35-turbo", - credentials={ - "api_key": os.getenv("AZURE_AI_STUDIO_API_KEY"), - "api_base": os.getenv("AZURE_AI_STUDIO_API_BASE"), - }, - ) - - -@pytest.mark.parametrize("setup_azure_ai_studio_mock", [["chat"]], indirect=True) -def test_invoke_model(setup_azure_ai_studio_mock): - model = AzureAIStudioLargeLanguageModel() - - result = model.invoke( - model="gpt-35-turbo", - credentials={ - "api_key": os.getenv("AZURE_AI_STUDIO_API_KEY"), - "api_base": os.getenv("AZURE_AI_STUDIO_API_BASE"), - }, - prompt_messages=[ - SystemPromptMessage( - content="You are a helpful AI assistant.", - ), - UserPromptMessage(content="Hello World!"), - ], - model_parameters={"temperature": 0.0, "max_tokens": 100}, - stream=False, - user="abc-123", - ) - - assert isinstance(result, LLMResult) - assert len(result.message.content) > 0 - - -@pytest.mark.parametrize("setup_azure_ai_studio_mock", [["chat"]], indirect=True) -def test_invoke_stream_model(setup_azure_ai_studio_mock): - model = AzureAIStudioLargeLanguageModel() - - result = model.invoke( - model="gpt-35-turbo", - credentials={ - "api_key": os.getenv("AZURE_AI_STUDIO_API_KEY"), - "api_base": os.getenv("AZURE_AI_STUDIO_API_BASE"), - }, - prompt_messages=[ - SystemPromptMessage( - content="You are a helpful AI assistant.", - ), - UserPromptMessage(content="Hello World!"), - ], - model_parameters={"temperature": 0.0, "max_tokens": 100}, - stream=True, - user="abc-123", - ) - - assert isinstance(result, Generator) - - for chunk in result: - assert isinstance(chunk, LLMResultChunk) - assert isinstance(chunk.delta, LLMResultChunkDelta) - assert isinstance(chunk.delta.message, AssistantPromptMessage) - if chunk.delta.finish_reason is not None: - assert chunk.delta.usage is not None - assert chunk.delta.usage.completion_tokens > 0 - - -def test_get_num_tokens(): - model = AzureAIStudioLargeLanguageModel() - - num_tokens = model.get_num_tokens( - model="gpt-35-turbo", - credentials={ - "api_key": os.getenv("AZURE_AI_STUDIO_API_KEY"), - "api_base": os.getenv("AZURE_AI_STUDIO_API_BASE"), - }, - prompt_messages=[ - SystemPromptMessage( - content="You are a helpful AI assistant.", - ), - UserPromptMessage(content="Hello World!"), - ], - ) - - assert num_tokens == 21 diff --git a/api/tests/integration_tests/model_runtime/azure_ai_studio/test_provider.py b/api/tests/integration_tests/model_runtime/azure_ai_studio/test_provider.py deleted file mode 100644 index 8afe38b09b..0000000000 --- a/api/tests/integration_tests/model_runtime/azure_ai_studio/test_provider.py +++ /dev/null @@ -1,17 +0,0 @@ -import os - -import pytest - -from 
core.model_runtime.errors.validate import CredentialsValidateFailedError -from core.model_runtime.model_providers.azure_ai_studio.azure_ai_studio import AzureAIStudioProvider - - -def test_validate_provider_credentials(): - provider = AzureAIStudioProvider() - - with pytest.raises(CredentialsValidateFailedError): - provider.validate_provider_credentials(credentials={}) - - provider.validate_provider_credentials( - credentials={"api_key": os.getenv("AZURE_AI_STUDIO_API_KEY"), "api_base": os.getenv("AZURE_AI_STUDIO_API_BASE")} - ) diff --git a/api/tests/integration_tests/model_runtime/azure_ai_studio/test_rerank.py b/api/tests/integration_tests/model_runtime/azure_ai_studio/test_rerank.py deleted file mode 100644 index 466facc5ff..0000000000 --- a/api/tests/integration_tests/model_runtime/azure_ai_studio/test_rerank.py +++ /dev/null @@ -1,50 +0,0 @@ -import os - -import pytest - -from core.model_runtime.entities.rerank_entities import RerankResult -from core.model_runtime.errors.validate import CredentialsValidateFailedError -from core.model_runtime.model_providers.azure_ai_studio.rerank.rerank import AzureAIStudioRerankModel - - -def test_validate_credentials(): - model = AzureAIStudioRerankModel() - - with pytest.raises(CredentialsValidateFailedError): - model.validate_credentials( - model="azure-ai-studio-rerank-v1", - credentials={"api_key": "invalid_key", "api_base": os.getenv("AZURE_AI_STUDIO_API_BASE")}, - query="What is the capital of the United States?", - docs=[ - "Carson City is the capital city of the American state of Nevada. At the 2010 United States " - "Census, Carson City had a population of 55,274.", - "The Commonwealth of the Northern Mariana Islands is a group of islands in the Pacific Ocean that " - "are a political division controlled by the United States. Its capital is Saipan.", - ], - score_threshold=0.8, - ) - - -def test_invoke_model(): - model = AzureAIStudioRerankModel() - - result = model.invoke( - model="azure-ai-studio-rerank-v1", - credentials={ - "api_key": os.getenv("AZURE_AI_STUDIO_JWT_TOKEN"), - "api_base": os.getenv("AZURE_AI_STUDIO_API_BASE"), - }, - query="What is the capital of the United States?", - docs=[ - "Carson City is the capital city of the American state of Nevada. At the 2010 United States " - "Census, Carson City had a population of 55,274.", - "The Commonwealth of the Northern Mariana Islands is a group of islands in the Pacific Ocean that " - "are a political division controlled by the United States. 
Its capital is Saipan.", - ], - score_threshold=0.8, - ) - - assert isinstance(result, RerankResult) - assert len(result.docs) == 1 - assert result.docs[0].index == 1 - assert result.docs[0].score >= 0.8 diff --git a/api/tests/integration_tests/model_runtime/azure_openai/__init__.py b/api/tests/integration_tests/model_runtime/azure_openai/__init__.py deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/api/tests/integration_tests/model_runtime/azure_openai/test_llm.py b/api/tests/integration_tests/model_runtime/azure_openai/test_llm.py deleted file mode 100644 index 8f50ebf7a6..0000000000 --- a/api/tests/integration_tests/model_runtime/azure_openai/test_llm.py +++ /dev/null @@ -1,290 +0,0 @@ -import os -from collections.abc import Generator - -import pytest - -from core.model_runtime.entities.llm_entities import LLMResult, LLMResultChunk, LLMResultChunkDelta -from core.model_runtime.entities.message_entities import ( - AssistantPromptMessage, - ImagePromptMessageContent, - PromptMessageTool, - SystemPromptMessage, - TextPromptMessageContent, - UserPromptMessage, -) -from core.model_runtime.errors.validate import CredentialsValidateFailedError -from core.model_runtime.model_providers.azure_openai.llm.llm import AzureOpenAILargeLanguageModel -from tests.integration_tests.model_runtime.__mock.openai import setup_openai_mock - - -@pytest.mark.parametrize("setup_openai_mock", [["chat"]], indirect=True) -def test_validate_credentials_for_chat_model(setup_openai_mock): - model = AzureOpenAILargeLanguageModel() - - with pytest.raises(CredentialsValidateFailedError): - model.validate_credentials( - model="gpt35", - credentials={ - "openai_api_base": os.environ.get("AZURE_OPENAI_API_BASE"), - "openai_api_key": "invalid_key", - "base_model_name": "gpt-35-turbo", - }, - ) - - model.validate_credentials( - model="gpt35", - credentials={ - "openai_api_base": os.environ.get("AZURE_OPENAI_API_BASE"), - "openai_api_key": os.environ.get("AZURE_OPENAI_API_KEY"), - "base_model_name": "gpt-35-turbo", - }, - ) - - -@pytest.mark.parametrize("setup_openai_mock", [["completion"]], indirect=True) -def test_validate_credentials_for_completion_model(setup_openai_mock): - model = AzureOpenAILargeLanguageModel() - - with pytest.raises(CredentialsValidateFailedError): - model.validate_credentials( - model="gpt-35-turbo-instruct", - credentials={ - "openai_api_base": os.environ.get("AZURE_OPENAI_API_BASE"), - "openai_api_key": "invalid_key", - "base_model_name": "gpt-35-turbo-instruct", - }, - ) - - model.validate_credentials( - model="gpt-35-turbo-instruct", - credentials={ - "openai_api_base": os.environ.get("AZURE_OPENAI_API_BASE"), - "openai_api_key": os.environ.get("AZURE_OPENAI_API_KEY"), - "base_model_name": "gpt-35-turbo-instruct", - }, - ) - - -@pytest.mark.parametrize("setup_openai_mock", [["completion"]], indirect=True) -def test_invoke_completion_model(setup_openai_mock): - model = AzureOpenAILargeLanguageModel() - - result = model.invoke( - model="gpt-35-turbo-instruct", - credentials={ - "openai_api_base": os.environ.get("AZURE_OPENAI_API_BASE"), - "openai_api_key": os.environ.get("AZURE_OPENAI_API_KEY"), - "base_model_name": "gpt-35-turbo-instruct", - }, - prompt_messages=[UserPromptMessage(content="Hello World!")], - model_parameters={"temperature": 0.0, "max_tokens": 1}, - stream=False, - user="abc-123", - ) - - assert isinstance(result, LLMResult) - assert len(result.message.content) > 0 - - -@pytest.mark.parametrize("setup_openai_mock", [["completion"]], indirect=True) -def 
test_invoke_stream_completion_model(setup_openai_mock): - model = AzureOpenAILargeLanguageModel() - - result = model.invoke( - model="gpt-35-turbo-instruct", - credentials={ - "openai_api_base": os.environ.get("AZURE_OPENAI_API_BASE"), - "openai_api_key": os.environ.get("AZURE_OPENAI_API_KEY"), - "base_model_name": "gpt-35-turbo-instruct", - }, - prompt_messages=[UserPromptMessage(content="Hello World!")], - model_parameters={"temperature": 0.0, "max_tokens": 100}, - stream=True, - user="abc-123", - ) - - assert isinstance(result, Generator) - - for chunk in result: - assert isinstance(chunk, LLMResultChunk) - assert isinstance(chunk.delta, LLMResultChunkDelta) - assert isinstance(chunk.delta.message, AssistantPromptMessage) - assert len(chunk.delta.message.content) > 0 if chunk.delta.finish_reason is None else True - - -@pytest.mark.parametrize("setup_openai_mock", [["chat"]], indirect=True) -def test_invoke_chat_model(setup_openai_mock): - model = AzureOpenAILargeLanguageModel() - - result = model.invoke( - model="gpt35", - credentials={ - "openai_api_base": os.environ.get("AZURE_OPENAI_API_BASE"), - "openai_api_key": os.environ.get("AZURE_OPENAI_API_KEY"), - "base_model_name": "gpt-35-turbo", - }, - prompt_messages=[ - SystemPromptMessage( - content="You are a helpful AI assistant.", - ), - UserPromptMessage(content="Hello World!"), - ], - model_parameters={ - "temperature": 0.0, - "top_p": 1.0, - "presence_penalty": 0.0, - "frequency_penalty": 0.0, - "max_tokens": 10, - }, - stop=["How"], - stream=False, - user="abc-123", - ) - - assert isinstance(result, LLMResult) - assert len(result.message.content) > 0 - - -@pytest.mark.parametrize("setup_openai_mock", [["chat"]], indirect=True) -def test_invoke_stream_chat_model(setup_openai_mock): - model = AzureOpenAILargeLanguageModel() - - result = model.invoke( - model="gpt35", - credentials={ - "openai_api_base": os.environ.get("AZURE_OPENAI_API_BASE"), - "openai_api_key": os.environ.get("AZURE_OPENAI_API_KEY"), - "base_model_name": "gpt-35-turbo", - }, - prompt_messages=[ - SystemPromptMessage( - content="You are a helpful AI assistant.", - ), - UserPromptMessage(content="Hello World!"), - ], - model_parameters={"temperature": 0.0, "max_tokens": 100}, - stream=True, - user="abc-123", - ) - - assert isinstance(result, Generator) - - for chunk in result: - assert isinstance(chunk, LLMResultChunk) - assert isinstance(chunk.delta, LLMResultChunkDelta) - assert isinstance(chunk.delta.message, AssistantPromptMessage) - if chunk.delta.finish_reason is not None: - assert chunk.delta.usage is not None - assert chunk.delta.usage.completion_tokens > 0 - - -@pytest.mark.parametrize("setup_openai_mock", [["chat"]], indirect=True) -def test_invoke_chat_model_with_vision(setup_openai_mock): - model = AzureOpenAILargeLanguageModel() - - result = model.invoke( - model="gpt-4v", - credentials={ - "openai_api_base": os.environ.get("AZURE_OPENAI_API_BASE"), - "openai_api_key": os.environ.get("AZURE_OPENAI_API_KEY"), - "base_model_name": "gpt-4-vision-preview", - }, - prompt_messages=[ - SystemPromptMessage( - content="You are a helpful AI assistant.", - ), - UserPromptMessage( - content=[ - TextPromptMessageContent( - data="Hello World!", - ), - ImagePromptMessageContent( - 
data="data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAE4AAABMCAYAAADDYoEWAAAMQGlDQ1BJQ0MgUHJvZmlsZQAASImVVwdYU8kWnluSkEBoAQSkhN4EkRpASggt9I4gKiEJEEqMgaBiRxcVXLuIgA1dFVGwAmJBETuLYu+LBRVlXSzYlTcpoOu+8r35vrnz33/O/OfMmbllAFA7zhGJclF1APKEBeLYYH/6uOQUOukpIAEdoAy0gA2Hmy9iRkeHA1iG2r+Xd9cBIm2v2Eu1/tn/X4sGj5/PBQCJhjidl8/Ng/gAAHg1VyQuAIAo5c2mFoikGFagJYYBQrxIijPluFqK0+V4j8wmPpYFcTsASiocjjgTANVLkKcXcjOhhmo/xI5CnkAIgBodYp+8vMk8iNMgtoY2Ioil+oz0H3Qy/6aZPqzJ4WQOY/lcZEUpQJAvyuVM/z/T8b9LXq5kyIclrCpZ4pBY6Zxh3m7mTA6TYhWI+4TpkVEQa0L8QcCT2UOMUrIkIQlye9SAm8+COYMrDVBHHicgDGIDiIOEuZHhCj49QxDEhhjuEHSaoIAdD7EuxIv4+YFxCptN4smxCl9oY4aYxVTwZzlimV+pr/uSnASmQv91Fp+t0MdUi7LikyCmQGxeKEiMhFgVYof8nLgwhc3YoixW5JCNWBIrjd8c4li+MNhfro8VZoiDYhX2pXn5Q/PFNmUJ2JEKvK8gKz5Enh+sncuRxQ/ngl3iC5kJQzr8/HHhQ3Ph8QMC5XPHnvGFCXEKnQ+iAv9Y+VicIsqNVtjjpvzcYClvCrFLfmGcYiyeWAA3pFwfzxAVRMfL48SLsjmh0fJ48OUgHLBAAKADCazpYDLIBoLOvqY+eCfvCQIcIAaZgA/sFczQiCRZjxBe40AR+BMiPsgfHucv6+WDQsh/HWblV3uQIestlI3IAU8gzgNhIBfeS2SjhMPeEsFjyAj+4Z0DKxfGmwurtP/f80Psd4YJmXAFIxnySFcbsiQGEgOIIcQgog2uj/vgXng4vPrB6oQzcI+heXy3JzwhdBEeEq4Rugm3JgmKxT9FGQG6oX6QIhfpP+YCt4Sarrg/7g3VoTKug+sDe9wF+mHivtCzK2RZirilWaH/pP23GfywGgo7siMZJY8g+5Gtfx6paqvqOqwizfWP+ZHHmj6cb9Zwz8/+WT9knwfbsJ8tsUXYfuwMdgI7hx3BmgAda8WasQ7sqBQP767Hst015C1WFk8O1BH8w9/Qykozme9Y59jr+EXeV8CfJn1HA9Zk0XSxIDOrgM6EXwQ+nS3kOoyiOzk6OQMg/b7IX19vYmTfDUSn4zs3/w8AvFsHBwcPf+dCWwHY6w4f/0PfOWsG/HQoA3D2EFciLpRzuPRCgG8JNfik6QEjYAas4XycgBvwAn4gEISCKBAPksFEGH0W3OdiMBXMBPNACSgDy8EaUAk2gi1gB9gN9oEmcAScAKfBBXAJXAN34O7pAS9AP3gHPiMIQkKoCA3RQ4wRC8QOcUIYiA8SiIQjsUgykoZkIkJEgsxE5iNlyEqkEtmM1CJ7kUPICeQc0oXcQh4gvchr5BOKoSqoFmqIWqKjUQbKRMPQeHQCmolOQYvQBehStAKtQXehjegJ9AJ6De1GX6ADGMCUMR3MBLPHGBgLi8JSsAxMjM3GSrFyrAarx1rgOl/BurE+7CNOxGk4HbeHOzgET8C5+BR8Nr4Er8R34I14O34Ff4D3498IVIIBwY7gSWATxhEyCVMJJYRywjbCQcIp+Cz1EN4RiUQdohXRHT6LycRs4gziEuJ6YgPxOLGL+Ig4QCKR9Eh2JG9SFIlDKiCVkNaRdpFaSZdJPaQPSspKxkpOSkFKKUpCpWKlcqWdSseULis9VfpMVidbkD3JUWQeeTp5GXkruYV8kdxD/kzRoFhRvCnxlGzKPEoFpZ5yinKX8kZZWdlU2UM5RlmgPFe5QnmP8lnlB8ofVTRVbFVYKqkqEpWlKttVjqvcUnlDpVItqX7UFGoBdSm1lnqSep/6QZWm6qDKVuWpzlGtUm1Uvaz6Uo2sZqHGVJuoVqRWrrZf7aJanzpZ3VKdpc5Rn61epX5I/Yb6gAZNY4xGlEaexhKNnRrnNJ5pkjQtNQM1eZoLNLdontR8RMNoZjQWjUubT9tKO0Xr0SJqWWmxtbK1yrR2a3Vq9WtrartoJ2pP067SPqrdrYPpWOqwdXJ1luns07mu82mE4QjmCP6IxSPqR1we8V53pK6fLl+3VLdB95ruJz26XqBejt4KvSa9e/q4vq1+jP5U/Q36p/T7RmqN9BrJHVk6ct/I2waoga1BrMEMgy0GHQYDhkaGwYYiw3WGJw37jHSM/IyyjVYbHTPqNaYZ+xgLjFcbtxo/p2vTmfRcegW9nd5vYmASYiIx2WzSafLZ1Mo0wbTYtMH0nhnFjGGWYbbarM2s39zYPMJ8pnmd+W0LsgXDIstircUZi/eWVpZJlgstmyyfWelasa2KrOqs7lpTrX2tp1jXWF+1IdowbHJs1ttcskVtXW2zbKtsL9qhdm52Arv1dl2jCKM8RglH1Yy6Ya9iz7QvtK+zf+Cg4xDuUOzQ5PBytPnolNErRp8Z/c3R1THXcavjnTGaY0LHFI9pGfPaydaJ61TldNWZ6hzkPMe52fmVi50L32WDy01XmmuE60LXNtevbu5uYrd6t153c/c092r3GwwtRjRjCeOsB8HD32OOxxGPj55ungWe+zz/8rL3yvHa6fVsrNVY/titYx95m3pzvDd7d/vQfdJ8Nvl0+5r4cnxrfB/6mfnx/Lb5PWXaMLOZu5gv/R39xf4H/d+zPFmzWMcDsIDggNKAzkDNwITAysD7QaZBmUF1Qf3BrsEzgo+HEELCQlaE3GAbsrnsWnZ/qHvorND2MJWwuLDKsIfhtuHi8JYINCI0YlXE3UiLSGFkUxSIYketiroXbRU9JfpwDDEmOqYq5knsmNiZsWfiaHGT4nbGvYv3j18WfyfBOkGS0JaolpiaWJv4PikgaWVS97jR42aNu5CsnyxIbk4hpSSmbEsZGB84fs34nlTX1JLU6xOsJkybcG6i/sTciUcnqU3iTNqfRkhLStuZ9oUTxanhDKSz06vT+7ks7lruC54fbzWvl+/NX8l/muGdsTLjWaZ35qrM3izfrPKsPgFLUCl4lR2SvTH7fU5Uzvacwdyk3IY8pby0vENCTWGOsH2y0eRpk7tEdqISUfcUzylrpvSLw8Tb8pH8CfnNBVrwR75DYi35RfKg0KewqvDD1MSp+6dpTBNO65huO33x9KdFQUW/zcBncGe0zTSZOW/mg1nMWZtnI7PTZ7fNMZuzYE7P3OC5O+ZR5uXM+73YsXhl8dv5SfNbFhgumLvg0S/Bv9SVqJaIS24s9Fq4cRG+SLCoc7Hz4nWLv5XySs+XOZaVl31Zwl1y/tcxv1b8Org0Y2nnMrdlG5YTlwuXX1/hu2LHSo2VRSsfrYpY1biavrp09ds1k9acK3cp37iWslaytrsivKJ5nfm65eu+VGZVXqvyr2qoNqheXP1+PW/
[... base64-encoded PNG image data omitted ...]" - ), - ] - ), - ], - model_parameters={"temperature": 0.0, "max_tokens": 100}, - stream=False, - user="abc-123", - ) - - assert isinstance(result, LLMResult) - assert len(result.message.content) > 0 - - -@pytest.mark.parametrize("setup_openai_mock", 
[["chat"]], indirect=True) -def test_invoke_chat_model_with_tools(setup_openai_mock): - model = AzureOpenAILargeLanguageModel() - - result = model.invoke( - model="gpt-35-turbo", - credentials={ - "openai_api_base": os.environ.get("AZURE_OPENAI_API_BASE"), - "openai_api_key": os.environ.get("AZURE_OPENAI_API_KEY"), - "base_model_name": "gpt-35-turbo", - }, - prompt_messages=[ - SystemPromptMessage( - content="You are a helpful AI assistant.", - ), - UserPromptMessage( - content="what's the weather today in London?", - ), - ], - model_parameters={"temperature": 0.0, "max_tokens": 100}, - tools=[ - PromptMessageTool( - name="get_weather", - description="Determine weather in my location", - parameters={ - "type": "object", - "properties": { - "location": {"type": "string", "description": "The city and state e.g. San Francisco, CA"}, - "unit": {"type": "string", "enum": ["c", "f"]}, - }, - "required": ["location"], - }, - ), - PromptMessageTool( - name="get_stock_price", - description="Get the current stock price", - parameters={ - "type": "object", - "properties": {"symbol": {"type": "string", "description": "The stock symbol"}}, - "required": ["symbol"], - }, - ), - ], - stream=False, - user="abc-123", - ) - - assert isinstance(result, LLMResult) - assert isinstance(result.message, AssistantPromptMessage) - assert len(result.message.tool_calls) > 0 - - -def test_get_num_tokens(): - model = AzureOpenAILargeLanguageModel() - - num_tokens = model.get_num_tokens( - model="gpt-35-turbo-instruct", - credentials={"base_model_name": "gpt-35-turbo-instruct"}, - prompt_messages=[UserPromptMessage(content="Hello World!")], - ) - - assert num_tokens == 3 - - num_tokens = model.get_num_tokens( - model="gpt35", - credentials={"base_model_name": "gpt-35-turbo"}, - prompt_messages=[ - SystemPromptMessage( - content="You are a helpful AI assistant.", - ), - UserPromptMessage(content="Hello World!"), - ], - ) - - assert num_tokens == 21 diff --git a/api/tests/integration_tests/model_runtime/azure_openai/test_text_embedding.py b/api/tests/integration_tests/model_runtime/azure_openai/test_text_embedding.py deleted file mode 100644 index a1ae2b2e5b..0000000000 --- a/api/tests/integration_tests/model_runtime/azure_openai/test_text_embedding.py +++ /dev/null @@ -1,62 +0,0 @@ -import os - -import pytest - -from core.model_runtime.entities.text_embedding_entities import TextEmbeddingResult -from core.model_runtime.errors.validate import CredentialsValidateFailedError -from core.model_runtime.model_providers.azure_openai.text_embedding.text_embedding import AzureOpenAITextEmbeddingModel -from tests.integration_tests.model_runtime.__mock.openai import setup_openai_mock - - -@pytest.mark.parametrize("setup_openai_mock", [["text_embedding"]], indirect=True) -def test_validate_credentials(setup_openai_mock): - model = AzureOpenAITextEmbeddingModel() - - with pytest.raises(CredentialsValidateFailedError): - model.validate_credentials( - model="embedding", - credentials={ - "openai_api_base": os.environ.get("AZURE_OPENAI_API_BASE"), - "openai_api_key": "invalid_key", - "base_model_name": "text-embedding-ada-002", - }, - ) - - model.validate_credentials( - model="embedding", - credentials={ - "openai_api_base": os.environ.get("AZURE_OPENAI_API_BASE"), - "openai_api_key": os.environ.get("AZURE_OPENAI_API_KEY"), - "base_model_name": "text-embedding-ada-002", - }, - ) - - -@pytest.mark.parametrize("setup_openai_mock", [["text_embedding"]], indirect=True) -def test_invoke_model(setup_openai_mock): - model = 
AzureOpenAITextEmbeddingModel() - - result = model.invoke( - model="embedding", - credentials={ - "openai_api_base": os.environ.get("AZURE_OPENAI_API_BASE"), - "openai_api_key": os.environ.get("AZURE_OPENAI_API_KEY"), - "base_model_name": "text-embedding-ada-002", - }, - texts=["hello", "world"], - user="abc-123", - ) - - assert isinstance(result, TextEmbeddingResult) - assert len(result.embeddings) == 2 - assert result.usage.total_tokens == 2 - - -def test_get_num_tokens(): - model = AzureOpenAITextEmbeddingModel() - - num_tokens = model.get_num_tokens( - model="embedding", credentials={"base_model_name": "text-embedding-ada-002"}, texts=["hello", "world"] - ) - - assert num_tokens == 2 diff --git a/api/tests/integration_tests/model_runtime/baichuan/__init__.py b/api/tests/integration_tests/model_runtime/baichuan/__init__.py deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/api/tests/integration_tests/model_runtime/baichuan/test_llm.py b/api/tests/integration_tests/model_runtime/baichuan/test_llm.py deleted file mode 100644 index fe7fe96891..0000000000 --- a/api/tests/integration_tests/model_runtime/baichuan/test_llm.py +++ /dev/null @@ -1,172 +0,0 @@ -import os -from collections.abc import Generator -from time import sleep - -import pytest - -from core.model_runtime.entities.llm_entities import LLMResult, LLMResultChunk, LLMResultChunkDelta -from core.model_runtime.entities.message_entities import AssistantPromptMessage, SystemPromptMessage, UserPromptMessage -from core.model_runtime.entities.model_entities import AIModelEntity -from core.model_runtime.errors.validate import CredentialsValidateFailedError -from core.model_runtime.model_providers.baichuan.llm.llm import BaichuanLanguageModel - - -def test_predefined_models(): - model = BaichuanLanguageModel() - model_schemas = model.predefined_models() - assert len(model_schemas) >= 1 - assert isinstance(model_schemas[0], AIModelEntity) - - -def test_validate_credentials_for_chat_model(): - sleep(3) - model = BaichuanLanguageModel() - - with pytest.raises(CredentialsValidateFailedError): - model.validate_credentials( - model="baichuan2-turbo", credentials={"api_key": "invalid_key", "secret_key": "invalid_key"} - ) - - model.validate_credentials( - model="baichuan2-turbo", - credentials={ - "api_key": os.environ.get("BAICHUAN_API_KEY"), - "secret_key": os.environ.get("BAICHUAN_SECRET_KEY"), - }, - ) - - -def test_invoke_model(): - sleep(3) - model = BaichuanLanguageModel() - - response = model.invoke( - model="baichuan2-turbo", - credentials={ - "api_key": os.environ.get("BAICHUAN_API_KEY"), - "secret_key": os.environ.get("BAICHUAN_SECRET_KEY"), - }, - prompt_messages=[UserPromptMessage(content="Hello World!")], - model_parameters={ - "temperature": 0.7, - "top_p": 1.0, - "top_k": 1, - }, - stop=["you"], - user="abc-123", - stream=False, - ) - - assert isinstance(response, LLMResult) - assert len(response.message.content) > 0 - assert response.usage.total_tokens > 0 - - -def test_invoke_model_with_system_message(): - sleep(3) - model = BaichuanLanguageModel() - - response = model.invoke( - model="baichuan2-turbo", - credentials={ - "api_key": os.environ.get("BAICHUAN_API_KEY"), - "secret_key": os.environ.get("BAICHUAN_SECRET_KEY"), - }, - prompt_messages=[ - SystemPromptMessage(content="请记住你是Kasumi。"), - UserPromptMessage(content="现在告诉我你是谁?"), - ], - model_parameters={ - "temperature": 0.7, - "top_p": 1.0, - "top_k": 1, - }, - stop=["you"], - user="abc-123", - stream=False, - ) - - assert isinstance(response, LLMResult) - 
assert len(response.message.content) > 0 - assert response.usage.total_tokens > 0 - - -def test_invoke_stream_model(): - sleep(3) - model = BaichuanLanguageModel() - - response = model.invoke( - model="baichuan2-turbo", - credentials={ - "api_key": os.environ.get("BAICHUAN_API_KEY"), - "secret_key": os.environ.get("BAICHUAN_SECRET_KEY"), - }, - prompt_messages=[UserPromptMessage(content="Hello World!")], - model_parameters={ - "temperature": 0.7, - "top_p": 1.0, - "top_k": 1, - }, - stop=["you"], - stream=True, - user="abc-123", - ) - - assert isinstance(response, Generator) - for chunk in response: - assert isinstance(chunk, LLMResultChunk) - assert isinstance(chunk.delta, LLMResultChunkDelta) - assert isinstance(chunk.delta.message, AssistantPromptMessage) - assert len(chunk.delta.message.content) > 0 if chunk.delta.finish_reason is None else True - - -def test_invoke_with_search(): - sleep(3) - model = BaichuanLanguageModel() - - response = model.invoke( - model="baichuan2-turbo", - credentials={ - "api_key": os.environ.get("BAICHUAN_API_KEY"), - "secret_key": os.environ.get("BAICHUAN_SECRET_KEY"), - }, - prompt_messages=[UserPromptMessage(content="北京今天的天气怎么样")], - model_parameters={ - "temperature": 0.7, - "top_p": 1.0, - "top_k": 1, - "with_search_enhance": True, - }, - stop=["you"], - stream=True, - user="abc-123", - ) - - assert isinstance(response, Generator) - total_message = "" - for chunk in response: - assert isinstance(chunk, LLMResultChunk) - assert isinstance(chunk.delta, LLMResultChunkDelta) - assert isinstance(chunk.delta.message, AssistantPromptMessage) - assert len(chunk.delta.message.content) > 0 if not chunk.delta.finish_reason else True - total_message += chunk.delta.message.content - - assert "不" not in total_message - - -def test_get_num_tokens(): - sleep(3) - model = BaichuanLanguageModel() - - response = model.get_num_tokens( - model="baichuan2-turbo", - credentials={ - "api_key": os.environ.get("BAICHUAN_API_KEY"), - "secret_key": os.environ.get("BAICHUAN_SECRET_KEY"), - }, - prompt_messages=[UserPromptMessage(content="Hello World!")], - tools=[], - ) - - assert isinstance(response, int) - assert response == 9 diff --git a/api/tests/integration_tests/model_runtime/baichuan/test_provider.py b/api/tests/integration_tests/model_runtime/baichuan/test_provider.py deleted file mode 100644 index 4036edfb7a..0000000000 --- a/api/tests/integration_tests/model_runtime/baichuan/test_provider.py +++ /dev/null @@ -1,15 +0,0 @@ -import os - -import pytest - -from core.model_runtime.errors.validate import CredentialsValidateFailedError -from core.model_runtime.model_providers.baichuan.baichuan import BaichuanProvider - - -def test_validate_provider_credentials(): - provider = BaichuanProvider() - - with pytest.raises(CredentialsValidateFailedError): - provider.validate_provider_credentials(credentials={"api_key": "hahahaha"}) - - provider.validate_provider_credentials(credentials={"api_key": os.environ.get("BAICHUAN_API_KEY")}) diff --git a/api/tests/integration_tests/model_runtime/baichuan/test_text_embedding.py b/api/tests/integration_tests/model_runtime/baichuan/test_text_embedding.py deleted file mode 100644 index cbc63f3978..0000000000 --- a/api/tests/integration_tests/model_runtime/baichuan/test_text_embedding.py +++ /dev/null @@ -1,87 +0,0 @@ -import os - -import pytest - -from core.model_runtime.entities.text_embedding_entities import TextEmbeddingResult -from core.model_runtime.errors.validate import CredentialsValidateFailedError -from 
core.model_runtime.model_providers.baichuan.text_embedding.text_embedding import BaichuanTextEmbeddingModel - - -def test_validate_credentials(): - model = BaichuanTextEmbeddingModel() - - with pytest.raises(CredentialsValidateFailedError): - model.validate_credentials(model="baichuan-text-embedding", credentials={"api_key": "invalid_key"}) - - model.validate_credentials( - model="baichuan-text-embedding", credentials={"api_key": os.environ.get("BAICHUAN_API_KEY")} - ) - - -def test_invoke_model(): - model = BaichuanTextEmbeddingModel() - - result = model.invoke( - model="baichuan-text-embedding", - credentials={ - "api_key": os.environ.get("BAICHUAN_API_KEY"), - }, - texts=["hello", "world"], - user="abc-123", - ) - - assert isinstance(result, TextEmbeddingResult) - assert len(result.embeddings) == 2 - assert result.usage.total_tokens == 6 - - -def test_get_num_tokens(): - model = BaichuanTextEmbeddingModel() - - num_tokens = model.get_num_tokens( - model="baichuan-text-embedding", - credentials={ - "api_key": os.environ.get("BAICHUAN_API_KEY"), - }, - texts=["hello", "world"], - ) - - assert num_tokens == 2 - - -def test_max_chunks(): - model = BaichuanTextEmbeddingModel() - - result = model.invoke( - model="baichuan-text-embedding", - credentials={ - "api_key": os.environ.get("BAICHUAN_API_KEY"), - }, - texts=[ - "hello", - "world", - "hello", - "world", - "hello", - "world", - "hello", - "world", - "hello", - "world", - "hello", - "world", - "hello", - "world", - "hello", - "world", - "hello", - "world", - "hello", - "world", - "hello", - "world", - ], - ) - - assert isinstance(result, TextEmbeddingResult) - assert len(result.embeddings) == 22 diff --git a/api/tests/integration_tests/model_runtime/bedrock/__init__.py b/api/tests/integration_tests/model_runtime/bedrock/__init__.py deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/api/tests/integration_tests/model_runtime/bedrock/test_llm.py b/api/tests/integration_tests/model_runtime/bedrock/test_llm.py deleted file mode 100644 index c19ec35a6e..0000000000 --- a/api/tests/integration_tests/model_runtime/bedrock/test_llm.py +++ /dev/null @@ -1,103 +0,0 @@ -import os -from collections.abc import Generator - -import pytest - -from core.model_runtime.entities.llm_entities import LLMResult, LLMResultChunk, LLMResultChunkDelta -from core.model_runtime.entities.message_entities import AssistantPromptMessage, SystemPromptMessage, UserPromptMessage -from core.model_runtime.errors.validate import CredentialsValidateFailedError -from core.model_runtime.model_providers.bedrock.llm.llm import BedrockLargeLanguageModel - - -def test_validate_credentials(): - model = BedrockLargeLanguageModel() - - with pytest.raises(CredentialsValidateFailedError): - model.validate_credentials(model="meta.llama2-13b-chat-v1", credentials={"anthropic_api_key": "invalid_key"}) - - model.validate_credentials( - model="meta.llama2-13b-chat-v1", - credentials={ - "aws_region": os.getenv("AWS_REGION"), - "aws_access_key": os.getenv("AWS_ACCESS_KEY"), - "aws_secret_access_key": os.getenv("AWS_SECRET_ACCESS_KEY"), - }, - ) - - -def test_invoke_model(): - model = BedrockLargeLanguageModel() - - response = model.invoke( - model="meta.llama2-13b-chat-v1", - credentials={ - "aws_region": os.getenv("AWS_REGION"), - "aws_access_key": os.getenv("AWS_ACCESS_KEY"), - "aws_secret_access_key": os.getenv("AWS_SECRET_ACCESS_KEY"), - }, - prompt_messages=[ - SystemPromptMessage( - content="You are a helpful AI assistant.", - ), - UserPromptMessage(content="Hello World!"), 
- ], - model_parameters={"temperature": 0.0, "top_p": 1.0, "max_tokens_to_sample": 10}, - stop=["How"], - stream=False, - user="abc-123", - ) - - assert isinstance(response, LLMResult) - assert len(response.message.content) > 0 - - -def test_invoke_stream_model(): - model = BedrockLargeLanguageModel() - - response = model.invoke( - model="meta.llama2-13b-chat-v1", - credentials={ - "aws_region": os.getenv("AWS_REGION"), - "aws_access_key": os.getenv("AWS_ACCESS_KEY"), - "aws_secret_access_key": os.getenv("AWS_SECRET_ACCESS_KEY"), - }, - prompt_messages=[ - SystemPromptMessage( - content="You are a helpful AI assistant.", - ), - UserPromptMessage(content="Hello World!"), - ], - model_parameters={"temperature": 0.0, "max_tokens_to_sample": 100}, - stream=True, - user="abc-123", - ) - - assert isinstance(response, Generator) - - for chunk in response: - print(chunk) - assert isinstance(chunk, LLMResultChunk) - assert isinstance(chunk.delta, LLMResultChunkDelta) - assert isinstance(chunk.delta.message, AssistantPromptMessage) - assert len(chunk.delta.message.content) > 0 if chunk.delta.finish_reason is None else True - - -def test_get_num_tokens(): - model = BedrockLargeLanguageModel() - - num_tokens = model.get_num_tokens( - model="meta.llama2-13b-chat-v1", - credentials={ - "aws_region": os.getenv("AWS_REGION"), - "aws_access_key": os.getenv("AWS_ACCESS_KEY"), - "aws_secret_access_key": os.getenv("AWS_SECRET_ACCESS_KEY"), - }, - messages=[ - SystemPromptMessage( - content="You are a helpful AI assistant.", - ), - UserPromptMessage(content="Hello World!"), - ], - ) - - assert num_tokens == 18 diff --git a/api/tests/integration_tests/model_runtime/bedrock/test_provider.py b/api/tests/integration_tests/model_runtime/bedrock/test_provider.py deleted file mode 100644 index 080727829e..0000000000 --- a/api/tests/integration_tests/model_runtime/bedrock/test_provider.py +++ /dev/null @@ -1,21 +0,0 @@ -import os - -import pytest - -from core.model_runtime.errors.validate import CredentialsValidateFailedError -from core.model_runtime.model_providers.bedrock.bedrock import BedrockProvider - - -def test_validate_provider_credentials(): - provider = BedrockProvider() - - with pytest.raises(CredentialsValidateFailedError): - provider.validate_provider_credentials(credentials={}) - - provider.validate_provider_credentials( - credentials={ - "aws_region": os.getenv("AWS_REGION"), - "aws_access_key": os.getenv("AWS_ACCESS_KEY"), - "aws_secret_access_key": os.getenv("AWS_SECRET_ACCESS_KEY"), - } - ) diff --git a/api/tests/integration_tests/model_runtime/chatglm/__init__.py b/api/tests/integration_tests/model_runtime/chatglm/__init__.py deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/api/tests/integration_tests/model_runtime/chatglm/test_llm.py b/api/tests/integration_tests/model_runtime/chatglm/test_llm.py deleted file mode 100644 index 418e88874d..0000000000 --- a/api/tests/integration_tests/model_runtime/chatglm/test_llm.py +++ /dev/null @@ -1,230 +0,0 @@ -import os -from collections.abc import Generator - -import pytest - -from core.model_runtime.entities.llm_entities import LLMResult, LLMResultChunk, LLMResultChunkDelta -from core.model_runtime.entities.message_entities import ( - AssistantPromptMessage, - PromptMessageTool, - SystemPromptMessage, - TextPromptMessageContent, - UserPromptMessage, -) -from core.model_runtime.entities.model_entities import AIModelEntity -from core.model_runtime.errors.validate import CredentialsValidateFailedError -from 
core.model_runtime.model_providers.chatglm.llm.llm import ChatGLMLargeLanguageModel -from tests.integration_tests.model_runtime.__mock.openai import setup_openai_mock - - -def test_predefined_models(): - model = ChatGLMLargeLanguageModel() - model_schemas = model.predefined_models() - assert len(model_schemas) >= 1 - assert isinstance(model_schemas[0], AIModelEntity) - - -@pytest.mark.parametrize("setup_openai_mock", [["chat"]], indirect=True) -def test_validate_credentials_for_chat_model(setup_openai_mock): - model = ChatGLMLargeLanguageModel() - - with pytest.raises(CredentialsValidateFailedError): - model.validate_credentials(model="chatglm2-6b", credentials={"api_base": "invalid_key"}) - - model.validate_credentials(model="chatglm2-6b", credentials={"api_base": os.environ.get("CHATGLM_API_BASE")}) - - -@pytest.mark.parametrize("setup_openai_mock", [["chat"]], indirect=True) -def test_invoke_model(setup_openai_mock): - model = ChatGLMLargeLanguageModel() - - response = model.invoke( - model="chatglm2-6b", - credentials={"api_base": os.environ.get("CHATGLM_API_BASE")}, - prompt_messages=[ - SystemPromptMessage( - content="You are a helpful AI assistant.", - ), - UserPromptMessage(content="Hello World!"), - ], - model_parameters={ - "temperature": 0.7, - "top_p": 1.0, - }, - stop=["you"], - user="abc-123", - stream=False, - ) - - assert isinstance(response, LLMResult) - assert len(response.message.content) > 0 - assert response.usage.total_tokens > 0 - - -@pytest.mark.parametrize("setup_openai_mock", [["chat"]], indirect=True) -def test_invoke_stream_model(setup_openai_mock): - model = ChatGLMLargeLanguageModel() - - response = model.invoke( - model="chatglm2-6b", - credentials={"api_base": os.environ.get("CHATGLM_API_BASE")}, - prompt_messages=[ - SystemPromptMessage( - content="You are a helpful AI assistant.", - ), - UserPromptMessage(content="Hello World!"), - ], - model_parameters={ - "temperature": 0.7, - "top_p": 1.0, - }, - stop=["you"], - stream=True, - user="abc-123", - ) - - assert isinstance(response, Generator) - for chunk in response: - assert isinstance(chunk, LLMResultChunk) - assert isinstance(chunk.delta, LLMResultChunkDelta) - assert isinstance(chunk.delta.message, AssistantPromptMessage) - assert len(chunk.delta.message.content) > 0 if chunk.delta.finish_reason is None else True - - -@pytest.mark.parametrize("setup_openai_mock", [["chat"]], indirect=True) -def test_invoke_stream_model_with_functions(setup_openai_mock): - model = ChatGLMLargeLanguageModel() - - response = model.invoke( - model="chatglm3-6b", - credentials={"api_base": os.environ.get("CHATGLM_API_BASE")}, - prompt_messages=[ - SystemPromptMessage( - content="你是一个天气机器人,你不知道今天的天气怎么样,你需要通过调用一个函数来获取天气信息。" - ), - UserPromptMessage(content="波士顿天气如何?"), - ], - model_parameters={ - "temperature": 0, - "top_p": 1.0, - }, - stop=["you"], - user="abc-123", - stream=True, - tools=[ - PromptMessageTool( - name="get_current_weather", - description="Get the current weather in a given location", - parameters={ - "type": "object", - "properties": { - "location": {"type": "string", "description": "The city and state e.g. 
San Francisco, CA"}, - "unit": {"type": "string", "enum": ["celsius", "fahrenheit"]}, - }, - "required": ["location"], - }, - ) - ], - ) - - assert isinstance(response, Generator) - - call: LLMResultChunk = None - chunks = [] - - for chunk in response: - chunks.append(chunk) - assert isinstance(chunk, LLMResultChunk) - assert isinstance(chunk.delta, LLMResultChunkDelta) - assert isinstance(chunk.delta.message, AssistantPromptMessage) - assert len(chunk.delta.message.content) > 0 if chunk.delta.finish_reason is None else True - - if chunk.delta.message.tool_calls and len(chunk.delta.message.tool_calls) > 0: - call = chunk - break - - assert call is not None - assert call.delta.message.tool_calls[0].function.name == "get_current_weather" - - -@pytest.mark.parametrize("setup_openai_mock", [["chat"]], indirect=True) -def test_invoke_model_with_functions(setup_openai_mock): - model = ChatGLMLargeLanguageModel() - - response = model.invoke( - model="chatglm3-6b", - credentials={"api_base": os.environ.get("CHATGLM_API_BASE")}, - prompt_messages=[UserPromptMessage(content="What is the weather like in San Francisco?")], - model_parameters={ - "temperature": 0.7, - "top_p": 1.0, - }, - stop=["you"], - user="abc-123", - stream=False, - tools=[ - PromptMessageTool( - name="get_current_weather", - description="Get the current weather in a given location", - parameters={ - "type": "object", - "properties": { - "location": {"type": "string", "description": "The city and state e.g. San Francisco, CA"}, - "unit": {"type": "string", "enum": ["c", "f"]}, - }, - "required": ["location"], - }, - ) - ], - ) - - assert isinstance(response, LLMResult) - assert len(response.message.content) > 0 - assert response.usage.total_tokens > 0 - assert response.message.tool_calls[0].function.name == "get_current_weather" - - -def test_get_num_tokens(): - model = ChatGLMLargeLanguageModel() - - num_tokens = model.get_num_tokens( - model="chatglm2-6b", - credentials={"api_base": os.environ.get("CHATGLM_API_BASE")}, - prompt_messages=[ - SystemPromptMessage( - content="You are a helpful AI assistant.", - ), - UserPromptMessage(content="Hello World!"), - ], - tools=[ - PromptMessageTool( - name="get_current_weather", - description="Get the current weather in a given location", - parameters={ - "type": "object", - "properties": { - "location": {"type": "string", "description": "The city and state e.g. 
San Francisco, CA"}, - "unit": {"type": "string", "enum": ["c", "f"]}, - }, - "required": ["location"], - }, - ) - ], - ) - - assert isinstance(num_tokens, int) - assert num_tokens == 77 - - num_tokens = model.get_num_tokens( - model="chatglm2-6b", - credentials={"api_base": os.environ.get("CHATGLM_API_BASE")}, - prompt_messages=[ - SystemPromptMessage( - content="You are a helpful AI assistant.", - ), - UserPromptMessage(content="Hello World!"), - ], - ) - - assert isinstance(num_tokens, int) - assert num_tokens == 21 diff --git a/api/tests/integration_tests/model_runtime/chatglm/test_provider.py b/api/tests/integration_tests/model_runtime/chatglm/test_provider.py deleted file mode 100644 index 7907805d07..0000000000 --- a/api/tests/integration_tests/model_runtime/chatglm/test_provider.py +++ /dev/null @@ -1,17 +0,0 @@ -import os - -import pytest - -from core.model_runtime.errors.validate import CredentialsValidateFailedError -from core.model_runtime.model_providers.chatglm.chatglm import ChatGLMProvider -from tests.integration_tests.model_runtime.__mock.openai import setup_openai_mock - - -@pytest.mark.parametrize("setup_openai_mock", [["chat"]], indirect=True) -def test_validate_provider_credentials(setup_openai_mock): - provider = ChatGLMProvider() - - with pytest.raises(CredentialsValidateFailedError): - provider.validate_provider_credentials(credentials={"api_base": "hahahaha"}) - - provider.validate_provider_credentials(credentials={"api_base": os.environ.get("CHATGLM_API_BASE")}) diff --git a/api/tests/integration_tests/model_runtime/cohere/__init__.py b/api/tests/integration_tests/model_runtime/cohere/__init__.py deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/api/tests/integration_tests/model_runtime/cohere/test_llm.py b/api/tests/integration_tests/model_runtime/cohere/test_llm.py deleted file mode 100644 index b7f707e935..0000000000 --- a/api/tests/integration_tests/model_runtime/cohere/test_llm.py +++ /dev/null @@ -1,191 +0,0 @@ -import os -from collections.abc import Generator - -import pytest - -from core.model_runtime.entities.llm_entities import LLMResult, LLMResultChunk, LLMResultChunkDelta -from core.model_runtime.entities.message_entities import AssistantPromptMessage, SystemPromptMessage, UserPromptMessage -from core.model_runtime.errors.validate import CredentialsValidateFailedError -from core.model_runtime.model_providers.cohere.llm.llm import CohereLargeLanguageModel - - -def test_validate_credentials_for_chat_model(): - model = CohereLargeLanguageModel() - - with pytest.raises(CredentialsValidateFailedError): - model.validate_credentials(model="command-light-chat", credentials={"api_key": "invalid_key"}) - - model.validate_credentials(model="command-light-chat", credentials={"api_key": os.environ.get("COHERE_API_KEY")}) - - -def test_validate_credentials_for_completion_model(): - model = CohereLargeLanguageModel() - - with pytest.raises(CredentialsValidateFailedError): - model.validate_credentials(model="command-light", credentials={"api_key": "invalid_key"}) - - model.validate_credentials(model="command-light", credentials={"api_key": os.environ.get("COHERE_API_KEY")}) - - -def test_invoke_completion_model(): - model = CohereLargeLanguageModel() - - credentials = {"api_key": os.environ.get("COHERE_API_KEY")} - - result = model.invoke( - model="command-light", - credentials=credentials, - prompt_messages=[UserPromptMessage(content="Hello World!")], - model_parameters={"temperature": 0.0, "max_tokens": 1}, - stream=False, - user="abc-123", - ) - - 
assert isinstance(result, LLMResult) - assert len(result.message.content) > 0 - assert model._num_tokens_from_string("command-light", credentials, result.message.content) == 1 - - -def test_invoke_stream_completion_model(): - model = CohereLargeLanguageModel() - - result = model.invoke( - model="command-light", - credentials={"api_key": os.environ.get("COHERE_API_KEY")}, - prompt_messages=[UserPromptMessage(content="Hello World!")], - model_parameters={"temperature": 0.0, "max_tokens": 100}, - stream=True, - user="abc-123", - ) - - assert isinstance(result, Generator) - - for chunk in result: - assert isinstance(chunk, LLMResultChunk) - assert isinstance(chunk.delta, LLMResultChunkDelta) - assert isinstance(chunk.delta.message, AssistantPromptMessage) - assert len(chunk.delta.message.content) > 0 if chunk.delta.finish_reason is None else True - - -def test_invoke_chat_model(): - model = CohereLargeLanguageModel() - - result = model.invoke( - model="command-light-chat", - credentials={"api_key": os.environ.get("COHERE_API_KEY")}, - prompt_messages=[ - SystemPromptMessage( - content="You are a helpful AI assistant.", - ), - UserPromptMessage(content="Hello World!"), - ], - model_parameters={ - "temperature": 0.0, - "p": 0.99, - "presence_penalty": 0.0, - "frequency_penalty": 0.0, - "max_tokens": 10, - }, - stop=["How"], - stream=False, - user="abc-123", - ) - - assert isinstance(result, LLMResult) - assert len(result.message.content) > 0 - - -def test_invoke_stream_chat_model(): - model = CohereLargeLanguageModel() - - result = model.invoke( - model="command-light-chat", - credentials={"api_key": os.environ.get("COHERE_API_KEY")}, - prompt_messages=[ - SystemPromptMessage( - content="You are a helpful AI assistant.", - ), - UserPromptMessage(content="Hello World!"), - ], - model_parameters={"temperature": 0.0, "max_tokens": 100}, - stream=True, - user="abc-123", - ) - - assert isinstance(result, Generator) - - for chunk in result: - assert isinstance(chunk, LLMResultChunk) - assert isinstance(chunk.delta, LLMResultChunkDelta) - assert isinstance(chunk.delta.message, AssistantPromptMessage) - assert len(chunk.delta.message.content) > 0 if chunk.delta.finish_reason is None else True - if chunk.delta.finish_reason is not None: - assert chunk.delta.usage is not None - assert chunk.delta.usage.completion_tokens > 0 - - -def test_get_num_tokens(): - model = CohereLargeLanguageModel() - - num_tokens = model.get_num_tokens( - model="command-light", - credentials={"api_key": os.environ.get("COHERE_API_KEY")}, - prompt_messages=[UserPromptMessage(content="Hello World!")], - ) - - assert num_tokens == 3 - - num_tokens = model.get_num_tokens( - model="command-light-chat", - credentials={"api_key": os.environ.get("COHERE_API_KEY")}, - prompt_messages=[ - SystemPromptMessage( - content="You are a helpful AI assistant.", - ), - UserPromptMessage(content="Hello World!"), - ], - ) - - assert num_tokens == 15 - - -def test_fine_tuned_model(): - model = CohereLargeLanguageModel() - - # test invoke - result = model.invoke( - model="85ec47be-6139-4f75-a4be-0f0ec1ef115c-ft", - credentials={"api_key": os.environ.get("COHERE_API_KEY"), "mode": "completion"}, - prompt_messages=[ - SystemPromptMessage( - content="You are a helpful AI assistant.", - ), - UserPromptMessage(content="Hello World!"), - ], - model_parameters={"temperature": 0.0, "max_tokens": 100}, - stream=False, - user="abc-123", - ) - - assert isinstance(result, LLMResult) - - -def test_fine_tuned_chat_model(): - model = CohereLargeLanguageModel() - - # 
test invoke - result = model.invoke( - model="94f2d55a-4c79-4c00-bde4-23962e74b170-ft", - credentials={"api_key": os.environ.get("COHERE_API_KEY"), "mode": "chat"}, - prompt_messages=[ - SystemPromptMessage( - content="You are a helpful AI assistant.", - ), - UserPromptMessage(content="Hello World!"), - ], - model_parameters={"temperature": 0.0, "max_tokens": 100}, - stream=False, - user="abc-123", - ) - - assert isinstance(result, LLMResult) diff --git a/api/tests/integration_tests/model_runtime/cohere/test_provider.py b/api/tests/integration_tests/model_runtime/cohere/test_provider.py deleted file mode 100644 index fb7e6d3498..0000000000 --- a/api/tests/integration_tests/model_runtime/cohere/test_provider.py +++ /dev/null @@ -1,15 +0,0 @@ -import os - -import pytest - -from core.model_runtime.errors.validate import CredentialsValidateFailedError -from core.model_runtime.model_providers.cohere.cohere import CohereProvider - - -def test_validate_provider_credentials(): - provider = CohereProvider() - - with pytest.raises(CredentialsValidateFailedError): - provider.validate_provider_credentials(credentials={}) - - provider.validate_provider_credentials(credentials={"api_key": os.environ.get("COHERE_API_KEY")}) diff --git a/api/tests/integration_tests/model_runtime/cohere/test_rerank.py b/api/tests/integration_tests/model_runtime/cohere/test_rerank.py deleted file mode 100644 index a1b6922128..0000000000 --- a/api/tests/integration_tests/model_runtime/cohere/test_rerank.py +++ /dev/null @@ -1,40 +0,0 @@ -import os - -import pytest - -from core.model_runtime.entities.rerank_entities import RerankResult -from core.model_runtime.errors.validate import CredentialsValidateFailedError -from core.model_runtime.model_providers.cohere.rerank.rerank import CohereRerankModel - - -def test_validate_credentials(): - model = CohereRerankModel() - - with pytest.raises(CredentialsValidateFailedError): - model.validate_credentials(model="rerank-english-v2.0", credentials={"api_key": "invalid_key"}) - - model.validate_credentials(model="rerank-english-v2.0", credentials={"api_key": os.environ.get("COHERE_API_KEY")}) - - -def test_invoke_model(): - model = CohereRerankModel() - - result = model.invoke( - model="rerank-english-v2.0", - credentials={"api_key": os.environ.get("COHERE_API_KEY")}, - query="What is the capital of the United States?", - docs=[ - "Carson City is the capital city of the American state of Nevada. At the 2010 United States " - "Census, Carson City had a population of 55,274.", - "Washington, D.C. (also known as simply Washington or D.C., and officially as the District of Columbia) " - "is the capital of the United States. It is a federal district. The President of the USA and many major " - "national government offices are in the territory. 
This makes it the political center of the United " - "States of America.", - ], - score_threshold=0.8, - ) - - assert isinstance(result, RerankResult) - assert len(result.docs) == 1 - assert result.docs[0].index == 1 - assert result.docs[0].score >= 0.8 diff --git a/api/tests/integration_tests/model_runtime/cohere/test_text_embedding.py b/api/tests/integration_tests/model_runtime/cohere/test_text_embedding.py deleted file mode 100644 index ae26d36635..0000000000 --- a/api/tests/integration_tests/model_runtime/cohere/test_text_embedding.py +++ /dev/null @@ -1,45 +0,0 @@ -import os - -import pytest - -from core.model_runtime.entities.text_embedding_entities import TextEmbeddingResult -from core.model_runtime.errors.validate import CredentialsValidateFailedError -from core.model_runtime.model_providers.cohere.text_embedding.text_embedding import CohereTextEmbeddingModel - - -def test_validate_credentials(): - model = CohereTextEmbeddingModel() - - with pytest.raises(CredentialsValidateFailedError): - model.validate_credentials(model="embed-multilingual-v3.0", credentials={"api_key": "invalid_key"}) - - model.validate_credentials( - model="embed-multilingual-v3.0", credentials={"api_key": os.environ.get("COHERE_API_KEY")} - ) - - -def test_invoke_model(): - model = CohereTextEmbeddingModel() - - result = model.invoke( - model="embed-multilingual-v3.0", - credentials={"api_key": os.environ.get("COHERE_API_KEY")}, - texts=["hello", "world", " ".join(["long_text"] * 100), " ".join(["another_long_text"] * 100)], - user="abc-123", - ) - - assert isinstance(result, TextEmbeddingResult) - assert len(result.embeddings) == 4 - assert result.usage.total_tokens == 811 - - -def test_get_num_tokens(): - model = CohereTextEmbeddingModel() - - num_tokens = model.get_num_tokens( - model="embed-multilingual-v3.0", - credentials={"api_key": os.environ.get("COHERE_API_KEY")}, - texts=["hello", "world"], - ) - - assert num_tokens == 3 diff --git a/api/tests/integration_tests/model_runtime/fireworks/__init__.py b/api/tests/integration_tests/model_runtime/fireworks/__init__.py deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/api/tests/integration_tests/model_runtime/fireworks/test_llm.py b/api/tests/integration_tests/model_runtime/fireworks/test_llm.py deleted file mode 100644 index 699ca293a2..0000000000 --- a/api/tests/integration_tests/model_runtime/fireworks/test_llm.py +++ /dev/null @@ -1,186 +0,0 @@ -import os -from collections.abc import Generator - -import pytest - -from core.model_runtime.entities.llm_entities import LLMResult, LLMResultChunk, LLMResultChunkDelta -from core.model_runtime.entities.message_entities import ( - AssistantPromptMessage, - PromptMessageTool, - SystemPromptMessage, - UserPromptMessage, -) -from core.model_runtime.entities.model_entities import AIModelEntity -from core.model_runtime.errors.validate import CredentialsValidateFailedError -from core.model_runtime.model_providers.fireworks.llm.llm import FireworksLargeLanguageModel - -"""FOR MOCK FIXTURES, DO NOT REMOVE""" -from tests.integration_tests.model_runtime.__mock.openai import setup_openai_mock - - -def test_predefined_models(): - model = FireworksLargeLanguageModel() - model_schemas = model.predefined_models() - - assert len(model_schemas) >= 1 - assert isinstance(model_schemas[0], AIModelEntity) - - -@pytest.mark.parametrize("setup_openai_mock", [["chat"]], indirect=True) -def test_validate_credentials_for_chat_model(setup_openai_mock): - model = FireworksLargeLanguageModel() - - with 
pytest.raises(CredentialsValidateFailedError): - # model name to gpt-3.5-turbo because of mocking - model.validate_credentials(model="gpt-3.5-turbo", credentials={"fireworks_api_key": "invalid_key"}) - - model.validate_credentials( - model="accounts/fireworks/models/llama-v3p1-8b-instruct", - credentials={"fireworks_api_key": os.environ.get("FIREWORKS_API_KEY")}, - ) - - -@pytest.mark.parametrize("setup_openai_mock", [["chat"]], indirect=True) -def test_invoke_chat_model(setup_openai_mock): - model = FireworksLargeLanguageModel() - - result = model.invoke( - model="accounts/fireworks/models/llama-v3p1-8b-instruct", - credentials={"fireworks_api_key": os.environ.get("FIREWORKS_API_KEY")}, - prompt_messages=[ - SystemPromptMessage( - content="You are a helpful AI assistant.", - ), - UserPromptMessage(content="Hello World!"), - ], - model_parameters={ - "temperature": 0.0, - "top_p": 1.0, - "presence_penalty": 0.0, - "frequency_penalty": 0.0, - "max_tokens": 10, - }, - stop=["How"], - stream=False, - user="foo", - ) - - assert isinstance(result, LLMResult) - assert len(result.message.content) > 0 - - -@pytest.mark.parametrize("setup_openai_mock", [["chat"]], indirect=True) -def test_invoke_chat_model_with_tools(setup_openai_mock): - model = FireworksLargeLanguageModel() - - result = model.invoke( - model="accounts/fireworks/models/llama-v3p1-8b-instruct", - credentials={"fireworks_api_key": os.environ.get("FIREWORKS_API_KEY")}, - prompt_messages=[ - SystemPromptMessage( - content="You are a helpful AI assistant.", - ), - UserPromptMessage( - content="what's the weather today in London?", - ), - ], - model_parameters={"temperature": 0.0, "max_tokens": 100}, - tools=[ - PromptMessageTool( - name="get_weather", - description="Determine weather in my location", - parameters={ - "type": "object", - "properties": { - "location": {"type": "string", "description": "The city and state e.g. 
San Francisco, CA"}, - "unit": {"type": "string", "enum": ["c", "f"]}, - }, - "required": ["location"], - }, - ), - PromptMessageTool( - name="get_stock_price", - description="Get the current stock price", - parameters={ - "type": "object", - "properties": {"symbol": {"type": "string", "description": "The stock symbol"}}, - "required": ["symbol"], - }, - ), - ], - stream=False, - user="foo", - ) - - assert isinstance(result, LLMResult) - assert isinstance(result.message, AssistantPromptMessage) - assert len(result.message.tool_calls) > 0 - - -@pytest.mark.parametrize("setup_openai_mock", [["chat"]], indirect=True) -def test_invoke_stream_chat_model(setup_openai_mock): - model = FireworksLargeLanguageModel() - - result = model.invoke( - model="accounts/fireworks/models/llama-v3p1-8b-instruct", - credentials={"fireworks_api_key": os.environ.get("FIREWORKS_API_KEY")}, - prompt_messages=[ - SystemPromptMessage( - content="You are a helpful AI assistant.", - ), - UserPromptMessage(content="Hello World!"), - ], - model_parameters={"temperature": 0.0, "max_tokens": 100}, - stream=True, - user="foo", - ) - - assert isinstance(result, Generator) - - for chunk in result: - assert isinstance(chunk, LLMResultChunk) - assert isinstance(chunk.delta, LLMResultChunkDelta) - assert isinstance(chunk.delta.message, AssistantPromptMessage) - assert len(chunk.delta.message.content) > 0 if chunk.delta.finish_reason is None else True - if chunk.delta.finish_reason is not None: - assert chunk.delta.usage is not None - assert chunk.delta.usage.completion_tokens > 0 - - -def test_get_num_tokens(): - model = FireworksLargeLanguageModel() - - num_tokens = model.get_num_tokens( - model="accounts/fireworks/models/llama-v3p1-8b-instruct", - credentials={"fireworks_api_key": os.environ.get("FIREWORKS_API_KEY")}, - prompt_messages=[UserPromptMessage(content="Hello World!")], - ) - - assert num_tokens == 10 - - num_tokens = model.get_num_tokens( - model="accounts/fireworks/models/llama-v3p1-8b-instruct", - credentials={"fireworks_api_key": os.environ.get("FIREWORKS_API_KEY")}, - prompt_messages=[ - SystemPromptMessage( - content="You are a helpful AI assistant.", - ), - UserPromptMessage(content="Hello World!"), - ], - tools=[ - PromptMessageTool( - name="get_weather", - description="Determine weather in my location", - parameters={ - "type": "object", - "properties": { - "location": {"type": "string", "description": "The city and state e.g. 
San Francisco, CA"}, - "unit": {"type": "string", "enum": ["c", "f"]}, - }, - "required": ["location"], - }, - ), - ], - ) - - assert num_tokens == 77 diff --git a/api/tests/integration_tests/model_runtime/fireworks/test_provider.py b/api/tests/integration_tests/model_runtime/fireworks/test_provider.py deleted file mode 100644 index a68cf1a1a8..0000000000 --- a/api/tests/integration_tests/model_runtime/fireworks/test_provider.py +++ /dev/null @@ -1,17 +0,0 @@ -import os - -import pytest - -from core.model_runtime.errors.validate import CredentialsValidateFailedError -from core.model_runtime.model_providers.fireworks.fireworks import FireworksProvider -from tests.integration_tests.model_runtime.__mock.openai import setup_openai_mock - - -@pytest.mark.parametrize("setup_openai_mock", [["chat"]], indirect=True) -def test_validate_provider_credentials(setup_openai_mock): - provider = FireworksProvider() - - with pytest.raises(CredentialsValidateFailedError): - provider.validate_provider_credentials(credentials={}) - - provider.validate_provider_credentials(credentials={"fireworks_api_key": os.environ.get("FIREWORKS_API_KEY")}) diff --git a/api/tests/integration_tests/model_runtime/fireworks/test_text_embedding.py b/api/tests/integration_tests/model_runtime/fireworks/test_text_embedding.py deleted file mode 100644 index 7bf723b3a9..0000000000 --- a/api/tests/integration_tests/model_runtime/fireworks/test_text_embedding.py +++ /dev/null @@ -1,54 +0,0 @@ -import os - -import pytest - -from core.model_runtime.entities.text_embedding_entities import TextEmbeddingResult -from core.model_runtime.errors.validate import CredentialsValidateFailedError -from core.model_runtime.model_providers.fireworks.text_embedding.text_embedding import FireworksTextEmbeddingModel -from tests.integration_tests.model_runtime.__mock.openai import setup_openai_mock - - -@pytest.mark.parametrize("setup_openai_mock", [["text_embedding"]], indirect=True) -def test_validate_credentials(setup_openai_mock): - model = FireworksTextEmbeddingModel() - - with pytest.raises(CredentialsValidateFailedError): - model.validate_credentials( - model="nomic-ai/nomic-embed-text-v1.5", credentials={"fireworks_api_key": "invalid_key"} - ) - - model.validate_credentials( - model="nomic-ai/nomic-embed-text-v1.5", credentials={"fireworks_api_key": os.environ.get("FIREWORKS_API_KEY")} - ) - - -@pytest.mark.parametrize("setup_openai_mock", [["text_embedding"]], indirect=True) -def test_invoke_model(setup_openai_mock): - model = FireworksTextEmbeddingModel() - - result = model.invoke( - model="nomic-ai/nomic-embed-text-v1.5", - credentials={ - "fireworks_api_key": os.environ.get("FIREWORKS_API_KEY"), - }, - texts=["hello", "world", " ".join(["long_text"] * 100), " ".join(["another_long_text"] * 100)], - user="foo", - ) - - assert isinstance(result, TextEmbeddingResult) - assert len(result.embeddings) == 4 - assert result.usage.total_tokens == 2 - - -def test_get_num_tokens(): - model = FireworksTextEmbeddingModel() - - num_tokens = model.get_num_tokens( - model="nomic-ai/nomic-embed-text-v1.5", - credentials={ - "fireworks_api_key": os.environ.get("FIREWORKS_API_KEY"), - }, - texts=["hello", "world"], - ) - - assert num_tokens == 2 diff --git a/api/tests/integration_tests/model_runtime/fishaudio/__init__.py b/api/tests/integration_tests/model_runtime/fishaudio/__init__.py deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/api/tests/integration_tests/model_runtime/fishaudio/test_provider.py 
b/api/tests/integration_tests/model_runtime/fishaudio/test_provider.py deleted file mode 100644 index 3526574b61..0000000000 --- a/api/tests/integration_tests/model_runtime/fishaudio/test_provider.py +++ /dev/null @@ -1,33 +0,0 @@ -import os - -import httpx -import pytest - -from core.model_runtime.errors.validate import CredentialsValidateFailedError -from core.model_runtime.model_providers.fishaudio.fishaudio import FishAudioProvider -from tests.integration_tests.model_runtime.__mock.fishaudio import setup_fishaudio_mock - - -@pytest.mark.parametrize("setup_fishaudio_mock", [["list-models"]], indirect=True) -def test_validate_provider_credentials(setup_fishaudio_mock): - print("-----", httpx.get) - provider = FishAudioProvider() - - with pytest.raises(CredentialsValidateFailedError): - provider.validate_provider_credentials( - credentials={ - "api_key": "bad_api_key", - "api_base": os.environ.get("FISH_AUDIO_API_BASE", "https://api.fish.audio"), - "use_public_models": "false", - "latency": "normal", - } - ) - - provider.validate_provider_credentials( - credentials={ - "api_key": os.environ.get("FISH_AUDIO_API_KEY", "test"), - "api_base": os.environ.get("FISH_AUDIO_API_BASE", "https://api.fish.audio"), - "use_public_models": "false", - "latency": "normal", - } - ) diff --git a/api/tests/integration_tests/model_runtime/fishaudio/test_tts.py b/api/tests/integration_tests/model_runtime/fishaudio/test_tts.py deleted file mode 100644 index f61fee28b9..0000000000 --- a/api/tests/integration_tests/model_runtime/fishaudio/test_tts.py +++ /dev/null @@ -1,32 +0,0 @@ -import os - -import pytest - -from core.model_runtime.model_providers.fishaudio.tts.tts import ( - FishAudioText2SpeechModel, -) -from tests.integration_tests.model_runtime.__mock.fishaudio import setup_fishaudio_mock - - -@pytest.mark.parametrize("setup_fishaudio_mock", [["tts"]], indirect=True) -def test_invoke_model(setup_fishaudio_mock): - model = FishAudioText2SpeechModel() - - result = model.invoke( - model="tts-default", - tenant_id="test", - credentials={ - "api_key": os.environ.get("FISH_AUDIO_API_KEY", "test"), - "api_base": os.environ.get("FISH_AUDIO_API_BASE", "https://api.fish.audio"), - "use_public_models": "false", - "latency": "normal", - }, - content_text="Hello, world!", - voice="03397b4c4be74759b72533b663fbd001", - ) - - content = b"" - for chunk in result: - content += chunk - - assert content != b"" diff --git a/api/tests/integration_tests/model_runtime/google/__init__.py b/api/tests/integration_tests/model_runtime/google/__init__.py deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/api/tests/integration_tests/model_runtime/google/test_llm.py b/api/tests/integration_tests/model_runtime/google/test_llm.py deleted file mode 100644 index 34d08f270a..0000000000 --- a/api/tests/integration_tests/model_runtime/google/test_llm.py +++ /dev/null @@ -1,177 +0,0 @@ -import os -from collections.abc import Generator - -import pytest - -from core.model_runtime.entities.llm_entities import LLMResult, LLMResultChunk, LLMResultChunkDelta -from core.model_runtime.entities.message_entities import ( - AssistantPromptMessage, - ImagePromptMessageContent, - SystemPromptMessage, - TextPromptMessageContent, - UserPromptMessage, -) -from core.model_runtime.errors.validate import CredentialsValidateFailedError -from core.model_runtime.model_providers.google.llm.llm import GoogleLargeLanguageModel -from tests.integration_tests.model_runtime.__mock.google import setup_google_mock - - 
-@pytest.mark.parametrize("setup_google_mock", [["none"]], indirect=True) -def test_validate_credentials(setup_google_mock): - model = GoogleLargeLanguageModel() - - with pytest.raises(CredentialsValidateFailedError): - model.validate_credentials(model="gemini-pro", credentials={"google_api_key": "invalid_key"}) - - model.validate_credentials(model="gemini-pro", credentials={"google_api_key": os.environ.get("GOOGLE_API_KEY")}) - - -@pytest.mark.parametrize("setup_google_mock", [["none"]], indirect=True) -def test_invoke_model(setup_google_mock): - model = GoogleLargeLanguageModel() - - response = model.invoke( - model="gemini-pro", - credentials={"google_api_key": os.environ.get("GOOGLE_API_KEY")}, - prompt_messages=[ - SystemPromptMessage( - content="You are a helpful AI assistant.", - ), - UserPromptMessage(content="Give me your worst dad joke or i will unplug you"), - AssistantPromptMessage( - content="Why did the scarecrow win an award? Because he was outstanding in his field!" - ), - UserPromptMessage( - content=[ - TextPromptMessageContent(data="ok something snarkier pls"), - TextPromptMessageContent(data="i may still unplug you"), - ] - ), - ], - model_parameters={"temperature": 0.5, "top_p": 1.0, "max_tokens_to_sample": 2048}, - stop=["How"], - stream=False, - user="abc-123", - ) - - assert isinstance(response, LLMResult) - assert len(response.message.content) > 0 - - -@pytest.mark.parametrize("setup_google_mock", [["none"]], indirect=True) -def test_invoke_stream_model(setup_google_mock): - model = GoogleLargeLanguageModel() - - response = model.invoke( - model="gemini-pro", - credentials={"google_api_key": os.environ.get("GOOGLE_API_KEY")}, - prompt_messages=[ - SystemPromptMessage( - content="You are a helpful AI assistant.", - ), - UserPromptMessage(content="Give me your worst dad joke or i will unplug you"), - AssistantPromptMessage( - content="Why did the scarecrow win an award? Because he was outstanding in his field!" 
- ), - UserPromptMessage( - content=[ - TextPromptMessageContent(data="ok something snarkier pls"), - TextPromptMessageContent(data="i may still unplug you"), - ] - ), - ], - model_parameters={"temperature": 0.2, "top_k": 5, "max_tokens_to_sample": 2048}, - stream=True, - user="abc-123", - ) - - assert isinstance(response, Generator) - - for chunk in response: - assert isinstance(chunk, LLMResultChunk) - assert isinstance(chunk.delta, LLMResultChunkDelta) - assert isinstance(chunk.delta.message, AssistantPromptMessage) - assert len(chunk.delta.message.content) > 0 if chunk.delta.finish_reason is None else True - - -@pytest.mark.parametrize("setup_google_mock", [["none"]], indirect=True) -def test_invoke_chat_model_with_vision(setup_google_mock): - model = GoogleLargeLanguageModel() - - result = model.invoke( - model="gemini-pro-vision", - credentials={"google_api_key": os.environ.get("GOOGLE_API_KEY")}, - prompt_messages=[ - SystemPromptMessage( - content="You are a helpful AI assistant.", - ), - UserPromptMessage( - content=[ - TextPromptMessageContent(data="what do you see?"), - ImagePromptMessageContent( - data="data:image/png;base64,[... base64-encoded PNG image data omitted ...]
XmwXcYI1DangAA6yspgBj/dRjp6L+RbmSPaaxuuMnGEeVAhBF4pSapAFG5gUo60rAHmpVtcz0sR2aBZW8NAB9+W7dXr9N0dmPmUcu10pWrq7kQQvBQXn1dUsgoM4ej12TtyBknG51PEMGOV2TLLVZ/GLvLMBYHsYJhg7fuMBx6tq3LFu7aBxxD9jKFiO7Thbwcv7n5dS+/ML0eWEWcBqoptk+mEQp2aTG+rbmBYA+D6MyMwMAdepKsX5QpnglFZyZ5k4tDYsI/Y1pF7CRq22HoHXgGEOwgodvgH79INnW3tlFIVVQvkBXg1dvF3z27fkTGzw+zALOPZluVoVkV4yLHoBB3VBJUNyo6uEWXAyIkruC2OQjbVeppxkm8+iti2mySsM1EPYGKBcEyul3LKTW1+pr+wLRstwP0J8a2K95Txf/+6q1ZzeUDEXt/oFhHnA4fJYCBtawYlWmlsrJBEHhP43bi9Rq1Z0ymlK3Z/QCRqA5YfaNLZJWEACn929eluXlUGO8CgMrHWYi441S2tsFebLRL5RWL0e0nL64SEEf2sjMR4ZZwA0Ddfziclz1eN8yDn1qAaHSq3G0FEQXjABDo51sJVNyGnA0QlAPL4LOApzMo0mY1sUFbQBj8xTzYhKrROYF5VGIftR1uW3+3uiWU8XnBw7l3HIYVG/P/djYgMZoyrTJrci0n2qPZVnNFV913viW6btGzsXBT6aW3VKmsauVTFOc2DxpP5YJYLBBeCUixE71IlGBR2EF+6OugHbP12Ddoj29HgIPj+cxDiPDFGINzB8sKhLh0Ui4gOgDI8deb8FiwYxlteWhLHWTlmOzhkxLAObPIkFqS8+bbG5BdgWiAmJTwXdqZ7oysktzdKC/BWMWiAJNpyP0ZPTMItRy7fTi2RB4eDwLuIkpCma1gob/Dsw7zcKAMf3txiCot8c42ZCDPu3WAqRMJAGEk4cACaLzSZsFRhAE9QoAtXcwTX92XDT0sxTQXJYHdDJin0KfVN8PmzNvnOYBx5XNlik4giumihb7tJ60ezgNhgXuXgRNttxunZYAj7uzbL3nUA67rm5KJWrJCyTfIVwBMh3bTkD8TqFYp6uv8RwrgJpAZmHHScqv0qWeKT48NujhAuELekyYBdz9gXJQ53DvDh3tU62xTtN8bQhzzE9OccAK8wA2ez2k3cNtN7wM/RZs9M5NkNZoee0H2rmhLr8miPV9roAZtN1RHV/gDb7EoUtXKeXjYXUBN0oeFs8CbrtlhZRGPZSSZNyI9gA+TBFkelFNWxgEgCtG3wDiFqEr5Jz6y/U1DAM4QLxi2l7DNhl3w/epNTUFWGbXC7HrMQMz7WUbf8AaDQ46DYXuxLoJX6CFRzvuiPyJzCzgZIoKyqgKAx1yAGPQUWfa+GoDsqwDJNnHLF9juSz0i5VrpvqSwmsQul5dtyfrfX1zL3i0WdHHSjaKVjf0T5k7ABtxlEHbwxusgjydAY8N84BjvAx5GLfMqBW0VJEZ+pwKskQnbpnFHPzpwWo/bzkGvX51296+bu1v/+qL9usXT9rTJ07Bzh9k9HEPsxNhwhh6xLXKo3fXWf3iMkrBBz9nAbflbHm6ONxhXp8/NW26lkSleIEV9FBVI+o6ihjmffPDt+3v/+5Z+82vnsZw/fyercweB2d7wzA8mfuPEknpXTnHvQsoPd1v/aD8LODw+AxbAw/QjnEfv69u5kz6dtOiW2R6YmW7vd0C3qK94wcjf/zxZ1bRXfvqGT6U3f2G/Z6AesqotgJX477PNVmTmxfiwTSS5irqz2ybEHD6PzbMAk7lS/0BxgkTqPAUYBiAkQpTLLdKxe1D4Lbsp968uW1vXk+ZrnpsN7yL1TbmbvCl4GcPPPStZWyNcM9s++9y92ruZu2CT21q7lZ9KDcLuC3WbmGG42uA30EISOVkFynt1BBialOliF/wZHqGTa1tOfq8fbMHPL6N2iBPW2d7HfxZdWnreiN49UL0dfhLR6tBSVVwNo+TQ1U5IsHvQU4Dcry7bGNOix+SngVcwAhYpZjTQxaNMABLLLtUFEAMEwi4kk63fGDbLTcVm82ubd7hNylzEXCa6SPdz2Vf5iUobe0jAFIq8+JHT8CjGeUjHFOj5E7MIO4THxvOaHIcwu2IOKiznyg89BTEXi6WssO8B36vkLa33Pv7/QRbEtm21c/BtIm9Yb4ho19PDg4g09aeucySdpzq3BfVx6WQqh7MkLOSkHLf2olEKni4n7xznh0VH4jnAYdy6hfVSZTvUmF54f2cU9d9XmlhvUyTlbkxIT0BWtgH4wRRgPMy7EFbAwi8ojzbNyqtH/7coWxnUHyE+rmYjbs3NCnqdwIbbM/GZ4RZwDleVskO3viSBhWjSu2Pxj7JU4bsqrzTU5YZQ7xKu73Bb8bAbo+s28NStxEyb8e+K1UAKXhOVivK7x0RUANf3zEw/smJpsr37cad9RlhFnCbzQYwfN36I+5qwxgVwRA/vOHxlneeMiaux9lymN5tTTttkZN5mbZwCYsLM550taA+zJM5gsdHsGSdQTbngN7ZlC/JrRhXIcorRJvVcp2pnjzdy+0nnErOCbOAE5x8d4oVCy4xMSFGetjfgWJ3MQFHdomxZbUwwC4B84YlzBNojUEmxmqO1tVC4VcVopUzKuXK+XArUeDVTyq85wv7xKqHsel1dfIUkl8zUXcFm8eUH7IPjWcBp8J5mYxWcWmbclhlyEIAMJm2HbSwDCHZGD9IuR1UH4MhaZ4HOAIQIJOrIxfjxOFRUMNQq8wI9EH5WNVJdcEje22ofxs3K6PlQ+OZwA2ghrFSKhiEVSqh/5JJcfodKBnntLac7wb5CKLpAs+0RguYuAhoNh2CRV1dTVFhqWhRn/u+tOsMtTph6JhOkAWsQDz1K3NHeHyYBZyK70BG5oy3SyqGumoaAhr1Aiggnm8FzXr3cQWSq++p8seM10v6LW9Elgh5kyGINXMdi1xspw2LRHwqMjJTV2KdU9c2eQ1SkXDDHL2aYf2MprVp1dFrtcBlAWB/sNuxMoJIzEfRqhMk04qXfM0n8yVDaa/DRLp1GuGSKhNz65ZEOQUSdyD0Y/adRSojsxjoz2jnNFdN3l/S+sUvnqbDsx+zgCvQMJzhPaCrlouCLBvbA43x68DhsAc7DxpTr0y39VAMBCfpSlpSUMggzRe8X4bIAWRYJqVJj6t7feMV/9Bkfeb+bYw2Czg78S3GwWtEQEPRWFMMEDAZhVTiMaWLnZZRxSexfaStPR9DAXbMj5Qs479Dm8PqqYCNEpUTVAe/GpLC3vH16hI64zkLuB1XQVsdFkED8ps40oLjj2sMAdbFwGlKRjbW6UHAFZaRJVegIpeWVafZhQ4yHahUm+5VyfOwXYFHTX8DKUNSn+fCcsN3qOd8AT3GGPEs4EYnxho9YlOnU1WTUj98GbLKWCawI5wk71DiBMoh+qjYfgXUc+nNlW+rXuqjOrknPAs4sRoHcvvNguDZNEChYOoBUUZ175z9nMBZnQ6cnncgS7uDnt3BJ49Y8axqPYLZ0gVEb2DaICyHtOUM5t
2eP7AJexWaGWYBVzcdsqneoAAViyzzo3ZsC1Jeq2qBKVhlkIxDsuSRrSY6/6S6eaaFjD+B4BGmMo9X9M06kcAdMq0qU5eT+lBBc8+GqaVmCc989iHP6yVvOcr4qE8ZLijVZ8VleC/5xWDWFmN6ow6aIKX75EfdL5rfKxBJgAcwwV/zeXrFjyqqo3uy52dnMa5oU4O7svo7YMNgWrFKdsk6WBXmmS82HuKsuADjHZFGi5iBIv+9qnn/qt+qSh3JTFNjPvWDiqpnA0SexYB/ijm6q5qP85wFnIZrXQHgillpVesHh9QVaAWWAJccfo/VNrOcbmrbYn/vCR9gy2m1aUH2WOa/rv4UoKnhPODowC2Gx6jQo4Nox4ZinDL392ssIHFSZWa1rTZJD/wSy0Kn34eDpwZvP1w96+dmH25zrsQs4KSLP4GAawWSjhnFZZQFmUZxOZSTj/ne2yUhIHCjRIlFKcIU0x852RjZTGGlDdaQrkxk7MPrJr/gzg17r4vgJ3rMAk4/wmQDE7wJhg+fFV1xaMGiMqnXaFc5jd4FjCCIRAEmAO5aPE7lzsw0ZelHYJB0PCWscErqOJcsrbllGmhmzE/7mAXcPof544Wlqg6wTuORtvKQzjV2gVC+shaNMhc24v8iIloGmS3ogc7bD9sS884Oi0kEP89jFnDX++/hCtPVtT7kwaxOkZpmxQ/L9vgdj1r+NCtAwQ6/A9DXMXnBqZgoHDdXP7Wna/Id6PRCum7DiREqcg1UPw9Yp6MsLv/HwlM4Hp7WQ1/CGQhcgDsDNJtcgLsAdyYCZza7MO4C3JkInNnswrgLcGcicGazC+POBO7/AH5zPa/ivytzAAAAAElFTkSuQmCC" - ), - ] - ), - ], - model_parameters={"temperature": 0.3, "top_p": 0.2, "top_k": 3, "max_tokens": 100}, - stream=False, - user="abc-123", - ) - - assert isinstance(result, LLMResult) - assert len(result.message.content) > 0 - - -@pytest.mark.parametrize("setup_google_mock", [["none"]], indirect=True) -def test_invoke_chat_model_with_vision_multi_pics(setup_google_mock): - model = GoogleLargeLanguageModel() - - result = model.invoke( - model="gemini-pro-vision", - credentials={"google_api_key": os.environ.get("GOOGLE_API_KEY")}, - prompt_messages=[ - SystemPromptMessage(content="You are a helpful AI assistant."), - UserPromptMessage( - content=[ - TextPromptMessageContent(data="what do you see?"), - ImagePromptMessageContent( - data="data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAE4AAABMCAYAAADDYoEWAAAMQGlDQ1BJQ0MgUHJvZmlsZQAASImVVwdYU8kWnluSkEBoAQSkhN4EkRpASggt9I4gKiEJEEqMgaBiRxcVXLuIgA1dFVGwAmJBETuLYu+LBRVlXSzYlTcpoOu+8r35vrnz33/O/OfMmbllAFA7zhGJclF1APKEBeLYYH/6uOQUOukpIAEdoAy0gA2Hmy9iRkeHA1iG2r+Xd9cBIm2v2Eu1/tn/X4sGj5/PBQCJhjidl8/Ng/gAAHg1VyQuAIAo5c2mFoikGFagJYYBQrxIijPluFqK0+V4j8wmPpYFcTsASiocjjgTANVLkKcXcjOhhmo/xI5CnkAIgBodYp+8vMk8iNMgtoY2Ioil+oz0H3Qy/6aZPqzJ4WQOY/lcZEUpQJAvyuVM/z/T8b9LXq5kyIclrCpZ4pBY6Zxh3m7mTA6TYhWI+4TpkVEQa0L8QcCT2UOMUrIkIQlye9SAm8+COYMrDVBHHicgDGIDiIOEuZHhCj49QxDEhhjuEHSaoIAdD7EuxIv4+YFxCptN4smxCl9oY4aYxVTwZzlimV+pr/uSnASmQv91Fp+t0MdUi7LikyCmQGxeKEiMhFgVYof8nLgwhc3YoixW5JCNWBIrjd8c4li+MNhfro8VZoiDYhX2pXn5Q/PFNmUJ2JEKvK8gKz5Enh+sncuRxQ/ngl3iC5kJQzr8/HHhQ3Ph8QMC5XPHnvGFCXEKnQ+iAv9Y+VicIsqNVtjjpvzcYClvCrFLfmGcYiyeWAA3pFwfzxAVRMfL48SLsjmh0fJ48OUgHLBAAKADCazpYDLIBoLOvqY+eCfvCQIcIAaZgA/sFczQiCRZjxBe40AR+BMiPsgfHucv6+WDQsh/HWblV3uQIestlI3IAU8gzgNhIBfeS2SjhMPeEsFjyAj+4Z0DKxfGmwurtP/f80Psd4YJmXAFIxnySFcbsiQGEgOIIcQgog2uj/vgXng4vPrB6oQzcI+heXy3JzwhdBEeEq4Rugm3JgmKxT9FGQG6oX6QIhfpP+YCt4Sarrg/7g3VoTKug+sDe9wF+mHivtCzK2RZirilWaH/pP23GfywGgo7siMZJY8g+5Gtfx6paqvqOqwizfWP+ZHHmj6cb9Zwz8/+WT9knwfbsJ8tsUXYfuwMdgI7hx3BmgAda8WasQ7sqBQP767Hst015C1WFk8O1BH8w9/Qykozme9Y59jr+EXeV8CfJn1HA9Zk0XSxIDOrgM6EXwQ+nS3kOoyiOzk6OQMg/b7IX19vYmTfDUSn4zs3/w8AvFsHBwcPf+dCWwHY6w4f/0PfOWsG/HQoA3D2EFciLpRzuPRCgG8JNfik6QEjYAas4XycgBvwAn4gEISCKBAPksFEGH0W3OdiMBXMBPNACSgDy8EaUAk2gi1gB9gN9oEmcAScAKfBBXAJXAN34O7pAS9AP3gHPiMIQkKoCA3RQ4wRC8QOcUIYiA8SiIQjsUgykoZkIkJEgsxE5iNlyEqkEtmM1CJ7kUPICeQc0oXcQh4gvchr5BOKoSqoFmqIWqKjUQbKRMPQeHQCmolOQYvQBehStAKtQXehjegJ9AJ6De1GX6ADGMCUMR3MBLPHGBgLi8JSsAxMjM3GSrFyrAarx1rgOl/BurE+7CNOxGk4HbeHOzgET8C5+BR8Nr4Er8R34I14O34Ff4D3498IVIIBwY7gSWATxhEyCVMJJYRywjbCQcIp+Cz1EN4RiUQdohXRHT6LycRs4gziEuJ6YgPxOLGL+Ig4QCKR9Eh2JG9SFIlDKiCVkNaRdpFaSZdJPaQPSspKxkpOSkFKKUpCpWKlcqWdSseULis9VfpMVidbkD3JUWQeeTp5GXkruYV8kdxD/kzRoFhRvCnxlGzKPEoFpZ5yinKX8
kZZWdlU2UM5RlmgPFe5QnmP8lnlB8ofVTRVbFVYKqkqEpWlKttVjqvcUnlDpVItqX7UFGoBdSm1lnqSep/6QZWm6qDKVuWpzlGtUm1Uvaz6Uo2sZqHGVJuoVqRWrrZf7aJanzpZ3VKdpc5Rn61epX5I/Yb6gAZNY4xGlEaexhKNnRrnNJ5pkjQtNQM1eZoLNLdontR8RMNoZjQWjUubT9tKO0Xr0SJqWWmxtbK1yrR2a3Vq9WtrartoJ2pP067SPqrdrYPpWOqwdXJ1luns07mu82mE4QjmCP6IxSPqR1we8V53pK6fLl+3VLdB95ruJz26XqBejt4KvSa9e/q4vq1+jP5U/Q36p/T7RmqN9BrJHVk6ct/I2waoga1BrMEMgy0GHQYDhkaGwYYiw3WGJw37jHSM/IyyjVYbHTPqNaYZ+xgLjFcbtxo/p2vTmfRcegW9nd5vYmASYiIx2WzSafLZ1Mo0wbTYtMH0nhnFjGGWYbbarM2s39zYPMJ8pnmd+W0LsgXDIstircUZi/eWVpZJlgstmyyfWelasa2KrOqs7lpTrX2tp1jXWF+1IdowbHJs1ttcskVtXW2zbKtsL9qhdm52Arv1dl2jCKM8RglH1Yy6Ya9iz7QvtK+zf+Cg4xDuUOzQ5PBytPnolNErRp8Z/c3R1THXcavjnTGaY0LHFI9pGfPaydaJ61TldNWZ6hzkPMe52fmVi50L32WDy01XmmuE60LXNtevbu5uYrd6t153c/c092r3GwwtRjRjCeOsB8HD32OOxxGPj55ungWe+zz/8rL3yvHa6fVsrNVY/titYx95m3pzvDd7d/vQfdJ8Nvl0+5r4cnxrfB/6mfnx/Lb5PWXaMLOZu5gv/R39xf4H/d+zPFmzWMcDsIDggNKAzkDNwITAysD7QaZBmUF1Qf3BrsEzgo+HEELCQlaE3GAbsrnsWnZ/qHvorND2MJWwuLDKsIfhtuHi8JYINCI0YlXE3UiLSGFkUxSIYketiroXbRU9JfpwDDEmOqYq5knsmNiZsWfiaHGT4nbGvYv3j18WfyfBOkGS0JaolpiaWJv4PikgaWVS97jR42aNu5CsnyxIbk4hpSSmbEsZGB84fs34nlTX1JLU6xOsJkybcG6i/sTciUcnqU3iTNqfRkhLStuZ9oUTxanhDKSz06vT+7ks7lruC54fbzWvl+/NX8l/muGdsTLjWaZ35qrM3izfrPKsPgFLUCl4lR2SvTH7fU5Uzvacwdyk3IY8pby0vENCTWGOsH2y0eRpk7tEdqISUfcUzylrpvSLw8Tb8pH8CfnNBVrwR75DYi35RfKg0KewqvDD1MSp+6dpTBNO65huO33x9KdFQUW/zcBncGe0zTSZOW/mg1nMWZtnI7PTZ7fNMZuzYE7P3OC5O+ZR5uXM+73YsXhl8dv5SfNbFhgumLvg0S/Bv9SVqJaIS24s9Fq4cRG+SLCoc7Hz4nWLv5XySs+XOZaVl31Zwl1y/tcxv1b8Org0Y2nnMrdlG5YTlwuXX1/hu2LHSo2VRSsfrYpY1biavrp09ds1k9acK3cp37iWslaytrsivKJ5nfm65eu+VGZVXqvyr2qoNqheXP1+PW/95Q1+G+o3Gm4s2/hpk2DTzc3BmxtrLGvKtxC3FG55sjVx65nfGL/VbtPfVrbt63bh9u4dsTvaa91ra3ca7FxWh9ZJ6np3pe66tDtgd3O9ff3mBp2Gsj1gj2TP871pe6/vC9vXtp+xv/6AxYHqg7SDpY1I4/TG/qaspu7m5OauQ6GH2lq8Wg4edji8/YjJkaqj2keXHaMcW3BssLWodeC46HjficwTj9omtd05Oe7k1faY9s5TYafOng46ffIM80zrWe+zR855njt0nnG+6YLbhcYO146Dv7v+frDTrbPxovvF5ksel1q6xnYdu+x7+cSVgCunr7KvXrgWea3resL1mzdSb3Tf5N18div31qvbhbc/35l7l3C39J76vfL7Bvdr/rD5o6Hbrfvog4AHHQ/jHt55xH304nH+4y89C55Qn5Q/NX5a+8zp2ZHeoN5Lz8c/73khevG5r+RPjT+rX1q/PPCX318d/eP6e16JXw2+XvJG7832ty5v2waiB+6/y3v3+X3pB70POz4yPp75lPTp6eepX0hfKr7afG35Fvbt7mDe4KCII+bIfgUwWNGMDABebweAmgwADZ7PKOPl5z9ZQeRnVhkC/wnLz4iy4gZAPfx/j+mDfzc3ANizFR6/oL5aKgDRVADiPQDq7Dxch85qsnOltBDhOWBT5Nf0vHTwb4r8zPlD3D+3QKrqAn5u/wWdZ3xtG7qP3QAAADhlWElmTU0AKgAAAAgAAYdpAAQAAAABAAAAGgAAAAAAAqACAAQAAAABAAAATqADAAQAAAABAAAATAAAAADhTXUdAAARnUlEQVR4Ae2c245bR3aGi4fulizFHgUzQAYIggBB5klymfeaZ8hDBYjvAiRxkMAGkowRWx7JktjcZL7vX1Uku62Burkl5YbV5q7Tqqq1/v3XqgMpL95tbvftEh6NwPLRLS4NgsAFuDOJcAHuAtyZCJzZ7MK4C3BnInBmswvjLsCdicCZzS6MOxO49Znt0uz3//CPbbv6srXFrq0W9Q6Wi0VbLPn4R8x/jSLiu3nrl8s9dcartlwtKdmTbm21XranN6v27Mm6XV8t25fP1+3Pn1+1r4if3Czbk+t9u1rR6f9jmAXc1P6sbaevQGbfdgGJeA8ke0AQsCYYgiYgPR1QyVO+3wvcMm2WO0G2PeWkX79btp839AG4//UjYC62gDsB2rI9f7pov3q2bX/9F1ftBWAufTufOcwCrnTtR90dOdHoNgCJeAbUkuM5TsWAW5W9gfkE83ZkUHg0oAyAwbm927a2ebVoP/xx2f7jD1uYuG9/89tF+/VXK1hq+88TZgG32O1g2r7tpRdBM8fUTM7pyR8SYddgxkJErUszHti7U44CpzyEo16syNtx+qgy+1og7RMetpev9+3rb3bt+c2u/ebFsv3uL1ftiqn+qcMs4HY7jNQpEfadNU5VqeHUTJkgUbaPDxRADdZ8jU9LHoJYnwLUtgWN4ObDC7Kdr8Hp7d9qMTW8gt23V1zyvPrD1H56e9t+99vr9uJLprBDfaIw69U4dQRCIw2JdVIjbUzecj+7qYyPpZHiAbDaJwsXyMhQEQ0pq6sAp7hMS2XGqykdA2iy4EUtF6v206ur9k/fbNo//+frtt2OaW/rjxtmAaeNGqihBY5xfVQzQEZfoSH0KHgkrbD/CX6vPIqlSTU61vVCovRSbEwbIS851vj23Q+tff3vu/bzu5I7tvs4qVnADTa5FCbNC86qCLN2E1MxKKroYB2pgSz2RLbbVcVkSJhOKxIDjGxn+nSuqes2JlKuG8fA/IzPXazbj68X7et/27UfX7GifORwOuSju47h/c3beKfRFO74CNA04YP0ZT2/YzERFGojc9pmDG47/wyDZwJjiX4wwJNer1dZPJbs5/xzK5Ppzp7SQZBszNy22U7tX7/dtFdvJrv8aGE2cDJLoPycBgHSgICJ
UQLo8nmUo6y7oH0S5Lu/FGhDQULCfIooATw3yyOQQ46eYVpYiaBMTFtAFPR307r9y3fbdvsRfd5Rg6HJI2Lt1qaAF6TEqoxWdVdYSHawezCvAHLjW7Jh2QGcUkDDT4Og2OfSFRVkxipcAJUZARC5FVRbeRpB1hVY6r25XQHexIZ96Hfa++PTs4Dbi8rQg7imWQG27/uEgCTCssk/WWg7GwJWwDQ36PceGzQ+x7jOtgNogkIIpsZiFMdXoEfOPUlh3l5ulu2/X6bJ7Mc84Bw+xgOKzJqM0VKm8WYlVMqt61gFKNtQKeZ6o7Ls/aqEeYooJXDIZ9uiT0uZ5UxPUJNlYdoAK62qHfM7unz3/bb9/Ha+v3u/tn3AD0XOrnxAZdpNYZILgoxyGk4BqMCbssq66dXv6RdFkiB6Rj2u3N1npiMw1dQjF4oJW/kzy6VdMRFA9Xd8VvhCLxCyYUYkvhHZb7+fotvdUR6XmwXcYI1DangAA6yspgBj/dRjp6L+RbmSPaaxuuMnGEeVAhBF4pSapAFG5gUo60rAHmpVtcz0sR2aBZW8NAB9+W7dXr9N0dmPmUcu10pWrq7kQQvBQXn1dUsgoM4ej12TtyBknG51PEMGOV2TLLVZ/GLvLMBYHsYJhg7fuMBx6tq3LFu7aBxxD9jKFiO7Thbwcv7n5dS+/ML0eWEWcBqoptk+mEQp2aTG+rbmBYA+D6MyMwMAdepKsX5QpnglFZyZ5k4tDYsI/Y1pF7CRq22HoHXgGEOwgodvgH79INnW3tlFIVVQvkBXg1dvF3z27fkTGzw+zALOPZluVoVkV4yLHoBB3VBJUNyo6uEWXAyIkruC2OQjbVeppxkm8+iti2mySsM1EPYGKBcEyul3LKTW1+pr+wLRstwP0J8a2K95Txf/+6q1ZzeUDEXt/oFhHnA4fJYCBtawYlWmlsrJBEHhP43bi9Rq1Z0ymlK3Z/QCRqA5YfaNLZJWEACn929eluXlUGO8CgMrHWYi441S2tsFebLRL5RWL0e0nL64SEEf2sjMR4ZZwA0Ddfziclz1eN8yDn1qAaHSq3G0FEQXjABDo51sJVNyGnA0QlAPL4LOApzMo0mY1sUFbQBj8xTzYhKrROYF5VGIftR1uW3+3uiWU8XnBw7l3HIYVG/P/djYgMZoyrTJrci0n2qPZVnNFV913viW6btGzsXBT6aW3VKmsauVTFOc2DxpP5YJYLBBeCUixE71IlGBR2EF+6OugHbP12Ddoj29HgIPj+cxDiPDFGINzB8sKhLh0Ui4gOgDI8deb8FiwYxlteWhLHWTlmOzhkxLAObPIkFqS8+bbG5BdgWiAmJTwXdqZ7oysktzdKC/BWMWiAJNpyP0ZPTMItRy7fTi2RB4eDwLuIkpCma1gob/Dsw7zcKAMf3txiCot8c42ZCDPu3WAqRMJAGEk4cACaLzSZsFRhAE9QoAtXcwTX92XDT0sxTQXJYHdDJin0KfVN8PmzNvnOYBx5XNlik4giumihb7tJ60ezgNhgXuXgRNttxunZYAj7uzbL3nUA67rm5KJWrJCyTfIVwBMh3bTkD8TqFYp6uv8RwrgJpAZmHHScqv0qWeKT48NujhAuELekyYBdz9gXJQ53DvDh3tU62xTtN8bQhzzE9OccAK8wA2ez2k3cNtN7wM/RZs9M5NkNZoee0H2rmhLr8miPV9roAZtN1RHV/gDb7EoUtXKeXjYXUBN0oeFs8CbrtlhZRGPZSSZNyI9gA+TBFkelFNWxgEgCtG3wDiFqEr5Jz6y/U1DAM4QLxi2l7DNhl3w/epNTUFWGbXC7HrMQMz7WUbf8AaDQ46DYXuxLoJX6CFRzvuiPyJzCzgZIoKyqgKAx1yAGPQUWfa+GoDsqwDJNnHLF9juSz0i5VrpvqSwmsQul5dtyfrfX1zL3i0WdHHSjaKVjf0T5k7ABtxlEHbwxusgjydAY8N84BjvAx5GLfMqBW0VJEZ+pwKskQnbpnFHPzpwWo/bzkGvX51296+bu1v/+qL9usXT9rTJ07Bzh9k9HEPsxNhwhh6xLXKo3fXWf3iMkrBBz9nAbflbHm6ONxhXp8/NW26lkSleIEV9FBVI+o6ihjmffPDt+3v/+5Z+82vnsZw/fyercweB2d7wzA8mfuPEknpXTnHvQsoPd1v/aD8LODw+AxbAw/QjnEfv69u5kz6dtOiW2R6YmW7vd0C3qK94wcjf/zxZ1bRXfvqGT6U3f2G/Z6AesqotgJX477PNVmTmxfiwTSS5irqz2ybEHD6PzbMAk7lS/0BxgkTqPAUYBiAkQpTLLdKxe1D4Lbsp968uW1vXk+ZrnpsN7yL1TbmbvCl4GcPPPStZWyNcM9s++9y92ruZu2CT21q7lZ9KDcLuC3WbmGG42uA30EISOVkFynt1BBialOliF/wZHqGTa1tOfq8fbMHPL6N2iBPW2d7HfxZdWnreiN49UL0dfhLR6tBSVVwNo+TQ1U5IsHvQU4Dcry7bGNOix+SngVcwAhYpZjTQxaNMABLLLtUFEAMEwi4kk63fGDbLTcVm82ubd7hNylzEXCa6SPdz2Vf5iUobe0jAFIq8+JHT8CjGeUjHFOj5E7MIO4THxvOaHIcwu2IOKiznyg89BTEXi6WssO8B36vkLa33Pv7/QRbEtm21c/BtIm9Yb4ho19PDg4g09aeucySdpzq3BfVx6WQqh7MkLOSkHLf2olEKni4n7xznh0VH4jnAYdy6hfVSZTvUmF54f2cU9d9XmlhvUyTlbkxIT0BWtgH4wRRgPMy7EFbAwi8ojzbNyqtH/7coWxnUHyE+rmYjbs3NCnqdwIbbM/GZ4RZwDleVskO3viSBhWjSu2Pxj7JU4bsqrzTU5YZQ7xKu73Bb8bAbo+s28NStxEyb8e+K1UAKXhOVivK7x0RUANf3zEw/smJpsr37cad9RlhFnCbzQYwfN36I+5qwxgVwRA/vOHxlneeMiaux9lymN5tTTttkZN5mbZwCYsLM550taA+zJM5gsdHsGSdQTbngN7ZlC/JrRhXIcorRJvVcp2pnjzdy+0nnErOCbOAE5x8d4oVCy4xMSFGetjfgWJ3MQFHdomxZbUwwC4B84YlzBNojUEmxmqO1tVC4VcVopUzKuXK+XArUeDVTyq85wv7xKqHsel1dfIUkl8zUXcFm8eUH7IPjWcBp8J5mYxWcWmbclhlyEIAMJm2HbSwDCHZGD9IuR1UH4MhaZ4HOAIQIJOrIxfjxOFRUMNQq8wI9EH5WNVJdcEje22ofxs3K6PlQ+OZwA2ghrFSKhiEVSqh/5JJcfodKBnntLac7wb5CKLpAs+0RguYuAhoNh2CRV1dTVFhqWhRn/u+tOsMtTph6JhOkAWsQDz1K3NHeHyYBZyK70BG5oy3SyqGumoaAhr1Aiggnm8FzXr3cQWSq++p8seM10v6LW9Elgh5kyGINXMdi1xspw2LRHwqMjJTV2KdU9c2eQ1SkXDDHL2aYf2MprVp1dFrtcBlAWB/sNuxMoJIzEfRqhMk04qXfM0n8yVDaa/DRLp1GuGSKhNz65Z
EOQUSdyD0Y/adRSojsxjoz2jnNFdN3l/S+sUvnqbDsx+zgCvQMJzhPaCrlouCLBvbA43x68DhsAc7DxpTr0y39VAMBCfpSlpSUMggzRe8X4bIAWRYJqVJj6t7feMV/9Bkfeb+bYw2Czg78S3GwWtEQEPRWFMMEDAZhVTiMaWLnZZRxSexfaStPR9DAXbMj5Qs479Dm8PqqYCNEpUTVAe/GpLC3vH16hI64zkLuB1XQVsdFkED8ps40oLjj2sMAdbFwGlKRjbW6UHAFZaRJVegIpeWVafZhQ4yHahUm+5VyfOwXYFHTX8DKUNSn+fCcsN3qOd8AT3GGPEs4EYnxho9YlOnU1WTUj98GbLKWCawI5wk71DiBMoh+qjYfgXUc+nNlW+rXuqjOrknPAs4sRoHcvvNguDZNEChYOoBUUZ175z9nMBZnQ6cnncgS7uDnt3BJ49Y8axqPYLZ0gVEb2DaICyHtOUM5t2eP7AJexWaGWYBVzcdsqneoAAViyzzo3ZsC1Jeq2qBKVhlkIxDsuSRrSY6/6S6eaaFjD+B4BGmMo9X9M06kcAdMq0qU5eT+lBBc8+GqaVmCc989iHP6yVvOcr4qE8ZLijVZ8VleC/5xWDWFmN6ow6aIKX75EfdL5rfKxBJgAcwwV/zeXrFjyqqo3uy52dnMa5oU4O7svo7YMNgWrFKdsk6WBXmmS82HuKsuADjHZFGi5iBIv+9qnn/qt+qSh3JTFNjPvWDiqpnA0SexYB/ijm6q5qP85wFnIZrXQHgillpVesHh9QVaAWWAJccfo/VNrOcbmrbYn/vCR9gy2m1aUH2WOa/rv4UoKnhPODowC2Gx6jQo4Nox4ZinDL392ssIHFSZWa1rTZJD/wSy0Kn34eDpwZvP1w96+dmH25zrsQs4KSLP4GAawWSjhnFZZQFmUZxOZSTj/ne2yUhIHCjRIlFKcIU0x852RjZTGGlDdaQrkxk7MPrJr/gzg17r4vgJ3rMAk4/wmQDE7wJhg+fFV1xaMGiMqnXaFc5jd4FjCCIRAEmAO5aPE7lzsw0ZelHYJB0PCWscErqOJcsrbllGmhmzE/7mAXcPof544Wlqg6wTuORtvKQzjV2gVC+shaNMhc24v8iIloGmS3ogc7bD9sS884Oi0kEP89jFnDX++/hCtPVtT7kwaxOkZpmxQ/L9vgdj1r+NCtAwQ6/A9DXMXnBqZgoHDdXP7Wna/Id6PRCum7DiREqcg1UPw9Yp6MsLv/HwlM4Hp7WQ1/CGQhcgDsDNJtcgLsAdyYCZza7MO4C3JkInNnswrgLcGcicGazC+POBO7/AH5zPa/ivytzAAAAAElFTkSuQmCC" - ), - ] - ), - AssistantPromptMessage(content="I see a blue letter 'D' with a gradient from light blue to dark blue."), - UserPromptMessage( - content=[ - TextPromptMessageContent(data="what about now?"), - ImagePromptMessageContent( - data="data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAYAAAAf8/9hAAAACXBIWXMAAAABAAAAAQBPJcTWAAADl0lEQVR4nC3Uf0zUdRjA8S9W6w//bGs1DUd5RT+gIY0oYeEqY0QCy5EbAnF4IEgyAnGuCBANWOjih6YOlK0BbtLAX+iAENFgUBLMkzs8uDuO+wEcxx3cgdx9v3fvvn/0x+v5PM+z56/n2T6CIAgIQUEECVsICnqOoC0v8PyLW3n5lW28GhLG9hAFwYowdoRsJ+Tzv3hdEcpOxVvsfDscheI1BIXKy5t7OwiPiCI8IZaIL+OISPKxK/IDdiU6ifwqjqj4WKISP5VN8mHSFNHJA7KnfJQYh7A7+g1i9hXw2dcX2JuSxhcJnxCfnEJ8ygESqtfYl3qA5O/1pKaX8E2Rn7R0JWnKXFkRaX0OhIOqUtJVRWQoj5ChyiOjb4XMQ0fIVB0lM6eEzMO5ZN5x8W1xD1nZh1Fm55OtzOdQTgEqZR6CSi5UjSI5hTnk3bWSX/gj+ccaKCgspaDkNIWlpygc3OTYtZc4fqKcE5Vn+eFkDWUp8ZS1ryOUn66lvGmCyt/8nLwxTlXZcapqL1Nd10B1Uy01FbnUnFVS+2sLvzTWUXfRRMOAgcb6KhovdSA0XnHRdL6Zcy1/0lyTS3NfgJbWNq6cu0nrPyu0FSlpu9pF21037ZFhXLtYT+eNIbp61+jq70bofv8drvf0c2vQz+3O3+nRrNI78JD+/psMfLefe0MG7p+a5v6tP3g48ojhC7mMXP2Y0YoZRitnEcbkMPaglzEnPAoNZrw4hXH1LBOtOiYfa3gcugO1+gnqZwGeaHRMTcyhaduKRjOBxiJfQSsnWq0W7YwVrd3PtH6BaeMST40adJ3V6OwBZlR7mNUvMWswYsiKxTA1gWHOgsGiRzCmRGOcW8QoD855JObWJUxmHSb5nfd4Mc+ZMFv1MjtmuWepSMNiMmAxz2LN2o1gbdmDdV6NdVnE1p6EzajHZp7BtjCLbSnAgsMtE1k8H8OiwyuTWPL4sLduwz5vRLA7XCzbLCw7PTiswzgWJnBsijhNwzhtw6xmRLLmdLC27sU9dBC324un/iieSyF4rPIS1/8eZOOego0NL898Epv14Wz2nMHrsOB12/Glh+Mrfg/fqgufKCHmxSC21SE6JxFdKwjihhFxw4O4aUf0bSKVRyN1pyKNXEcaDUbS3EZan5Sp/zeFtLGO5LUiSRKCJAXwZ0bg73oXv+kBfrsOv8uOXxIJ/JRG4N/9sjME1B3QXAjzd8CqhqWfkT8C4T8Z5+ciRtwo8gAAAABJRU5ErkJggg==" - ), - ] - ), - ], - model_parameters={"temperature": 0.3, "top_p": 0.2, "top_k": 3, "max_tokens": 100}, - stream=False, - user="abc-123", - ) - - print(f"result: {result.message.content}") - assert isinstance(result, LLMResult) - assert len(result.message.content) > 0 - - -def test_get_num_tokens(): - model = GoogleLargeLanguageModel() - - num_tokens = model.get_num_tokens( - model="gemini-pro", - credentials={"google_api_key": os.environ.get("GOOGLE_API_KEY")}, - prompt_messages=[ - SystemPromptMessage( - content="You are a helpful AI assistant.", - ), - UserPromptMessage(content="Hello World!"), - 
], - ) - - assert num_tokens > 0 # The exact number of tokens may vary based on the model's tokenization diff --git a/api/tests/integration_tests/model_runtime/google/test_provider.py b/api/tests/integration_tests/model_runtime/google/test_provider.py deleted file mode 100644 index c217e4fe05..0000000000 --- a/api/tests/integration_tests/model_runtime/google/test_provider.py +++ /dev/null @@ -1,17 +0,0 @@ -import os - -import pytest - -from core.model_runtime.errors.validate import CredentialsValidateFailedError -from core.model_runtime.model_providers.google.google import GoogleProvider -from tests.integration_tests.model_runtime.__mock.google import setup_google_mock - - -@pytest.mark.parametrize("setup_google_mock", [["none"]], indirect=True) -def test_validate_provider_credentials(setup_google_mock): - provider = GoogleProvider() - - with pytest.raises(CredentialsValidateFailedError): - provider.validate_provider_credentials(credentials={}) - - provider.validate_provider_credentials(credentials={"google_api_key": os.environ.get("GOOGLE_API_KEY")}) diff --git a/api/tests/integration_tests/model_runtime/huggingface_hub/__init__.py b/api/tests/integration_tests/model_runtime/huggingface_hub/__init__.py deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/api/tests/integration_tests/model_runtime/huggingface_hub/test_llm.py b/api/tests/integration_tests/model_runtime/huggingface_hub/test_llm.py deleted file mode 100644 index 6a6cc874fa..0000000000 --- a/api/tests/integration_tests/model_runtime/huggingface_hub/test_llm.py +++ /dev/null @@ -1,277 +0,0 @@ -import os -from collections.abc import Generator - -import pytest - -from core.model_runtime.entities.llm_entities import LLMResult, LLMResultChunk, LLMResultChunkDelta -from core.model_runtime.entities.message_entities import AssistantPromptMessage, UserPromptMessage -from core.model_runtime.errors.validate import CredentialsValidateFailedError -from core.model_runtime.model_providers.huggingface_hub.llm.llm import HuggingfaceHubLargeLanguageModel -from tests.integration_tests.model_runtime.__mock.huggingface import setup_huggingface_mock - - -@pytest.mark.parametrize("setup_huggingface_mock", [["none"]], indirect=True) -def test_hosted_inference_api_validate_credentials(setup_huggingface_mock): - model = HuggingfaceHubLargeLanguageModel() - - with pytest.raises(CredentialsValidateFailedError): - model.validate_credentials( - model="HuggingFaceH4/zephyr-7b-beta", - credentials={"huggingfacehub_api_type": "hosted_inference_api", "huggingfacehub_api_token": "invalid_key"}, - ) - - with pytest.raises(CredentialsValidateFailedError): - model.validate_credentials( - model="fake-model", - credentials={"huggingfacehub_api_type": "hosted_inference_api", "huggingfacehub_api_token": "invalid_key"}, - ) - - model.validate_credentials( - model="HuggingFaceH4/zephyr-7b-beta", - credentials={ - "huggingfacehub_api_type": "hosted_inference_api", - "huggingfacehub_api_token": os.environ.get("HUGGINGFACE_API_KEY"), - }, - ) - - -@pytest.mark.parametrize("setup_huggingface_mock", [["none"]], indirect=True) -def test_hosted_inference_api_invoke_model(setup_huggingface_mock): - model = HuggingfaceHubLargeLanguageModel() - - response = model.invoke( - model="HuggingFaceH4/zephyr-7b-beta", - credentials={ - "huggingfacehub_api_type": "hosted_inference_api", - "huggingfacehub_api_token": os.environ.get("HUGGINGFACE_API_KEY"), - }, - prompt_messages=[UserPromptMessage(content="Who are you?")], - model_parameters={ - "temperature": 1.0, - "top_k": 
2, - "top_p": 0.5, - }, - stop=["How"], - stream=False, - user="abc-123", - ) - - assert isinstance(response, LLMResult) - assert len(response.message.content) > 0 - - -@pytest.mark.parametrize("setup_huggingface_mock", [["none"]], indirect=True) -def test_hosted_inference_api_invoke_stream_model(setup_huggingface_mock): - model = HuggingfaceHubLargeLanguageModel() - - response = model.invoke( - model="HuggingFaceH4/zephyr-7b-beta", - credentials={ - "huggingfacehub_api_type": "hosted_inference_api", - "huggingfacehub_api_token": os.environ.get("HUGGINGFACE_API_KEY"), - }, - prompt_messages=[UserPromptMessage(content="Who are you?")], - model_parameters={ - "temperature": 1.0, - "top_k": 2, - "top_p": 0.5, - }, - stop=["How"], - stream=True, - user="abc-123", - ) - - assert isinstance(response, Generator) - - for chunk in response: - assert isinstance(chunk, LLMResultChunk) - assert isinstance(chunk.delta, LLMResultChunkDelta) - assert isinstance(chunk.delta.message, AssistantPromptMessage) - assert len(chunk.delta.message.content) > 0 if chunk.delta.finish_reason is None else True - - -@pytest.mark.parametrize("setup_huggingface_mock", [["none"]], indirect=True) -def test_inference_endpoints_text_generation_validate_credentials(setup_huggingface_mock): - model = HuggingfaceHubLargeLanguageModel() - - with pytest.raises(CredentialsValidateFailedError): - model.validate_credentials( - model="openchat/openchat_3.5", - credentials={ - "huggingfacehub_api_type": "inference_endpoints", - "huggingfacehub_api_token": "invalid_key", - "huggingfacehub_endpoint_url": os.environ.get("HUGGINGFACE_TEXT_GEN_ENDPOINT_URL"), - "task_type": "text-generation", - }, - ) - - model.validate_credentials( - model="openchat/openchat_3.5", - credentials={ - "huggingfacehub_api_type": "inference_endpoints", - "huggingfacehub_api_token": os.environ.get("HUGGINGFACE_API_KEY"), - "huggingfacehub_endpoint_url": os.environ.get("HUGGINGFACE_TEXT_GEN_ENDPOINT_URL"), - "task_type": "text-generation", - }, - ) - - -@pytest.mark.parametrize("setup_huggingface_mock", [["none"]], indirect=True) -def test_inference_endpoints_text_generation_invoke_model(setup_huggingface_mock): - model = HuggingfaceHubLargeLanguageModel() - - response = model.invoke( - model="openchat/openchat_3.5", - credentials={ - "huggingfacehub_api_type": "inference_endpoints", - "huggingfacehub_api_token": os.environ.get("HUGGINGFACE_API_KEY"), - "huggingfacehub_endpoint_url": os.environ.get("HUGGINGFACE_TEXT_GEN_ENDPOINT_URL"), - "task_type": "text-generation", - }, - prompt_messages=[UserPromptMessage(content="Who are you?")], - model_parameters={ - "temperature": 1.0, - "top_k": 2, - "top_p": 0.5, - }, - stop=["How"], - stream=False, - user="abc-123", - ) - - assert isinstance(response, LLMResult) - assert len(response.message.content) > 0 - - -@pytest.mark.parametrize("setup_huggingface_mock", [["none"]], indirect=True) -def test_inference_endpoints_text_generation_invoke_stream_model(setup_huggingface_mock): - model = HuggingfaceHubLargeLanguageModel() - - response = model.invoke( - model="openchat/openchat_3.5", - credentials={ - "huggingfacehub_api_type": "inference_endpoints", - "huggingfacehub_api_token": os.environ.get("HUGGINGFACE_API_KEY"), - "huggingfacehub_endpoint_url": os.environ.get("HUGGINGFACE_TEXT_GEN_ENDPOINT_URL"), - "task_type": "text-generation", - }, - prompt_messages=[UserPromptMessage(content="Who are you?")], - model_parameters={ - "temperature": 1.0, - "top_k": 2, - "top_p": 0.5, - }, - stop=["How"], - stream=True, - 
user="abc-123", - ) - - assert isinstance(response, Generator) - - for chunk in response: - assert isinstance(chunk, LLMResultChunk) - assert isinstance(chunk.delta, LLMResultChunkDelta) - assert isinstance(chunk.delta.message, AssistantPromptMessage) - assert len(chunk.delta.message.content) > 0 if chunk.delta.finish_reason is None else True - - -@pytest.mark.parametrize("setup_huggingface_mock", [["none"]], indirect=True) -def test_inference_endpoints_text2text_generation_validate_credentials(setup_huggingface_mock): - model = HuggingfaceHubLargeLanguageModel() - - with pytest.raises(CredentialsValidateFailedError): - model.validate_credentials( - model="google/mt5-base", - credentials={ - "huggingfacehub_api_type": "inference_endpoints", - "huggingfacehub_api_token": "invalid_key", - "huggingfacehub_endpoint_url": os.environ.get("HUGGINGFACE_TEXT2TEXT_GEN_ENDPOINT_URL"), - "task_type": "text2text-generation", - }, - ) - - model.validate_credentials( - model="google/mt5-base", - credentials={ - "huggingfacehub_api_type": "inference_endpoints", - "huggingfacehub_api_token": os.environ.get("HUGGINGFACE_API_KEY"), - "huggingfacehub_endpoint_url": os.environ.get("HUGGINGFACE_TEXT2TEXT_GEN_ENDPOINT_URL"), - "task_type": "text2text-generation", - }, - ) - - -@pytest.mark.parametrize("setup_huggingface_mock", [["none"]], indirect=True) -def test_inference_endpoints_text2text_generation_invoke_model(setup_huggingface_mock): - model = HuggingfaceHubLargeLanguageModel() - - response = model.invoke( - model="google/mt5-base", - credentials={ - "huggingfacehub_api_type": "inference_endpoints", - "huggingfacehub_api_token": os.environ.get("HUGGINGFACE_API_KEY"), - "huggingfacehub_endpoint_url": os.environ.get("HUGGINGFACE_TEXT2TEXT_GEN_ENDPOINT_URL"), - "task_type": "text2text-generation", - }, - prompt_messages=[UserPromptMessage(content="Who are you?")], - model_parameters={ - "temperature": 1.0, - "top_k": 2, - "top_p": 0.5, - }, - stop=["How"], - stream=False, - user="abc-123", - ) - - assert isinstance(response, LLMResult) - assert len(response.message.content) > 0 - - -@pytest.mark.parametrize("setup_huggingface_mock", [["none"]], indirect=True) -def test_inference_endpoints_text2text_generation_invoke_stream_model(setup_huggingface_mock): - model = HuggingfaceHubLargeLanguageModel() - - response = model.invoke( - model="google/mt5-base", - credentials={ - "huggingfacehub_api_type": "inference_endpoints", - "huggingfacehub_api_token": os.environ.get("HUGGINGFACE_API_KEY"), - "huggingfacehub_endpoint_url": os.environ.get("HUGGINGFACE_TEXT2TEXT_GEN_ENDPOINT_URL"), - "task_type": "text2text-generation", - }, - prompt_messages=[UserPromptMessage(content="Who are you?")], - model_parameters={ - "temperature": 1.0, - "top_k": 2, - "top_p": 0.5, - }, - stop=["How"], - stream=True, - user="abc-123", - ) - - assert isinstance(response, Generator) - - for chunk in response: - assert isinstance(chunk, LLMResultChunk) - assert isinstance(chunk.delta, LLMResultChunkDelta) - assert isinstance(chunk.delta.message, AssistantPromptMessage) - assert len(chunk.delta.message.content) > 0 if chunk.delta.finish_reason is None else True - - -def test_get_num_tokens(): - model = HuggingfaceHubLargeLanguageModel() - - num_tokens = model.get_num_tokens( - model="google/mt5-base", - credentials={ - "huggingfacehub_api_type": "inference_endpoints", - "huggingfacehub_api_token": os.environ.get("HUGGINGFACE_API_KEY"), - "huggingfacehub_endpoint_url": os.environ.get("HUGGINGFACE_TEXT2TEXT_GEN_ENDPOINT_URL"), - "task_type": 
"text2text-generation", - }, - prompt_messages=[UserPromptMessage(content="Hello World!")], - ) - - assert num_tokens == 7 diff --git a/api/tests/integration_tests/model_runtime/huggingface_hub/test_text_embedding.py b/api/tests/integration_tests/model_runtime/huggingface_hub/test_text_embedding.py deleted file mode 100644 index 0ee593f38a..0000000000 --- a/api/tests/integration_tests/model_runtime/huggingface_hub/test_text_embedding.py +++ /dev/null @@ -1,112 +0,0 @@ -import os - -import pytest - -from core.model_runtime.entities.text_embedding_entities import TextEmbeddingResult -from core.model_runtime.errors.validate import CredentialsValidateFailedError -from core.model_runtime.model_providers.huggingface_hub.text_embedding.text_embedding import ( - HuggingfaceHubTextEmbeddingModel, -) - - -def test_hosted_inference_api_validate_credentials(): - model = HuggingfaceHubTextEmbeddingModel() - - with pytest.raises(CredentialsValidateFailedError): - model.validate_credentials( - model="facebook/bart-base", - credentials={ - "huggingfacehub_api_type": "hosted_inference_api", - "huggingfacehub_api_token": "invalid_key", - }, - ) - - model.validate_credentials( - model="facebook/bart-base", - credentials={ - "huggingfacehub_api_type": "hosted_inference_api", - "huggingfacehub_api_token": os.environ.get("HUGGINGFACE_API_KEY"), - }, - ) - - -def test_hosted_inference_api_invoke_model(): - model = HuggingfaceHubTextEmbeddingModel() - - result = model.invoke( - model="facebook/bart-base", - credentials={ - "huggingfacehub_api_type": "hosted_inference_api", - "huggingfacehub_api_token": os.environ.get("HUGGINGFACE_API_KEY"), - }, - texts=["hello", "world"], - ) - - assert isinstance(result, TextEmbeddingResult) - assert len(result.embeddings) == 2 - assert result.usage.total_tokens == 2 - - -def test_inference_endpoints_validate_credentials(): - model = HuggingfaceHubTextEmbeddingModel() - - with pytest.raises(CredentialsValidateFailedError): - model.validate_credentials( - model="all-MiniLM-L6-v2", - credentials={ - "huggingfacehub_api_type": "inference_endpoints", - "huggingfacehub_api_token": "invalid_key", - "huggingface_namespace": "Dify-AI", - "huggingfacehub_endpoint_url": os.environ.get("HUGGINGFACE_EMBEDDINGS_ENDPOINT_URL"), - "task_type": "feature-extraction", - }, - ) - - model.validate_credentials( - model="all-MiniLM-L6-v2", - credentials={ - "huggingfacehub_api_type": "inference_endpoints", - "huggingfacehub_api_token": os.environ.get("HUGGINGFACE_API_KEY"), - "huggingface_namespace": "Dify-AI", - "huggingfacehub_endpoint_url": os.environ.get("HUGGINGFACE_EMBEDDINGS_ENDPOINT_URL"), - "task_type": "feature-extraction", - }, - ) - - -def test_inference_endpoints_invoke_model(): - model = HuggingfaceHubTextEmbeddingModel() - - result = model.invoke( - model="all-MiniLM-L6-v2", - credentials={ - "huggingfacehub_api_type": "inference_endpoints", - "huggingfacehub_api_token": os.environ.get("HUGGINGFACE_API_KEY"), - "huggingface_namespace": "Dify-AI", - "huggingfacehub_endpoint_url": os.environ.get("HUGGINGFACE_EMBEDDINGS_ENDPOINT_URL"), - "task_type": "feature-extraction", - }, - texts=["hello", "world"], - ) - - assert isinstance(result, TextEmbeddingResult) - assert len(result.embeddings) == 2 - assert result.usage.total_tokens == 0 - - -def test_get_num_tokens(): - model = HuggingfaceHubTextEmbeddingModel() - - num_tokens = model.get_num_tokens( - model="all-MiniLM-L6-v2", - credentials={ - "huggingfacehub_api_type": "inference_endpoints", - "huggingfacehub_api_token": 
os.environ.get("HUGGINGFACE_API_KEY"), - "huggingface_namespace": "Dify-AI", - "huggingfacehub_endpoint_url": os.environ.get("HUGGINGFACE_EMBEDDINGS_ENDPOINT_URL"), - "task_type": "feature-extraction", - }, - texts=["hello", "world"], - ) - - assert num_tokens == 2 diff --git a/api/tests/integration_tests/model_runtime/huggingface_tei/__init__.py b/api/tests/integration_tests/model_runtime/huggingface_tei/__init__.py deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/api/tests/integration_tests/model_runtime/huggingface_tei/test_embeddings.py b/api/tests/integration_tests/model_runtime/huggingface_tei/test_embeddings.py deleted file mode 100644 index b1fa9d5ca5..0000000000 --- a/api/tests/integration_tests/model_runtime/huggingface_tei/test_embeddings.py +++ /dev/null @@ -1,70 +0,0 @@ -import os - -import pytest - -from core.model_runtime.entities.text_embedding_entities import TextEmbeddingResult -from core.model_runtime.errors.validate import CredentialsValidateFailedError -from core.model_runtime.model_providers.huggingface_tei.text_embedding.text_embedding import ( - HuggingfaceTeiTextEmbeddingModel, - TeiHelper, -) -from tests.integration_tests.model_runtime.__mock.huggingface_tei import MockTEIClass - -MOCK = os.getenv("MOCK_SWITCH", "false").lower() == "true" - - -@pytest.fixture -def setup_tei_mock(request, monkeypatch: pytest.MonkeyPatch): - if MOCK: - monkeypatch.setattr(TeiHelper, "get_tei_extra_parameter", MockTEIClass.get_tei_extra_parameter) - monkeypatch.setattr(TeiHelper, "invoke_tokenize", MockTEIClass.invoke_tokenize) - monkeypatch.setattr(TeiHelper, "invoke_embeddings", MockTEIClass.invoke_embeddings) - monkeypatch.setattr(TeiHelper, "invoke_rerank", MockTEIClass.invoke_rerank) - yield - - if MOCK: - monkeypatch.undo() - - -@pytest.mark.parametrize("setup_tei_mock", [["none"]], indirect=True) -def test_validate_credentials(setup_tei_mock): - model = HuggingfaceTeiTextEmbeddingModel() - # model name is only used in mock - model_name = "embedding" - - if MOCK: - # TEI Provider will check model type by API endpoint, at real server, the model type is correct. - # So we dont need to check model type here. 
Only check in mock - with pytest.raises(CredentialsValidateFailedError): - model.validate_credentials( - model="reranker", - credentials={ - "server_url": os.environ.get("TEI_EMBEDDING_SERVER_URL", ""), - }, - ) - - model.validate_credentials( - model=model_name, - credentials={ - "server_url": os.environ.get("TEI_EMBEDDING_SERVER_URL", ""), - }, - ) - - -@pytest.mark.parametrize("setup_tei_mock", [["none"]], indirect=True) -def test_invoke_model(setup_tei_mock): - model = HuggingfaceTeiTextEmbeddingModel() - model_name = "embedding" - - result = model.invoke( - model=model_name, - credentials={ - "server_url": os.environ.get("TEI_EMBEDDING_SERVER_URL", ""), - }, - texts=["hello", "world"], - user="abc-123", - ) - - assert isinstance(result, TextEmbeddingResult) - assert len(result.embeddings) == 2 - assert result.usage.total_tokens > 0 diff --git a/api/tests/integration_tests/model_runtime/huggingface_tei/test_rerank.py b/api/tests/integration_tests/model_runtime/huggingface_tei/test_rerank.py deleted file mode 100644 index 45370d9fba..0000000000 --- a/api/tests/integration_tests/model_runtime/huggingface_tei/test_rerank.py +++ /dev/null @@ -1,78 +0,0 @@ -import os - -import pytest - -from core.model_runtime.entities.rerank_entities import RerankDocument, RerankResult -from core.model_runtime.entities.text_embedding_entities import TextEmbeddingResult -from core.model_runtime.errors.validate import CredentialsValidateFailedError -from core.model_runtime.model_providers.huggingface_tei.rerank.rerank import ( - HuggingfaceTeiRerankModel, -) -from core.model_runtime.model_providers.huggingface_tei.text_embedding.text_embedding import TeiHelper -from tests.integration_tests.model_runtime.__mock.huggingface_tei import MockTEIClass - -MOCK = os.getenv("MOCK_SWITCH", "false").lower() == "true" - - -@pytest.fixture -def setup_tei_mock(request, monkeypatch: pytest.MonkeyPatch): - if MOCK: - monkeypatch.setattr(TeiHelper, "get_tei_extra_parameter", MockTEIClass.get_tei_extra_parameter) - monkeypatch.setattr(TeiHelper, "invoke_tokenize", MockTEIClass.invoke_tokenize) - monkeypatch.setattr(TeiHelper, "invoke_embeddings", MockTEIClass.invoke_embeddings) - monkeypatch.setattr(TeiHelper, "invoke_rerank", MockTEIClass.invoke_rerank) - yield - - if MOCK: - monkeypatch.undo() - - -@pytest.mark.parametrize("setup_tei_mock", [["none"]], indirect=True) -def test_validate_credentials(setup_tei_mock): - model = HuggingfaceTeiRerankModel() - # model name is only used in mock - model_name = "reranker" - - if MOCK: - # The TEI provider checks the model type via the API endpoint; on a real server, the model type is correct. - # So we don't need to check the model type here. 
Only check in mock - with pytest.raises(CredentialsValidateFailedError): - model.validate_credentials( - model="embedding", - credentials={ - "server_url": os.environ.get("TEI_RERANK_SERVER_URL"), - }, - ) - - model.validate_credentials( - model=model_name, - credentials={ - "server_url": os.environ.get("TEI_RERANK_SERVER_URL"), - }, - ) - - -@pytest.mark.parametrize("setup_tei_mock", [["none"]], indirect=True) -def test_invoke_model(setup_tei_mock): - model = HuggingfaceTeiRerankModel() - # model name is only used in mock - model_name = "reranker" - - result = model.invoke( - model=model_name, - credentials={ - "server_url": os.environ.get("TEI_RERANK_SERVER_URL"), - }, - query="Who is Kasumi?", - docs=[ - 'Kasumi is a girl\'s name of Japanese origin meaning "mist".', - "Her music is a kawaii bass, a mix of future bass, pop, and kawaii music ", - "and she leads a team named PopiParty.", - ], - score_threshold=0.8, - ) - - assert isinstance(result, RerankResult) - assert len(result.docs) == 1 - assert result.docs[0].index == 0 - assert result.docs[0].score >= 0.8 diff --git a/api/tests/integration_tests/model_runtime/hunyuan/__init__.py b/api/tests/integration_tests/model_runtime/hunyuan/__init__.py deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/api/tests/integration_tests/model_runtime/hunyuan/test_llm.py b/api/tests/integration_tests/model_runtime/hunyuan/test_llm.py deleted file mode 100644 index b3049a06d9..0000000000 --- a/api/tests/integration_tests/model_runtime/hunyuan/test_llm.py +++ /dev/null @@ -1,90 +0,0 @@ -import os -from collections.abc import Generator - -import pytest - -from core.model_runtime.entities.llm_entities import LLMResult, LLMResultChunk, LLMResultChunkDelta -from core.model_runtime.entities.message_entities import AssistantPromptMessage, SystemPromptMessage, UserPromptMessage -from core.model_runtime.errors.validate import CredentialsValidateFailedError -from core.model_runtime.model_providers.hunyuan.llm.llm import HunyuanLargeLanguageModel - - -def test_validate_credentials(): - model = HunyuanLargeLanguageModel() - - with pytest.raises(CredentialsValidateFailedError): - model.validate_credentials( - model="hunyuan-standard", credentials={"secret_id": "invalid_key", "secret_key": "invalid_key"} - ) - - model.validate_credentials( - model="hunyuan-standard", - credentials={ - "secret_id": os.environ.get("HUNYUAN_SECRET_ID"), - "secret_key": os.environ.get("HUNYUAN_SECRET_KEY"), - }, - ) - - -def test_invoke_model(): - model = HunyuanLargeLanguageModel() - - response = model.invoke( - model="hunyuan-standard", - credentials={ - "secret_id": os.environ.get("HUNYUAN_SECRET_ID"), - "secret_key": os.environ.get("HUNYUAN_SECRET_KEY"), - }, - prompt_messages=[UserPromptMessage(content="Hi")], - model_parameters={"temperature": 0.5, "max_tokens": 10}, - stop=["How"], - stream=False, - user="abc-123", - ) - - assert isinstance(response, LLMResult) - assert len(response.message.content) > 0 - - -def test_invoke_stream_model(): - model = HunyuanLargeLanguageModel() - - response = model.invoke( - model="hunyuan-standard", - credentials={ - "secret_id": os.environ.get("HUNYUAN_SECRET_ID"), - "secret_key": os.environ.get("HUNYUAN_SECRET_KEY"), - }, - prompt_messages=[UserPromptMessage(content="Hi")], - model_parameters={"temperature": 0.5, "max_tokens": 100, "seed": 1234}, - stream=True, - user="abc-123", - ) - - assert isinstance(response, Generator) - - for chunk in response: - assert isinstance(chunk, LLMResultChunk) - assert isinstance(chunk.delta, 
LLMResultChunkDelta) - assert isinstance(chunk.delta.message, AssistantPromptMessage) - assert len(chunk.delta.message.content) > 0 if chunk.delta.finish_reason is None else True - - -def test_get_num_tokens(): - model = HunyuanLargeLanguageModel() - - num_tokens = model.get_num_tokens( - model="hunyuan-standard", - credentials={ - "secret_id": os.environ.get("HUNYUAN_SECRET_ID"), - "secret_key": os.environ.get("HUNYUAN_SECRET_KEY"), - }, - prompt_messages=[ - SystemPromptMessage( - content="You are a helpful AI assistant.", - ), - UserPromptMessage(content="Hello World!"), - ], - ) - - assert num_tokens == 14 diff --git a/api/tests/integration_tests/model_runtime/hunyuan/test_provider.py b/api/tests/integration_tests/model_runtime/hunyuan/test_provider.py deleted file mode 100644 index e3748c2ce7..0000000000 --- a/api/tests/integration_tests/model_runtime/hunyuan/test_provider.py +++ /dev/null @@ -1,20 +0,0 @@ -import os - -import pytest - -from core.model_runtime.errors.validate import CredentialsValidateFailedError -from core.model_runtime.model_providers.hunyuan.hunyuan import HunyuanProvider - - -def test_validate_provider_credentials(): - provider = HunyuanProvider() - - with pytest.raises(CredentialsValidateFailedError): - provider.validate_provider_credentials(credentials={"secret_id": "invalid_key", "secret_key": "invalid_key"}) - - provider.validate_provider_credentials( - credentials={ - "secret_id": os.environ.get("HUNYUAN_SECRET_ID"), - "secret_key": os.environ.get("HUNYUAN_SECRET_KEY"), - } - ) diff --git a/api/tests/integration_tests/model_runtime/hunyuan/test_text_embedding.py b/api/tests/integration_tests/model_runtime/hunyuan/test_text_embedding.py deleted file mode 100644 index 69d14dffee..0000000000 --- a/api/tests/integration_tests/model_runtime/hunyuan/test_text_embedding.py +++ /dev/null @@ -1,96 +0,0 @@ -import os - -import pytest - -from core.model_runtime.entities.text_embedding_entities import TextEmbeddingResult -from core.model_runtime.errors.validate import CredentialsValidateFailedError -from core.model_runtime.model_providers.hunyuan.text_embedding.text_embedding import HunyuanTextEmbeddingModel - - -def test_validate_credentials(): - model = HunyuanTextEmbeddingModel() - - with pytest.raises(CredentialsValidateFailedError): - model.validate_credentials( - model="hunyuan-embedding", credentials={"secret_id": "invalid_key", "secret_key": "invalid_key"} - ) - - model.validate_credentials( - model="hunyuan-embedding", - credentials={ - "secret_id": os.environ.get("HUNYUAN_SECRET_ID"), - "secret_key": os.environ.get("HUNYUAN_SECRET_KEY"), - }, - ) - - -def test_invoke_model(): - model = HunyuanTextEmbeddingModel() - - result = model.invoke( - model="hunyuan-embedding", - credentials={ - "secret_id": os.environ.get("HUNYUAN_SECRET_ID"), - "secret_key": os.environ.get("HUNYUAN_SECRET_KEY"), - }, - texts=["hello", "world"], - user="abc-123", - ) - - assert isinstance(result, TextEmbeddingResult) - assert len(result.embeddings) == 2 - assert result.usage.total_tokens == 6 - - -def test_get_num_tokens(): - model = HunyuanTextEmbeddingModel() - - num_tokens = model.get_num_tokens( - model="hunyuan-embedding", - credentials={ - "secret_id": os.environ.get("HUNYUAN_SECRET_ID"), - "secret_key": os.environ.get("HUNYUAN_SECRET_KEY"), - }, - texts=["hello", "world"], - ) - - assert num_tokens == 2 - - -def test_max_chunks(): - model = HunyuanTextEmbeddingModel() - - result = model.invoke( - model="hunyuan-embedding", - credentials={ - "secret_id": 
os.environ.get("HUNYUAN_SECRET_ID"), - "secret_key": os.environ.get("HUNYUAN_SECRET_KEY"), - }, - texts=[ - "hello", - "world", - "hello", - "world", - "hello", - "world", - "hello", - "world", - "hello", - "world", - "hello", - "world", - "hello", - "world", - "hello", - "world", - "hello", - "world", - "hello", - "world", - "hello", - "world", - ], - ) - - assert isinstance(result, TextEmbeddingResult) - assert len(result.embeddings) == 22 diff --git a/api/tests/integration_tests/model_runtime/jina/__init__.py b/api/tests/integration_tests/model_runtime/jina/__init__.py deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/api/tests/integration_tests/model_runtime/jina/test_provider.py b/api/tests/integration_tests/model_runtime/jina/test_provider.py deleted file mode 100644 index e3b6128c59..0000000000 --- a/api/tests/integration_tests/model_runtime/jina/test_provider.py +++ /dev/null @@ -1,15 +0,0 @@ -import os - -import pytest - -from core.model_runtime.errors.validate import CredentialsValidateFailedError -from core.model_runtime.model_providers.jina.jina import JinaProvider - - -def test_validate_provider_credentials(): - provider = JinaProvider() - - with pytest.raises(CredentialsValidateFailedError): - provider.validate_provider_credentials(credentials={"api_key": "hahahaha"}) - - provider.validate_provider_credentials(credentials={"api_key": os.environ.get("JINA_API_KEY")}) diff --git a/api/tests/integration_tests/model_runtime/jina/test_text_embedding.py b/api/tests/integration_tests/model_runtime/jina/test_text_embedding.py deleted file mode 100644 index 290735ec49..0000000000 --- a/api/tests/integration_tests/model_runtime/jina/test_text_embedding.py +++ /dev/null @@ -1,49 +0,0 @@ -import os - -import pytest - -from core.model_runtime.entities.text_embedding_entities import TextEmbeddingResult -from core.model_runtime.errors.validate import CredentialsValidateFailedError -from core.model_runtime.model_providers.jina.text_embedding.text_embedding import JinaTextEmbeddingModel - - -def test_validate_credentials(): - model = JinaTextEmbeddingModel() - - with pytest.raises(CredentialsValidateFailedError): - model.validate_credentials(model="jina-embeddings-v2-base-en", credentials={"api_key": "invalid_key"}) - - model.validate_credentials( - model="jina-embeddings-v2-base-en", credentials={"api_key": os.environ.get("JINA_API_KEY")} - ) - - -def test_invoke_model(): - model = JinaTextEmbeddingModel() - - result = model.invoke( - model="jina-embeddings-v2-base-en", - credentials={ - "api_key": os.environ.get("JINA_API_KEY"), - }, - texts=["hello", "world"], - user="abc-123", - ) - - assert isinstance(result, TextEmbeddingResult) - assert len(result.embeddings) == 2 - assert result.usage.total_tokens == 6 - - -def test_get_num_tokens(): - model = JinaTextEmbeddingModel() - - num_tokens = model.get_num_tokens( - model="jina-embeddings-v2-base-en", - credentials={ - "api_key": os.environ.get("JINA_API_KEY"), - }, - texts=["hello", "world"], - ) - - assert num_tokens == 6 diff --git a/api/tests/integration_tests/model_runtime/localai/__init__.py b/api/tests/integration_tests/model_runtime/localai/__init__.py deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/api/tests/integration_tests/model_runtime/localai/test_embedding.py b/api/tests/integration_tests/model_runtime/localai/test_embedding.py deleted file mode 100644 index 7fd9f2b300..0000000000 --- a/api/tests/integration_tests/model_runtime/localai/test_embedding.py +++ /dev/null @@ -1,4 +0,0 @@ -""" 
-LocalAI Embedding Interface is temporarily unavailable because -we could not find a way to test it for now. -""" diff --git a/api/tests/integration_tests/model_runtime/localai/test_llm.py b/api/tests/integration_tests/model_runtime/localai/test_llm.py deleted file mode 100644 index aa5436c34f..0000000000 --- a/api/tests/integration_tests/model_runtime/localai/test_llm.py +++ /dev/null @@ -1,174 +0,0 @@ -import os -from collections.abc import Generator - -import pytest - -from core.model_runtime.entities.llm_entities import LLMResult, LLMResultChunk, LLMResultChunkDelta -from core.model_runtime.entities.message_entities import ( - AssistantPromptMessage, - PromptMessageTool, - SystemPromptMessage, - TextPromptMessageContent, - UserPromptMessage, -) -from core.model_runtime.entities.model_entities import ParameterRule -from core.model_runtime.errors.validate import CredentialsValidateFailedError -from core.model_runtime.model_providers.localai.llm.llm import LocalAILanguageModel - - -def test_validate_credentials_for_chat_model(): - model = LocalAILanguageModel() - - with pytest.raises(CredentialsValidateFailedError): - model.validate_credentials( - model="chinese-llama-2-7b", - credentials={ - "server_url": "hahahaha", - "completion_type": "completion", - }, - ) - - model.validate_credentials( - model="chinese-llama-2-7b", - credentials={ - "server_url": os.environ.get("LOCALAI_SERVER_URL"), - "completion_type": "completion", - }, - ) - - -def test_invoke_completion_model(): - model = LocalAILanguageModel() - - response = model.invoke( - model="chinese-llama-2-7b", - credentials={ - "server_url": os.environ.get("LOCALAI_SERVER_URL"), - "completion_type": "completion", - }, - prompt_messages=[UserPromptMessage(content="ping")], - model_parameters={"temperature": 0.7, "top_p": 1.0, "max_tokens": 10}, - stop=[], - user="abc-123", - stream=False, - ) - - assert isinstance(response, LLMResult) - assert len(response.message.content) > 0 - assert response.usage.total_tokens > 0 - - -def test_invoke_chat_model(): - model = LocalAILanguageModel() - - response = model.invoke( - model="chinese-llama-2-7b", - credentials={ - "server_url": os.environ.get("LOCALAI_SERVER_URL"), - "completion_type": "chat_completion", - }, - prompt_messages=[UserPromptMessage(content="ping")], - model_parameters={"temperature": 0.7, "top_p": 1.0, "max_tokens": 10}, - stop=[], - user="abc-123", - stream=False, - ) - - assert isinstance(response, LLMResult) - assert len(response.message.content) > 0 - assert response.usage.total_tokens > 0 - - -def test_invoke_stream_completion_model(): - model = LocalAILanguageModel() - - response = model.invoke( - model="chinese-llama-2-7b", - credentials={ - "server_url": os.environ.get("LOCALAI_SERVER_URL"), - "completion_type": "completion", - }, - prompt_messages=[UserPromptMessage(content="Hello World!")], - model_parameters={"temperature": 0.7, "top_p": 1.0, "max_tokens": 10}, - stop=["you"], - stream=True, - user="abc-123", - ) - - assert isinstance(response, Generator) - for chunk in response: - assert isinstance(chunk, LLMResultChunk) - assert isinstance(chunk.delta, LLMResultChunkDelta) - assert isinstance(chunk.delta.message, AssistantPromptMessage) - assert len(chunk.delta.message.content) > 0 if chunk.delta.finish_reason is None else True - - -def test_invoke_stream_chat_model(): - model = LocalAILanguageModel() - - response = model.invoke( - model="chinese-llama-2-7b", - credentials={ - "server_url": os.environ.get("LOCALAI_SERVER_URL"), - "completion_type": 
"chat_completion", - }, - prompt_messages=[UserPromptMessage(content="Hello World!")], - model_parameters={"temperature": 0.7, "top_p": 1.0, "max_tokens": 10}, - stop=["you"], - stream=True, - user="abc-123", - ) - - assert isinstance(response, Generator) - for chunk in response: - assert isinstance(chunk, LLMResultChunk) - assert isinstance(chunk.delta, LLMResultChunkDelta) - assert isinstance(chunk.delta.message, AssistantPromptMessage) - assert len(chunk.delta.message.content) > 0 if chunk.delta.finish_reason is None else True - - -def test_get_num_tokens(): - model = LocalAILanguageModel() - - num_tokens = model.get_num_tokens( - model="????", - credentials={ - "server_url": os.environ.get("LOCALAI_SERVER_URL"), - "completion_type": "chat_completion", - }, - prompt_messages=[ - SystemPromptMessage( - content="You are a helpful AI assistant.", - ), - UserPromptMessage(content="Hello World!"), - ], - tools=[ - PromptMessageTool( - name="get_current_weather", - description="Get the current weather in a given location", - parameters={ - "type": "object", - "properties": { - "location": {"type": "string", "description": "The city and state e.g. San Francisco, CA"}, - "unit": {"type": "string", "enum": ["c", "f"]}, - }, - "required": ["location"], - }, - ) - ], - ) - - assert isinstance(num_tokens, int) - assert num_tokens == 77 - - num_tokens = model.get_num_tokens( - model="????", - credentials={ - "server_url": os.environ.get("LOCALAI_SERVER_URL"), - "completion_type": "chat_completion", - }, - prompt_messages=[UserPromptMessage(content="Hello World!")], - ) - - assert isinstance(num_tokens, int) - assert num_tokens == 10 diff --git a/api/tests/integration_tests/model_runtime/localai/test_rerank.py b/api/tests/integration_tests/model_runtime/localai/test_rerank.py deleted file mode 100644 index 13c7df6d14..0000000000 --- a/api/tests/integration_tests/model_runtime/localai/test_rerank.py +++ /dev/null @@ -1,96 +0,0 @@ -import os - -import pytest - -from core.model_runtime.entities.rerank_entities import RerankDocument, RerankResult -from core.model_runtime.errors.validate import CredentialsValidateFailedError -from core.model_runtime.model_providers.localai.rerank.rerank import LocalaiRerankModel - - -def test_validate_credentials_for_chat_model(): - model = LocalaiRerankModel() - - with pytest.raises(CredentialsValidateFailedError): - model.validate_credentials( - model="bge-reranker-v2-m3", - credentials={ - "server_url": "hahahaha", - "completion_type": "completion", - }, - ) - - model.validate_credentials( - model="bge-reranker-base", - credentials={ - "server_url": os.environ.get("LOCALAI_SERVER_URL"), - "completion_type": "completion", - }, - ) - - -def test_invoke_rerank_model(): - model = LocalaiRerankModel() - - response = model.invoke( - model="bge-reranker-base", - credentials={"server_url": os.environ.get("LOCALAI_SERVER_URL")}, - query="Organic skincare products for sensitive skin", - docs=[ - "Eco-friendly kitchenware for modern homes", - "Biodegradable cleaning supplies for eco-conscious consumers", - "Organic cotton baby clothes for sensitive skin", - "Natural organic skincare range for sensitive skin", - "Tech gadgets for smart homes: 2024 edition", - "Sustainable gardening tools and compost solutions", - "Sensitive skin-friendly facial cleansers and toners", - "Organic food wraps and storage solutions", - "Yoga mats made from recycled materials", - ], - top_n=3, - score_threshold=0.75, - user="abc-123", - ) - - assert isinstance(response, RerankResult) - assert 
len(response.docs) == 3 - - -def test__invoke(): - model = LocalaiRerankModel() - - # Test case 1: Empty docs - result = model._invoke( - model="bge-reranker-base", - credentials={"server_url": "https://example.com", "api_key": "1234567890"}, - query="Organic skincare products for sensitive skin", - docs=[], - top_n=3, - score_threshold=0.75, - user="abc-123", - ) - assert isinstance(result, RerankResult) - assert len(result.docs) == 0 - - # Test case 2: Valid invocation - result = model._invoke( - model="bge-reranker-base", - credentials={"server_url": "https://example.com", "api_key": "1234567890"}, - query="Organic skincare products for sensitive skin", - docs=[ - "Eco-friendly kitchenware for modern homes", - "Biodegradable cleaning supplies for eco-conscious consumers", - "Organic cotton baby clothes for sensitive skin", - "Natural organic skincare range for sensitive skin", - "Tech gadgets for smart homes: 2024 edition", - "Sustainable gardening tools and compost solutions", - "Sensitive skin-friendly facial cleansers and toners", - "Organic food wraps and storage solutions", - "Yoga mats made from recycled materials", - ], - top_n=3, - score_threshold=0.75, - user="abc-123", - ) - assert isinstance(result, RerankResult) - assert len(result.docs) == 3 - assert all(isinstance(doc, RerankDocument) for doc in result.docs) diff --git a/api/tests/integration_tests/model_runtime/localai/test_speech2text.py b/api/tests/integration_tests/model_runtime/localai/test_speech2text.py deleted file mode 100644 index 91b7a5752c..0000000000 --- a/api/tests/integration_tests/model_runtime/localai/test_speech2text.py +++ /dev/null @@ -1,42 +0,0 @@ -import os - -import pytest - -from core.model_runtime.errors.validate import CredentialsValidateFailedError -from core.model_runtime.model_providers.localai.speech2text.speech2text import LocalAISpeech2text - - -def test_validate_credentials(): - model = LocalAISpeech2text() - - with pytest.raises(CredentialsValidateFailedError): - model.validate_credentials(model="whisper-1", credentials={"server_url": "invalid_url"}) - - model.validate_credentials(model="whisper-1", credentials={"server_url": os.environ.get("LOCALAI_SERVER_URL")}) - - -def test_invoke_model(): - model = LocalAISpeech2text() - - # Get the directory of the current file - current_dir = os.path.dirname(os.path.abspath(__file__)) - - # Get assets directory - assets_dir = os.path.join(os.path.dirname(current_dir), "assets") - - # Construct the path to the audio file - audio_file_path = os.path.join(assets_dir, "audio.mp3") - - # Open the file and get the file object - with open(audio_file_path, "rb") as audio_file: - file = audio_file - - result = model.invoke( - model="whisper-1", - credentials={"server_url": os.environ.get("LOCALAI_SERVER_URL")}, - file=file, - user="abc-123", - ) - - assert isinstance(result, str) - assert result == "1, 2, 3, 4, 5, 6, 7, 8, 9, 10" diff --git a/api/tests/integration_tests/model_runtime/minimax/__init__.py b/api/tests/integration_tests/model_runtime/minimax/__init__.py deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/api/tests/integration_tests/model_runtime/minimax/test_embedding.py b/api/tests/integration_tests/model_runtime/minimax/test_embedding.py deleted file mode 100644 index cf2a28eb9e..0000000000 --- a/api/tests/integration_tests/model_runtime/minimax/test_embedding.py +++ /dev/null @@ -1,58 +0,0 @@ -import os - -import pytest - -from core.model_runtime.entities.text_embedding_entities import TextEmbeddingResult -from 
core.model_runtime.errors.validate import CredentialsValidateFailedError -from core.model_runtime.model_providers.minimax.text_embedding.text_embedding import MinimaxTextEmbeddingModel - - -def test_validate_credentials(): - model = MinimaxTextEmbeddingModel() - - with pytest.raises(CredentialsValidateFailedError): - model.validate_credentials( - model="embo-01", - credentials={"minimax_api_key": "invalid_key", "minimax_group_id": os.environ.get("MINIMAX_GROUP_ID")}, - ) - - model.validate_credentials( - model="embo-01", - credentials={ - "minimax_api_key": os.environ.get("MINIMAX_API_KEY"), - "minimax_group_id": os.environ.get("MINIMAX_GROUP_ID"), - }, - ) - - -def test_invoke_model(): - model = MinimaxTextEmbeddingModel() - - result = model.invoke( - model="embo-01", - credentials={ - "minimax_api_key": os.environ.get("MINIMAX_API_KEY"), - "minimax_group_id": os.environ.get("MINIMAX_GROUP_ID"), - }, - texts=["hello", "world"], - user="abc-123", - ) - - assert isinstance(result, TextEmbeddingResult) - assert len(result.embeddings) == 2 - assert result.usage.total_tokens == 16 - - -def test_get_num_tokens(): - model = MinimaxTextEmbeddingModel() - - num_tokens = model.get_num_tokens( - model="embo-01", - credentials={ - "minimax_api_key": os.environ.get("MINIMAX_API_KEY"), - "minimax_group_id": os.environ.get("MINIMAX_GROUP_ID"), - }, - texts=["hello", "world"], - ) - - assert num_tokens == 2 diff --git a/api/tests/integration_tests/model_runtime/minimax/test_llm.py b/api/tests/integration_tests/model_runtime/minimax/test_llm.py deleted file mode 100644 index aacde04d32..0000000000 --- a/api/tests/integration_tests/model_runtime/minimax/test_llm.py +++ /dev/null @@ -1,143 +0,0 @@ -import os -from collections.abc import Generator -from time import sleep - -import pytest - -from core.model_runtime.entities.llm_entities import LLMResult, LLMResultChunk, LLMResultChunkDelta -from core.model_runtime.entities.message_entities import AssistantPromptMessage, UserPromptMessage -from core.model_runtime.entities.model_entities import AIModelEntity -from core.model_runtime.errors.validate import CredentialsValidateFailedError -from core.model_runtime.model_providers.minimax.llm.llm import MinimaxLargeLanguageModel - - -def test_predefined_models(): - model = MinimaxLargeLanguageModel() - model_schemas = model.predefined_models() - assert len(model_schemas) >= 1 - assert isinstance(model_schemas[0], AIModelEntity) - - -def test_validate_credentials_for_chat_model(): - sleep(3) - model = MinimaxLargeLanguageModel() - - with pytest.raises(CredentialsValidateFailedError): - model.validate_credentials( - model="abab5.5-chat", credentials={"minimax_api_key": "invalid_key", "minimax_group_id": "invalid_key"} - ) - - model.validate_credentials( - model="abab5.5-chat", - credentials={ - "minimax_api_key": os.environ.get("MINIMAX_API_KEY"), - "minimax_group_id": os.environ.get("MINIMAX_GROUP_ID"), - }, - ) - - -def test_invoke_model(): - sleep(3) - model = MinimaxLargeLanguageModel() - - response = model.invoke( - model="abab5-chat", - credentials={ - "minimax_api_key": os.environ.get("MINIMAX_API_KEY"), - "minimax_group_id": os.environ.get("MINIMAX_GROUP_ID"), - }, - prompt_messages=[UserPromptMessage(content="Hello World!")], - model_parameters={ - "temperature": 0.7, - "top_p": 1.0, - "top_k": 1, - }, - stop=["you"], - user="abc-123", - stream=False, - ) - - assert isinstance(response, LLMResult) - assert len(response.message.content) > 0 - assert response.usage.total_tokens > 0 - - -def 
test_invoke_stream_model(): - sleep(3) - model = MinimaxLargeLanguageModel() - - response = model.invoke( - model="abab5.5-chat", - credentials={ - "minimax_api_key": os.environ.get("MINIMAX_API_KEY"), - "minimax_group_id": os.environ.get("MINIMAX_GROUP_ID"), - }, - prompt_messages=[UserPromptMessage(content="Hello World!")], - model_parameters={ - "temperature": 0.7, - "top_p": 1.0, - "top_k": 1, - }, - stop=["you"], - stream=True, - user="abc-123", - ) - - assert isinstance(response, Generator) - for chunk in response: - assert isinstance(chunk, LLMResultChunk) - assert isinstance(chunk.delta, LLMResultChunkDelta) - assert isinstance(chunk.delta.message, AssistantPromptMessage) - assert len(chunk.delta.message.content) > 0 if chunk.delta.finish_reason is None else True - - -def test_invoke_with_search(): - sleep(3) - model = MinimaxLargeLanguageModel() - - response = model.invoke( - model="abab5.5-chat", - credentials={ - "minimax_api_key": os.environ.get("MINIMAX_API_KEY"), - "minimax_group_id": os.environ.get("MINIMAX_GROUP_ID"), - }, - prompt_messages=[UserPromptMessage(content="北京今天的天气怎么样")], - model_parameters={ - "temperature": 0.7, - "top_p": 1.0, - "top_k": 1, - "plugin_web_search": True, - }, - stop=["you"], - stream=True, - user="abc-123", - ) - - assert isinstance(response, Generator) - total_message = "" - for chunk in response: - assert isinstance(chunk, LLMResultChunk) - assert isinstance(chunk.delta, LLMResultChunkDelta) - assert isinstance(chunk.delta.message, AssistantPromptMessage) - total_message += chunk.delta.message.content - assert len(chunk.delta.message.content) > 0 if not chunk.delta.finish_reason else True - - assert "参考资料" in total_message - - -def test_get_num_tokens(): - sleep(3) - model = MinimaxLargeLanguageModel() - - response = model.get_num_tokens( - model="abab5.5-chat", - credentials={ - "minimax_api_key": os.environ.get("MINIMAX_API_KEY"), - "minimax_group_id": os.environ.get("MINIMAX_GROUP_ID"), - }, - prompt_messages=[UserPromptMessage(content="Hello World!")], - tools=[], - ) - - assert isinstance(response, int) - assert response == 30 diff --git a/api/tests/integration_tests/model_runtime/minimax/test_provider.py b/api/tests/integration_tests/model_runtime/minimax/test_provider.py deleted file mode 100644 index 575ed13eef..0000000000 --- a/api/tests/integration_tests/model_runtime/minimax/test_provider.py +++ /dev/null @@ -1,25 +0,0 @@ -import os - -import pytest - -from core.model_runtime.errors.validate import CredentialsValidateFailedError -from core.model_runtime.model_providers.minimax.minimax import MinimaxProvider - - -def test_validate_provider_credentials(): - provider = MinimaxProvider() - - with pytest.raises(CredentialsValidateFailedError): - provider.validate_provider_credentials( - credentials={ - "minimax_api_key": "hahahaha", - "minimax_group_id": "123", - } - ) - - provider.validate_provider_credentials( - credentials={ - "minimax_api_key": os.environ.get("MINIMAX_API_KEY"), - "minimax_group_id": os.environ.get("MINIMAX_GROUP_ID"), - } - ) diff --git a/api/tests/integration_tests/model_runtime/mixedbread/__init__.py b/api/tests/integration_tests/model_runtime/mixedbread/__init__.py deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/api/tests/integration_tests/model_runtime/mixedbread/test_provider.py b/api/tests/integration_tests/model_runtime/mixedbread/test_provider.py deleted file mode 100644 index 25c9f3ce8d..0000000000 --- a/api/tests/integration_tests/model_runtime/mixedbread/test_provider.py +++ /dev/null 
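The mixedbread suites deleted in the next hunks never hit the live endpoint in CI: they patch the HTTP transport and feed it a canned JSON payload, so credential validation is exercised fully offline. As a minimal, self-contained sketch of that pattern (the endpoint URL and the fetch_embedding_usage helper are illustrative assumptions and not part of the deleted code; only the payload shape is copied from the mocked response in the hunk below):

# Sketch of the offline mock pattern used by the deleted tests.
# NOTE: the URL is a placeholder assumption; only the payload shape
# mirrors the mocked response asserted in the deleted mixedbread tests.
from unittest.mock import Mock, patch

import requests


def fetch_embedding_usage(api_key: str) -> int:
    # Hypothetical helper: POST one input and return the reported token usage.
    resp = requests.post(
        "https://api.example.com/v1/embeddings",  # assumed endpoint, for illustration only
        headers={"Authorization": f"Bearer {api_key}"},
        json={"model": "mixedbread-ai/mxbai-embed-large-v1", "input": ["ping"]},
    )
    return resp.json()["usage"]["total_tokens"]


def test_fetch_embedding_usage_offline():
    # Patch requests.post so no network traffic occurs; the fake response
    # reuses the {"usage": {...}} structure from the deleted tests.
    with patch("requests.post") as mock_post:
        mock_response = Mock()
        mock_response.status_code = 200
        mock_response.json.return_value = {"usage": {"prompt_tokens": 3, "total_tokens": 3}}
        mock_post.return_value = mock_response
        assert fetch_embedding_usage("dummy-key") == 3

The same structure repeats in every mocked suite in this patch: build a Mock with a status_code and a json.return_value, assign it as the patched transport's return value, then run the real validation or invocation code against it.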
@@ -1,28 +0,0 @@ -import os -from unittest.mock import Mock, patch - -import pytest - -from core.model_runtime.errors.validate import CredentialsValidateFailedError -from core.model_runtime.model_providers.mixedbread.mixedbread import MixedBreadProvider - - -def test_validate_provider_credentials(): - provider = MixedBreadProvider() - - with pytest.raises(CredentialsValidateFailedError): - provider.validate_provider_credentials(credentials={"api_key": "hahahaha"}) - with patch("requests.post") as mock_post: - mock_response = Mock() - mock_response.json.return_value = { - "usage": {"prompt_tokens": 3, "total_tokens": 3}, - "model": "mixedbread-ai/mxbai-embed-large-v1", - "data": [{"embedding": [0.23333 for _ in range(1024)], "index": 0, "object": "embedding"}], - "object": "list", - "normalized": "true", - "encoding_format": "float", - "dimensions": 1024, - } - mock_response.status_code = 200 - mock_post.return_value = mock_response - provider.validate_provider_credentials(credentials={"api_key": os.environ.get("MIXEDBREAD_API_KEY")}) diff --git a/api/tests/integration_tests/model_runtime/mixedbread/test_rerank.py b/api/tests/integration_tests/model_runtime/mixedbread/test_rerank.py deleted file mode 100644 index b65aab74aa..0000000000 --- a/api/tests/integration_tests/model_runtime/mixedbread/test_rerank.py +++ /dev/null @@ -1,100 +0,0 @@ -import os -from unittest.mock import Mock, patch - -import pytest - -from core.model_runtime.entities.rerank_entities import RerankResult -from core.model_runtime.errors.validate import CredentialsValidateFailedError -from core.model_runtime.model_providers.mixedbread.rerank.rerank import MixedBreadRerankModel - - -def test_validate_credentials(): - model = MixedBreadRerankModel() - - with pytest.raises(CredentialsValidateFailedError): - model.validate_credentials( - model="mxbai-rerank-large-v1", - credentials={"api_key": "invalid_key"}, - ) - with patch("httpx.post") as mock_post: - mock_response = Mock() - mock_response.json.return_value = { - "usage": {"prompt_tokens": 86, "total_tokens": 86}, - "model": "mixedbread-ai/mxbai-rerank-large-v1", - "data": [ - { - "index": 0, - "score": 0.06762695, - "input": "Carson City is the capital city of the American state of Nevada. At the 2010 United " - "States Census, Carson City had a population of 55,274.", - "object": "text_document", - }, - { - "index": 1, - "score": 0.057403564, - "input": "The Commonwealth of the Northern Mariana Islands is a group of islands in the Pacific " - "Ocean that are a political division controlled by the United States. 
Its capital is " - "Saipan.", - "object": "text_document", - }, - ], - "object": "list", - "top_k": 2, - "return_input": True, - } - mock_response.status_code = 200 - mock_post.return_value = mock_response - model.validate_credentials( - model="mxbai-rerank-large-v1", - credentials={ - "api_key": os.environ.get("MIXEDBREAD_API_KEY"), - }, - ) - - -def test_invoke_model(): - model = MixedBreadRerankModel() - with patch("httpx.post") as mock_post: - mock_response = Mock() - mock_response.json.return_value = { - "usage": {"prompt_tokens": 56, "total_tokens": 56}, - "model": "mixedbread-ai/mxbai-rerank-large-v1", - "data": [ - { - "index": 0, - "score": 0.6044922, - "input": "Kasumi is a girl name of Japanese origin meaning mist.", - "object": "text_document", - }, - { - "index": 1, - "score": 0.0703125, - "input": "Her music is a kawaii bass, a mix of future bass, pop, and kawaii music and she leads a " - "team named PopiParty.", - "object": "text_document", - }, - ], - "object": "list", - "top_k": 2, - "return_input": "true", - } - mock_response.status_code = 200 - mock_post.return_value = mock_response - result = model.invoke( - model="mxbai-rerank-large-v1", - credentials={ - "api_key": os.environ.get("MIXEDBREAD_API_KEY"), - }, - query="Who is Kasumi?", - docs=[ - "Kasumi is a girl name of Japanese origin meaning mist.", - "Her music is a kawaii bass, a mix of future bass, pop, and kawaii music and she leads a team named " - "PopiParty.", - ], - score_threshold=0.5, - ) - - assert isinstance(result, RerankResult) - assert len(result.docs) == 1 - assert result.docs[0].index == 0 - assert result.docs[0].score >= 0.5 diff --git a/api/tests/integration_tests/model_runtime/mixedbread/test_text_embedding.py b/api/tests/integration_tests/model_runtime/mixedbread/test_text_embedding.py deleted file mode 100644 index ca97a18951..0000000000 --- a/api/tests/integration_tests/model_runtime/mixedbread/test_text_embedding.py +++ /dev/null @@ -1,78 +0,0 @@ -import os -from unittest.mock import Mock, patch - -import pytest - -from core.model_runtime.entities.text_embedding_entities import TextEmbeddingResult -from core.model_runtime.errors.validate import CredentialsValidateFailedError -from core.model_runtime.model_providers.mixedbread.text_embedding.text_embedding import MixedBreadTextEmbeddingModel - - -def test_validate_credentials(): - model = MixedBreadTextEmbeddingModel() - - with pytest.raises(CredentialsValidateFailedError): - model.validate_credentials(model="mxbai-embed-large-v1", credentials={"api_key": "invalid_key"}) - with patch("requests.post") as mock_post: - mock_response = Mock() - mock_response.json.return_value = { - "usage": {"prompt_tokens": 3, "total_tokens": 3}, - "model": "mixedbread-ai/mxbai-embed-large-v1", - "data": [{"embedding": [0.23333 for _ in range(1024)], "index": 0, "object": "embedding"}], - "object": "list", - "normalized": "true", - "encoding_format": "float", - "dimensions": 1024, - } - mock_response.status_code = 200 - mock_post.return_value = mock_response - model.validate_credentials( - model="mxbai-embed-large-v1", credentials={"api_key": os.environ.get("MIXEDBREAD_API_KEY")} - ) - - -def test_invoke_model(): - model = MixedBreadTextEmbeddingModel() - - with patch("requests.post") as mock_post: - mock_response = Mock() - mock_response.json.return_value = { - "usage": {"prompt_tokens": 6, "total_tokens": 6}, - "model": "mixedbread-ai/mxbai-embed-large-v1", - "data": [ - {"embedding": [0.23333 for _ in range(1024)], "index": 0, "object": "embedding"}, - 
{"embedding": [0.23333 for _ in range(1024)], "index": 1, "object": "embedding"}, - ], - "object": "list", - "normalized": "true", - "encoding_format": "float", - "dimensions": 1024, - } - mock_response.status_code = 200 - mock_post.return_value = mock_response - result = model.invoke( - model="mxbai-embed-large-v1", - credentials={ - "api_key": os.environ.get("MIXEDBREAD_API_KEY"), - }, - texts=["hello", "world"], - user="abc-123", - ) - - assert isinstance(result, TextEmbeddingResult) - assert len(result.embeddings) == 2 - assert result.usage.total_tokens == 6 - - -def test_get_num_tokens(): - model = MixedBreadTextEmbeddingModel() - - num_tokens = model.get_num_tokens( - model="mxbai-embed-large-v1", - credentials={ - "api_key": os.environ.get("MIXEDBREAD_API_KEY"), - }, - texts=["ping"], - ) - - assert num_tokens == 1 diff --git a/api/tests/integration_tests/model_runtime/nomic/__init__.py b/api/tests/integration_tests/model_runtime/nomic/__init__.py deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/api/tests/integration_tests/model_runtime/nomic/test_embeddings.py b/api/tests/integration_tests/model_runtime/nomic/test_embeddings.py deleted file mode 100644 index 52dc96ee95..0000000000 --- a/api/tests/integration_tests/model_runtime/nomic/test_embeddings.py +++ /dev/null @@ -1,62 +0,0 @@ -import os - -import pytest - -from core.model_runtime.entities.text_embedding_entities import TextEmbeddingResult -from core.model_runtime.errors.validate import CredentialsValidateFailedError -from core.model_runtime.model_providers.nomic.text_embedding.text_embedding import NomicTextEmbeddingModel -from tests.integration_tests.model_runtime.__mock.nomic_embeddings import setup_nomic_mock - - -@pytest.mark.parametrize("setup_nomic_mock", [["text_embedding"]], indirect=True) -def test_validate_credentials(setup_nomic_mock): - model = NomicTextEmbeddingModel() - - with pytest.raises(CredentialsValidateFailedError): - model.validate_credentials( - model="nomic-embed-text-v1.5", - credentials={ - "nomic_api_key": "invalid_key", - }, - ) - - model.validate_credentials( - model="nomic-embed-text-v1.5", - credentials={ - "nomic_api_key": os.environ.get("NOMIC_API_KEY"), - }, - ) - - -@pytest.mark.parametrize("setup_nomic_mock", [["text_embedding"]], indirect=True) -def test_invoke_model(setup_nomic_mock): - model = NomicTextEmbeddingModel() - - result = model.invoke( - model="nomic-embed-text-v1.5", - credentials={ - "nomic_api_key": os.environ.get("NOMIC_API_KEY"), - }, - texts=["hello", "world"], - user="foo", - ) - - assert isinstance(result, TextEmbeddingResult) - assert result.model == "nomic-embed-text-v1.5" - assert len(result.embeddings) == 2 - assert result.usage.total_tokens == 2 - - -@pytest.mark.parametrize("setup_nomic_mock", [["text_embedding"]], indirect=True) -def test_get_num_tokens(setup_nomic_mock): - model = NomicTextEmbeddingModel() - - num_tokens = model.get_num_tokens( - model="nomic-embed-text-v1.5", - credentials={ - "nomic_api_key": os.environ.get("NOMIC_API_KEY"), - }, - texts=["hello", "world"], - ) - - assert num_tokens == 2 diff --git a/api/tests/integration_tests/model_runtime/nomic/test_provider.py b/api/tests/integration_tests/model_runtime/nomic/test_provider.py deleted file mode 100644 index 6cad400c06..0000000000 --- a/api/tests/integration_tests/model_runtime/nomic/test_provider.py +++ /dev/null @@ -1,22 +0,0 @@ -import os - -import pytest - -from core.model_runtime.errors.validate import CredentialsValidateFailedError -from 
core.model_runtime.model_providers.nomic.nomic import NomicAtlasProvider -from core.model_runtime.model_providers.nomic.text_embedding.text_embedding import NomicTextEmbeddingModel -from tests.integration_tests.model_runtime.__mock.nomic_embeddings import setup_nomic_mock - - -@pytest.mark.parametrize("setup_nomic_mock", [["text_embedding"]], indirect=True) -def test_validate_provider_credentials(setup_nomic_mock): - provider = NomicAtlasProvider() - - with pytest.raises(CredentialsValidateFailedError): - provider.validate_provider_credentials(credentials={}) - - provider.validate_provider_credentials( - credentials={ - "nomic_api_key": os.environ.get("NOMIC_API_KEY"), - }, - ) diff --git a/api/tests/integration_tests/model_runtime/novita/__init__.py b/api/tests/integration_tests/model_runtime/novita/__init__.py deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/api/tests/integration_tests/model_runtime/novita/test_llm.py b/api/tests/integration_tests/model_runtime/novita/test_llm.py deleted file mode 100644 index 35fa0dc190..0000000000 --- a/api/tests/integration_tests/model_runtime/novita/test_llm.py +++ /dev/null @@ -1,99 +0,0 @@ -import os -from collections.abc import Generator - -import pytest - -from core.model_runtime.entities.llm_entities import LLMResult, LLMResultChunk, LLMResultChunkDelta -from core.model_runtime.entities.message_entities import ( - AssistantPromptMessage, - PromptMessageTool, - SystemPromptMessage, - UserPromptMessage, -) -from core.model_runtime.errors.validate import CredentialsValidateFailedError -from core.model_runtime.model_providers.novita.llm.llm import NovitaLargeLanguageModel - - -def test_validate_credentials(): - model = NovitaLargeLanguageModel() - - with pytest.raises(CredentialsValidateFailedError): - model.validate_credentials( - model="meta-llama/llama-3-8b-instruct", credentials={"api_key": "invalid_key", "mode": "chat"} - ) - - model.validate_credentials( - model="meta-llama/llama-3-8b-instruct", - credentials={"api_key": os.environ.get("NOVITA_API_KEY"), "mode": "chat"}, - ) - - -def test_invoke_model(): - model = NovitaLargeLanguageModel() - - response = model.invoke( - model="meta-llama/llama-3-8b-instruct", - credentials={"api_key": os.environ.get("NOVITA_API_KEY"), "mode": "completion"}, - prompt_messages=[ - SystemPromptMessage( - content="You are a helpful AI assistant.", - ), - UserPromptMessage(content="Who are you?"), - ], - model_parameters={ - "temperature": 1.0, - "top_p": 0.5, - "max_tokens": 10, - }, - stop=["How"], - stream=False, - user="novita", - ) - - assert isinstance(response, LLMResult) - assert len(response.message.content) > 0 - - -def test_invoke_stream_model(): - model = NovitaLargeLanguageModel() - - response = model.invoke( - model="meta-llama/llama-3-8b-instruct", - credentials={"api_key": os.environ.get("NOVITA_API_KEY"), "mode": "chat"}, - prompt_messages=[ - SystemPromptMessage( - content="You are a helpful AI assistant.", - ), - UserPromptMessage(content="Who are you?"), - ], - model_parameters={"temperature": 1.0, "top_k": 2, "top_p": 0.5, "max_tokens": 100}, - stream=True, - user="novita", - ) - - assert isinstance(response, Generator) - - for chunk in response: - assert isinstance(chunk, LLMResultChunk) - assert isinstance(chunk.delta, LLMResultChunkDelta) - assert isinstance(chunk.delta.message, AssistantPromptMessage) - - -def test_get_num_tokens(): - model = NovitaLargeLanguageModel() - - num_tokens = model.get_num_tokens( - model="meta-llama/llama-3-8b-instruct", - credentials={ - 
"api_key": os.environ.get("NOVITA_API_KEY"), - }, - prompt_messages=[ - SystemPromptMessage( - content="You are a helpful AI assistant.", - ), - UserPromptMessage(content="Hello World!"), - ], - ) - - assert isinstance(num_tokens, int) - assert num_tokens == 21 diff --git a/api/tests/integration_tests/model_runtime/novita/test_provider.py b/api/tests/integration_tests/model_runtime/novita/test_provider.py deleted file mode 100644 index 191af99db2..0000000000 --- a/api/tests/integration_tests/model_runtime/novita/test_provider.py +++ /dev/null @@ -1,19 +0,0 @@ -import os - -import pytest - -from core.model_runtime.errors.validate import CredentialsValidateFailedError -from core.model_runtime.model_providers.novita.novita import NovitaProvider - - -def test_validate_provider_credentials(): - provider = NovitaProvider() - - with pytest.raises(CredentialsValidateFailedError): - provider.validate_provider_credentials(credentials={}) - - provider.validate_provider_credentials( - credentials={ - "api_key": os.environ.get("NOVITA_API_KEY"), - } - ) diff --git a/api/tests/integration_tests/model_runtime/oci/__init__.py b/api/tests/integration_tests/model_runtime/oci/__init__.py deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/api/tests/integration_tests/model_runtime/oci/test_llm.py b/api/tests/integration_tests/model_runtime/oci/test_llm.py deleted file mode 100644 index 531f26a32e..0000000000 --- a/api/tests/integration_tests/model_runtime/oci/test_llm.py +++ /dev/null @@ -1,130 +0,0 @@ -import os -from collections.abc import Generator - -import pytest - -from core.model_runtime.entities.llm_entities import LLMResult, LLMResultChunk, LLMResultChunkDelta -from core.model_runtime.entities.message_entities import ( - AssistantPromptMessage, - PromptMessageTool, - SystemPromptMessage, - TextPromptMessageContent, - UserPromptMessage, -) -from core.model_runtime.errors.validate import CredentialsValidateFailedError -from core.model_runtime.model_providers.oci.llm.llm import OCILargeLanguageModel - - -def test_validate_credentials(): - model = OCILargeLanguageModel() - - with pytest.raises(CredentialsValidateFailedError): - model.validate_credentials( - model="cohere.command-r-plus", - credentials={"oci_config_content": "invalid_key", "oci_key_content": "invalid_key"}, - ) - - model.validate_credentials( - model="cohere.command-r-plus", - credentials={ - "oci_config_content": os.environ.get("OCI_CONFIG_CONTENT"), - "oci_key_content": os.environ.get("OCI_KEY_CONTENT"), - }, - ) - - -def test_invoke_model(): - model = OCILargeLanguageModel() - - response = model.invoke( - model="cohere.command-r-plus", - credentials={ - "oci_config_content": os.environ.get("OCI_CONFIG_CONTENT"), - "oci_key_content": os.environ.get("OCI_KEY_CONTENT"), - }, - prompt_messages=[UserPromptMessage(content="Hi")], - model_parameters={"temperature": 0.5, "max_tokens": 10}, - stop=["How"], - stream=False, - user="abc-123", - ) - - assert isinstance(response, LLMResult) - assert len(response.message.content) > 0 - - -def test_invoke_stream_model(): - model = OCILargeLanguageModel() - - response = model.invoke( - model="meta.llama-3-70b-instruct", - credentials={ - "oci_config_content": os.environ.get("OCI_CONFIG_CONTENT"), - "oci_key_content": os.environ.get("OCI_KEY_CONTENT"), - }, - prompt_messages=[UserPromptMessage(content="Hi")], - model_parameters={"temperature": 0.5, "max_tokens": 100, "seed": 1234}, - stream=True, - user="abc-123", - ) - - assert isinstance(response, Generator) - - for chunk in response: - 
assert isinstance(chunk, LLMResultChunk) - assert isinstance(chunk.delta, LLMResultChunkDelta) - assert isinstance(chunk.delta.message, AssistantPromptMessage) - assert len(chunk.delta.message.content) > 0 if chunk.delta.finish_reason is None else True - - -def test_invoke_model_with_function(): - model = OCILargeLanguageModel() - - response = model.invoke( - model="cohere.command-r-plus", - credentials={ - "oci_config_content": os.environ.get("OCI_CONFIG_CONTENT"), - "oci_key_content": os.environ.get("OCI_KEY_CONTENT"), - }, - prompt_messages=[UserPromptMessage(content="Hi")], - model_parameters={"temperature": 0.5, "max_tokens": 100, "seed": 1234}, - stream=False, - user="abc-123", - tools=[ - PromptMessageTool( - name="get_current_weather", - description="Get the current weather in a given location", - parameters={ - "type": "object", - "properties": { - "location": {"type": "string", "description": "The city and state e.g. San Francisco, CA"}, - "unit": {"type": "string", "enum": ["celsius", "fahrenheit"]}, - }, - "required": ["location"], - }, - ) - ], - ) - - assert isinstance(response, LLMResult) - assert len(response.message.content) > 0 - - -def test_get_num_tokens(): - model = OCILargeLanguageModel() - - num_tokens = model.get_num_tokens( - model="cohere.command-r-plus", - credentials={ - "oci_config_content": os.environ.get("OCI_CONFIG_CONTENT"), - "oci_key_content": os.environ.get("OCI_KEY_CONTENT"), - }, - prompt_messages=[ - SystemPromptMessage( - content="You are a helpful AI assistant.", - ), - UserPromptMessage(content="Hello World!"), - ], - ) - - assert num_tokens == 18 diff --git a/api/tests/integration_tests/model_runtime/oci/test_provider.py b/api/tests/integration_tests/model_runtime/oci/test_provider.py deleted file mode 100644 index 2c7107c7cc..0000000000 --- a/api/tests/integration_tests/model_runtime/oci/test_provider.py +++ /dev/null @@ -1,20 +0,0 @@ -import os - -import pytest - -from core.model_runtime.errors.validate import CredentialsValidateFailedError -from core.model_runtime.model_providers.oci.oci import OCIGENAIProvider - - -def test_validate_provider_credentials(): - provider = OCIGENAIProvider() - - with pytest.raises(CredentialsValidateFailedError): - provider.validate_provider_credentials(credentials={}) - - provider.validate_provider_credentials( - credentials={ - "oci_config_content": os.environ.get("OCI_CONFIG_CONTENT"), - "oci_key_content": os.environ.get("OCI_KEY_CONTENT"), - } - ) diff --git a/api/tests/integration_tests/model_runtime/oci/test_text_embedding.py b/api/tests/integration_tests/model_runtime/oci/test_text_embedding.py deleted file mode 100644 index 032c5c681a..0000000000 --- a/api/tests/integration_tests/model_runtime/oci/test_text_embedding.py +++ /dev/null @@ -1,58 +0,0 @@ -import os - -import pytest - -from core.model_runtime.entities.text_embedding_entities import TextEmbeddingResult -from core.model_runtime.errors.validate import CredentialsValidateFailedError -from core.model_runtime.model_providers.oci.text_embedding.text_embedding import OCITextEmbeddingModel - - -def test_validate_credentials(): - model = OCITextEmbeddingModel() - - with pytest.raises(CredentialsValidateFailedError): - model.validate_credentials( - model="cohere.embed-multilingual-v3.0", - credentials={"oci_config_content": "invalid_key", "oci_key_content": "invalid_key"}, - ) - - model.validate_credentials( - model="cohere.embed-multilingual-v3.0", - credentials={ - "oci_config_content": os.environ.get("OCI_CONFIG_CONTENT"), - "oci_key_content": 
os.environ.get("OCI_KEY_CONTENT"), - }, - ) - - -def test_invoke_model(): - model = OCITextEmbeddingModel() - - result = model.invoke( - model="cohere.embed-multilingual-v3.0", - credentials={ - "oci_config_content": os.environ.get("OCI_CONFIG_CONTENT"), - "oci_key_content": os.environ.get("OCI_KEY_CONTENT"), - }, - texts=["hello", "world", " ".join(["long_text"] * 100), " ".join(["another_long_text"] * 100)], - user="abc-123", - ) - - assert isinstance(result, TextEmbeddingResult) - assert len(result.embeddings) == 4 - # assert result.usage.total_tokens == 811 - - -def test_get_num_tokens(): - model = OCITextEmbeddingModel() - - num_tokens = model.get_num_tokens( - model="cohere.embed-multilingual-v3.0", - credentials={ - "oci_config_content": os.environ.get("OCI_CONFIG_CONTENT"), - "oci_key_content": os.environ.get("OCI_KEY_CONTENT"), - }, - texts=["hello", "world"], - ) - - assert num_tokens == 2 diff --git a/api/tests/integration_tests/model_runtime/ollama/__init__.py b/api/tests/integration_tests/model_runtime/ollama/__init__.py deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/api/tests/integration_tests/model_runtime/ollama/test_llm.py b/api/tests/integration_tests/model_runtime/ollama/test_llm.py deleted file mode 100644 index 58a1339f50..0000000000 --- a/api/tests/integration_tests/model_runtime/ollama/test_llm.py +++ /dev/null @@ -1,222 +0,0 @@ -import os -from collections.abc import Generator - -import pytest - -from core.model_runtime.entities.llm_entities import LLMResult, LLMResultChunk, LLMResultChunkDelta -from core.model_runtime.entities.message_entities import ( - AssistantPromptMessage, - ImagePromptMessageContent, - SystemPromptMessage, - TextPromptMessageContent, - UserPromptMessage, -) -from core.model_runtime.errors.validate import CredentialsValidateFailedError -from core.model_runtime.model_providers.ollama.llm.llm import OllamaLargeLanguageModel - - -def test_validate_credentials(): - model = OllamaLargeLanguageModel() - - with pytest.raises(CredentialsValidateFailedError): - model.validate_credentials( - model="mistral:text", - credentials={ - "base_url": "http://localhost:21434", - "mode": "chat", - "context_size": 2048, - "max_tokens": 2048, - }, - ) - - model.validate_credentials( - model="mistral:text", - credentials={ - "base_url": os.environ.get("OLLAMA_BASE_URL"), - "mode": "chat", - "context_size": 2048, - "max_tokens": 2048, - }, - ) - - -def test_invoke_model(): - model = OllamaLargeLanguageModel() - - response = model.invoke( - model="mistral:text", - credentials={ - "base_url": os.environ.get("OLLAMA_BASE_URL"), - "mode": "chat", - "context_size": 2048, - "max_tokens": 2048, - }, - prompt_messages=[UserPromptMessage(content="Who are you?")], - model_parameters={"temperature": 1.0, "top_k": 2, "top_p": 0.5, "num_predict": 10}, - stop=["How"], - stream=False, - ) - - assert isinstance(response, LLMResult) - assert len(response.message.content) > 0 - - -def test_invoke_stream_model(): - model = OllamaLargeLanguageModel() - - response = model.invoke( - model="mistral:text", - credentials={ - "base_url": os.environ.get("OLLAMA_BASE_URL"), - "mode": "chat", - "context_size": 2048, - "max_tokens": 2048, - }, - prompt_messages=[ - SystemPromptMessage( - content="You are a helpful AI assistant.", - ), - UserPromptMessage(content="Who are you?"), - ], - model_parameters={"temperature": 1.0, "top_k": 2, "top_p": 0.5, "num_predict": 10}, - stop=["How"], - stream=True, - ) - - assert isinstance(response, Generator) - - for chunk in response: - 
assert isinstance(chunk, LLMResultChunk) - assert isinstance(chunk.delta, LLMResultChunkDelta) - assert isinstance(chunk.delta.message, AssistantPromptMessage) - - -def test_invoke_completion_model(): - model = OllamaLargeLanguageModel() - - response = model.invoke( - model="mistral:text", - credentials={ - "base_url": os.environ.get("OLLAMA_BASE_URL"), - "mode": "completion", - "context_size": 2048, - "max_tokens": 2048, - }, - prompt_messages=[UserPromptMessage(content="Who are you?")], - model_parameters={"temperature": 1.0, "top_k": 2, "top_p": 0.5, "num_predict": 10}, - stop=["How"], - stream=False, - ) - - assert isinstance(response, LLMResult) - assert len(response.message.content) > 0 - - -def test_invoke_stream_completion_model(): - model = OllamaLargeLanguageModel() - - response = model.invoke( - model="mistral:text", - credentials={ - "base_url": os.environ.get("OLLAMA_BASE_URL"), - "mode": "completion", - "context_size": 2048, - "max_tokens": 2048, - }, - prompt_messages=[ - SystemPromptMessage( - content="You are a helpful AI assistant.", - ), - UserPromptMessage(content="Who are you?"), - ], - model_parameters={"temperature": 1.0, "top_k": 2, "top_p": 0.5, "num_predict": 10}, - stop=["How"], - stream=True, - ) - - assert isinstance(response, Generator) - - for chunk in response: - assert isinstance(chunk, LLMResultChunk) - assert isinstance(chunk.delta, LLMResultChunkDelta) - assert isinstance(chunk.delta.message, AssistantPromptMessage) - - -def test_invoke_completion_model_with_vision(): - model = OllamaLargeLanguageModel() - - result = model.invoke( - model="llava", - credentials={ - "base_url": os.environ.get("OLLAMA_BASE_URL"), - "mode": "completion", - "context_size": 2048, - "max_tokens": 2048, - }, - prompt_messages=[ - UserPromptMessage( - content=[ - TextPromptMessageContent( - data="What is this in this picture?", - ), - ImagePromptMessageContent( - 
data="data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAE4AAABMCAYAAADDYoEWAAAMQGlDQ1BJQ0MgUHJvZmlsZQAASImVVwdYU8kWnluSkEBoAQSkhN4EkRpASggt9I4gKiEJEEqMgaBiRxcVXLuIgA1dFVGwAmJBETuLYu+LBRVlXSzYlTcpoOu+8r35vrnz33/O/OfMmbllAFA7zhGJclF1APKEBeLYYH/6uOQUOukpIAEdoAy0gA2Hmy9iRkeHA1iG2r+Xd9cBIm2v2Eu1/tn/X4sGj5/PBQCJhjidl8/Ng/gAAHg1VyQuAIAo5c2mFoikGFagJYYBQrxIijPluFqK0+V4j8wmPpYFcTsASiocjjgTANVLkKcXcjOhhmo/xI5CnkAIgBodYp+8vMk8iNMgtoY2Ioil+oz0H3Qy/6aZPqzJ4WQOY/lcZEUpQJAvyuVM/z/T8b9LXq5kyIclrCpZ4pBY6Zxh3m7mTA6TYhWI+4TpkVEQa0L8QcCT2UOMUrIkIQlye9SAm8+COYMrDVBHHicgDGIDiIOEuZHhCj49QxDEhhjuEHSaoIAdD7EuxIv4+YFxCptN4smxCl9oY4aYxVTwZzlimV+pr/uSnASmQv91Fp+t0MdUi7LikyCmQGxeKEiMhFgVYof8nLgwhc3YoixW5JCNWBIrjd8c4li+MNhfro8VZoiDYhX2pXn5Q/PFNmUJ2JEKvK8gKz5Enh+sncuRxQ/ngl3iC5kJQzr8/HHhQ3Ph8QMC5XPHnvGFCXEKnQ+iAv9Y+VicIsqNVtjjpvzcYClvCrFLfmGcYiyeWAA3pFwfzxAVRMfL48SLsjmh0fJ48OUgHLBAAKADCazpYDLIBoLOvqY+eCfvCQIcIAaZgA/sFczQiCRZjxBe40AR+BMiPsgfHucv6+WDQsh/HWblV3uQIestlI3IAU8gzgNhIBfeS2SjhMPeEsFjyAj+4Z0DKxfGmwurtP/f80Psd4YJmXAFIxnySFcbsiQGEgOIIcQgog2uj/vgXng4vPrB6oQzcI+heXy3JzwhdBEeEq4Rugm3JgmKxT9FGQG6oX6QIhfpP+YCt4Sarrg/7g3VoTKug+sDe9wF+mHivtCzK2RZirilWaH/pP23GfywGgo7siMZJY8g+5Gtfx6paqvqOqwizfWP+ZHHmj6cb9Zwz8/+WT9knwfbsJ8tsUXYfuwMdgI7hx3BmgAda8WasQ7sqBQP767Hst015C1WFk8O1BH8w9/Qykozme9Y59jr+EXeV8CfJn1HA9Zk0XSxIDOrgM6EXwQ+nS3kOoyiOzk6OQMg/b7IX19vYmTfDUSn4zs3/w8AvFsHBwcPf+dCWwHY6w4f/0PfOWsG/HQoA3D2EFciLpRzuPRCgG8JNfik6QEjYAas4XycgBvwAn4gEISCKBAPksFEGH0W3OdiMBXMBPNACSgDy8EaUAk2gi1gB9gN9oEmcAScAKfBBXAJXAN34O7pAS9AP3gHPiMIQkKoCA3RQ4wRC8QOcUIYiA8SiIQjsUgykoZkIkJEgsxE5iNlyEqkEtmM1CJ7kUPICeQc0oXcQh4gvchr5BOKoSqoFmqIWqKjUQbKRMPQeHQCmolOQYvQBehStAKtQXehjegJ9AJ6De1GX6ADGMCUMR3MBLPHGBgLi8JSsAxMjM3GSrFyrAarx1rgOl/BurE+7CNOxGk4HbeHOzgET8C5+BR8Nr4Er8R34I14O34Ff4D3498IVIIBwY7gSWATxhEyCVMJJYRywjbCQcIp+Cz1EN4RiUQdohXRHT6LycRs4gziEuJ6YgPxOLGL+Ig4QCKR9Eh2JG9SFIlDKiCVkNaRdpFaSZdJPaQPSspKxkpOSkFKKUpCpWKlcqWdSseULis9VfpMVidbkD3JUWQeeTp5GXkruYV8kdxD/kzRoFhRvCnxlGzKPEoFpZ5yinKX8kZZWdlU2UM5RlmgPFe5QnmP8lnlB8ofVTRVbFVYKqkqEpWlKttVjqvcUnlDpVItqX7UFGoBdSm1lnqSep/6QZWm6qDKVuWpzlGtUm1Uvaz6Uo2sZqHGVJuoVqRWrrZf7aJanzpZ3VKdpc5Rn61epX5I/Yb6gAZNY4xGlEaexhKNnRrnNJ5pkjQtNQM1eZoLNLdontR8RMNoZjQWjUubT9tKO0Xr0SJqWWmxtbK1yrR2a3Vq9WtrartoJ2pP067SPqrdrYPpWOqwdXJ1luns07mu82mE4QjmCP6IxSPqR1we8V53pK6fLl+3VLdB95ruJz26XqBejt4KvSa9e/q4vq1+jP5U/Q36p/T7RmqN9BrJHVk6ct/I2waoga1BrMEMgy0GHQYDhkaGwYYiw3WGJw37jHSM/IyyjVYbHTPqNaYZ+xgLjFcbtxo/p2vTmfRcegW9nd5vYmASYiIx2WzSafLZ1Mo0wbTYtMH0nhnFjGGWYbbarM2s39zYPMJ8pnmd+W0LsgXDIstircUZi/eWVpZJlgstmyyfWelasa2KrOqs7lpTrX2tp1jXWF+1IdowbHJs1ttcskVtXW2zbKtsL9qhdm52Arv1dl2jCKM8RglH1Yy6Ya9iz7QvtK+zf+Cg4xDuUOzQ5PBytPnolNErRp8Z/c3R1THXcavjnTGaY0LHFI9pGfPaydaJ61TldNWZ6hzkPMe52fmVi50L32WDy01XmmuE60LXNtevbu5uYrd6t153c/c092r3GwwtRjRjCeOsB8HD32OOxxGPj55ungWe+zz/8rL3yvHa6fVsrNVY/titYx95m3pzvDd7d/vQfdJ8Nvl0+5r4cnxrfB/6mfnx/Lb5PWXaMLOZu5gv/R39xf4H/d+zPFmzWMcDsIDggNKAzkDNwITAysD7QaZBmUF1Qf3BrsEzgo+HEELCQlaE3GAbsrnsWnZ/qHvorND2MJWwuLDKsIfhtuHi8JYINCI0YlXE3UiLSGFkUxSIYketiroXbRU9JfpwDDEmOqYq5knsmNiZsWfiaHGT4nbGvYv3j18WfyfBOkGS0JaolpiaWJv4PikgaWVS97jR42aNu5CsnyxIbk4hpSSmbEsZGB84fs34nlTX1JLU6xOsJkybcG6i/sTciUcnqU3iTNqfRkhLStuZ9oUTxanhDKSz06vT+7ks7lruC54fbzWvl+/NX8l/muGdsTLjWaZ35qrM3izfrPKsPgFLUCl4lR2SvTH7fU5Uzvacwdyk3IY8pby0vENCTWGOsH2y0eRpk7tEdqISUfcUzylrpvSLw8Tb8pH8CfnNBVrwR75DYi35RfKg0KewqvDD1MSp+6dpTBNO65huO33x9KdFQUW/zcBncGe0zTSZOW/mg1nMWZtnI7PTZ7fNMZuzYE7P3OC5O+ZR5uXM+73YsXhl8dv5SfNbFhgumLvg0S/Bv9SVqJaIS24s9Fq4cRG+SLCoc7Hz4nWLv5XySs+XOZaVl31Zwl1y/tcxv1b8Org0Y2nnMrdlG5YTlwuXX1/hu2LHSo2VRSsfrYpY1biavrp09ds1k9acK3cp37iWslaytrsivKJ5nfm65eu+VGZVXqvyr2qoNqheXP1+PW/
95Q1+G+o3Gm4s2/hpk2DTzc3BmxtrLGvKtxC3FG55sjVx65nfGL/VbtPfVrbt63bh9u4dsTvaa91ra3ca7FxWh9ZJ6np3pe66tDtgd3O9ff3mBp2Gsj1gj2TP871pe6/vC9vXtp+xv/6AxYHqg7SDpY1I4/TG/qaspu7m5OauQ6GH2lq8Wg4edji8/YjJkaqj2keXHaMcW3BssLWodeC46HjficwTj9omtd05Oe7k1faY9s5TYafOng46ffIM80zrWe+zR855njt0nnG+6YLbhcYO146Dv7v+frDTrbPxovvF5ksel1q6xnYdu+x7+cSVgCunr7KvXrgWea3resL1mzdSb3Tf5N18div31qvbhbc/35l7l3C39J76vfL7Bvdr/rD5o6Hbrfvog4AHHQ/jHt55xH304nH+4y89C55Qn5Q/NX5a+8zp2ZHeoN5Lz8c/73khevG5r+RPjT+rX1q/PPCX318d/eP6e16JXw2+XvJG7832ty5v2waiB+6/y3v3+X3pB70POz4yPp75lPTp6eepX0hfKr7afG35Fvbt7mDe4KCII+bIfgUwWNGMDABebweAmgwADZ7PKOPl5z9ZQeRnVhkC/wnLz4iy4gZAPfx/j+mDfzc3ANizFR6/oL5aKgDRVADiPQDq7Dxch85qsnOltBDhOWBT5Nf0vHTwb4r8zPlD3D+3QKrqAn5u/wWdZ3xtG7qP3QAAADhlWElmTU0AKgAAAAgAAYdpAAQAAAABAAAAGgAAAAAAAqACAAQAAAABAAAATqADAAQAAAABAAAATAAAAADhTXUdAAARnUlEQVR4Ae2c245bR3aGi4fulizFHgUzQAYIggBB5klymfeaZ8hDBYjvAiRxkMAGkowRWx7JktjcZL7vX1Uku62Burkl5YbV5q7Tqqq1/v3XqgMpL95tbvftEh6NwPLRLS4NgsAFuDOJcAHuAtyZCJzZ7MK4C3BnInBmswvjLsCdicCZzS6MOxO49Znt0uz3//CPbbv6srXFrq0W9Q6Wi0VbLPn4R8x/jSLiu3nrl8s9dcartlwtKdmTbm21XranN6v27Mm6XV8t25fP1+3Pn1+1r4if3Czbk+t9u1rR6f9jmAXc1P6sbaevQGbfdgGJeA8ke0AQsCYYgiYgPR1QyVO+3wvcMm2WO0G2PeWkX79btp839AG4//UjYC62gDsB2rI9f7pov3q2bX/9F1ftBWAufTufOcwCrnTtR90dOdHoNgCJeAbUkuM5TsWAW5W9gfkE83ZkUHg0oAyAwbm927a2ebVoP/xx2f7jD1uYuG9/89tF+/VXK1hq+88TZgG32O1g2r7tpRdBM8fUTM7pyR8SYddgxkJErUszHti7U44CpzyEo16syNtx+qgy+1og7RMetpev9+3rb3bt+c2u/ebFsv3uL1ftiqn+qcMs4HY7jNQpEfadNU5VqeHUTJkgUbaPDxRADdZ8jU9LHoJYnwLUtgWN4ObDC7Kdr8Hp7d9qMTW8gt23V1zyvPrD1H56e9t+99vr9uJLprBDfaIw69U4dQRCIw2JdVIjbUzecj+7qYyPpZHiAbDaJwsXyMhQEQ0pq6sAp7hMS2XGqykdA2iy4EUtF6v206ur9k/fbNo//+frtt2OaW/rjxtmAaeNGqihBY5xfVQzQEZfoSH0KHgkrbD/CX6vPIqlSTU61vVCovRSbEwbIS851vj23Q+tff3vu/bzu5I7tvs4qVnADTa5FCbNC86qCLN2E1MxKKroYB2pgSz2RLbbVcVkSJhOKxIDjGxn+nSuqes2JlKuG8fA/IzPXazbj68X7et/27UfX7GifORwOuSju47h/c3beKfRFO74CNA04YP0ZT2/YzERFGojc9pmDG47/wyDZwJjiX4wwJNer1dZPJbs5/xzK5Ppzp7SQZBszNy22U7tX7/dtFdvJrv8aGE2cDJLoPycBgHSgICJUQLo8nmUo6y7oH0S5Lu/FGhDQULCfIooATw3yyOQQ46eYVpYiaBMTFtAFPR307r9y3fbdvsRfd5Rg6HJI2Lt1qaAF6TEqoxWdVdYSHawezCvAHLjW7Jh2QGcUkDDT4Og2OfSFRVkxipcAJUZARC5FVRbeRpB1hVY6r25XQHexIZ96Hfa++PTs4Dbi8rQg7imWQG27/uEgCTCssk/WWg7GwJWwDQ36PceGzQ+x7jOtgNogkIIpsZiFMdXoEfOPUlh3l5ulu2/X6bJ7Mc84Bw+xgOKzJqM0VKm8WYlVMqt61gFKNtQKeZ6o7Ls/aqEeYooJXDIZ9uiT0uZ5UxPUJNlYdoAK62qHfM7unz3/bb9/Ha+v3u/tn3AD0XOrnxAZdpNYZILgoxyGk4BqMCbssq66dXv6RdFkiB6Rj2u3N1npiMw1dQjF4oJW/kzy6VdMRFA9Xd8VvhCLxCyYUYkvhHZb7+fotvdUR6XmwXcYI1DangAA6yspgBj/dRjp6L+RbmSPaaxuuMnGEeVAhBF4pSapAFG5gUo60rAHmpVtcz0sR2aBZW8NAB9+W7dXr9N0dmPmUcu10pWrq7kQQvBQXn1dUsgoM4ej12TtyBknG51PEMGOV2TLLVZ/GLvLMBYHsYJhg7fuMBx6tq3LFu7aBxxD9jKFiO7Thbwcv7n5dS+/ML0eWEWcBqoptk+mEQp2aTG+rbmBYA+D6MyMwMAdepKsX5QpnglFZyZ5k4tDYsI/Y1pF7CRq22HoHXgGEOwgodvgH79INnW3tlFIVVQvkBXg1dvF3z27fkTGzw+zALOPZluVoVkV4yLHoBB3VBJUNyo6uEWXAyIkruC2OQjbVeppxkm8+iti2mySsM1EPYGKBcEyul3LKTW1+pr+wLRstwP0J8a2K95Txf/+6q1ZzeUDEXt/oFhHnA4fJYCBtawYlWmlsrJBEHhP43bi9Rq1Z0ymlK3Z/QCRqA5YfaNLZJWEACn929eluXlUGO8CgMrHWYi441S2tsFebLRL5RWL0e0nL64SEEf2sjMR4ZZwA0Ddfziclz1eN8yDn1qAaHSq3G0FEQXjABDo51sJVNyGnA0QlAPL4LOApzMo0mY1sUFbQBj8xTzYhKrROYF5VGIftR1uW3+3uiWU8XnBw7l3HIYVG/P/djYgMZoyrTJrci0n2qPZVnNFV913viW6btGzsXBT6aW3VKmsauVTFOc2DxpP5YJYLBBeCUixE71IlGBR2EF+6OugHbP12Ddoj29HgIPj+cxDiPDFGINzB8sKhLh0Ui4gOgDI8deb8FiwYxlteWhLHWTlmOzhkxLAObPIkFqS8+bbG5BdgWiAmJTwXdqZ7oysktzdKC/BWMWiAJNpyP0ZPTMItRy7fTi2RB4eDwLuIkpCma1gob/Dsw7zcKAMf3txiCot8c42ZCDPu3WAqRMJAGEk4cACaLzSZsFRhAE9QoAtXcwTX92XDT0sxTQXJYHdDJin0KfVN8PmzNvnOYBx5XNlik4giumihb7tJ60ezgNhgXuXgRNttxunZYAj7uzbL3nUA67rm5KJWrJCyTfIVwBMh3bTkD8TqFYp6uv8RwrgJpAZmHHScqv0qWeKT48Nu
jhAuELekyYBdz9gXJQ53DvDh3tU62xTtN8bQhzzE9OccAK8wA2ez2k3cNtN7wM/RZs9M5NkNZoee0H2rmhLr8miPV9roAZtN1RHV/gDb7EoUtXKeXjYXUBN0oeFs8CbrtlhZRGPZSSZNyI9gA+TBFkelFNWxgEgCtG3wDiFqEr5Jz6y/U1DAM4QLxi2l7DNhl3w/epNTUFWGbXC7HrMQMz7WUbf8AaDQ46DYXuxLoJX6CFRzvuiPyJzCzgZIoKyqgKAx1yAGPQUWfa+GoDsqwDJNnHLF9juSz0i5VrpvqSwmsQul5dtyfrfX1zL3i0WdHHSjaKVjf0T5k7ABtxlEHbwxusgjydAY8N84BjvAx5GLfMqBW0VJEZ+pwKskQnbpnFHPzpwWo/bzkGvX51296+bu1v/+qL9usXT9rTJ07Bzh9k9HEPsxNhwhh6xLXKo3fXWf3iMkrBBz9nAbflbHm6ONxhXp8/NW26lkSleIEV9FBVI+o6ihjmffPDt+3v/+5Z+82vnsZw/fyercweB2d7wzA8mfuPEknpXTnHvQsoPd1v/aD8LODw+AxbAw/QjnEfv69u5kz6dtOiW2R6YmW7vd0C3qK94wcjf/zxZ1bRXfvqGT6U3f2G/Z6AesqotgJX477PNVmTmxfiwTSS5irqz2ybEHD6PzbMAk7lS/0BxgkTqPAUYBiAkQpTLLdKxe1D4Lbsp968uW1vXk+ZrnpsN7yL1TbmbvCl4GcPPPStZWyNcM9s++9y92ruZu2CT21q7lZ9KDcLuC3WbmGG42uA30EISOVkFynt1BBialOliF/wZHqGTa1tOfq8fbMHPL6N2iBPW2d7HfxZdWnreiN49UL0dfhLR6tBSVVwNo+TQ1U5IsHvQU4Dcry7bGNOix+SngVcwAhYpZjTQxaNMABLLLtUFEAMEwi4kk63fGDbLTcVm82ubd7hNylzEXCa6SPdz2Vf5iUobe0jAFIq8+JHT8CjGeUjHFOj5E7MIO4THxvOaHIcwu2IOKiznyg89BTEXi6WssO8B36vkLa33Pv7/QRbEtm21c/BtIm9Yb4ho19PDg4g09aeucySdpzq3BfVx6WQqh7MkLOSkHLf2olEKni4n7xznh0VH4jnAYdy6hfVSZTvUmF54f2cU9d9XmlhvUyTlbkxIT0BWtgH4wRRgPMy7EFbAwi8ojzbNyqtH/7coWxnUHyE+rmYjbs3NCnqdwIbbM/GZ4RZwDleVskO3viSBhWjSu2Pxj7JU4bsqrzTU5YZQ7xKu73Bb8bAbo+s28NStxEyb8e+K1UAKXhOVivK7x0RUANf3zEw/smJpsr37cad9RlhFnCbzQYwfN36I+5qwxgVwRA/vOHxlneeMiaux9lymN5tTTttkZN5mbZwCYsLM550taA+zJM5gsdHsGSdQTbngN7ZlC/JrRhXIcorRJvVcp2pnjzdy+0nnErOCbOAE5x8d4oVCy4xMSFGetjfgWJ3MQFHdomxZbUwwC4B84YlzBNojUEmxmqO1tVC4VcVopUzKuXK+XArUeDVTyq85wv7xKqHsel1dfIUkl8zUXcFm8eUH7IPjWcBp8J5mYxWcWmbclhlyEIAMJm2HbSwDCHZGD9IuR1UH4MhaZ4HOAIQIJOrIxfjxOFRUMNQq8wI9EH5WNVJdcEje22ofxs3K6PlQ+OZwA2ghrFSKhiEVSqh/5JJcfodKBnntLac7wb5CKLpAs+0RguYuAhoNh2CRV1dTVFhqWhRn/u+tOsMtTph6JhOkAWsQDz1K3NHeHyYBZyK70BG5oy3SyqGumoaAhr1Aiggnm8FzXr3cQWSq++p8seM10v6LW9Elgh5kyGINXMdi1xspw2LRHwqMjJTV2KdU9c2eQ1SkXDDHL2aYf2MprVp1dFrtcBlAWB/sNuxMoJIzEfRqhMk04qXfM0n8yVDaa/DRLp1GuGSKhNz65ZEOQUSdyD0Y/adRSojsxjoz2jnNFdN3l/S+sUvnqbDsx+zgCvQMJzhPaCrlouCLBvbA43x68DhsAc7DxpTr0y39VAMBCfpSlpSUMggzRe8X4bIAWRYJqVJj6t7feMV/9Bkfeb+bYw2Czg78S3GwWtEQEPRWFMMEDAZhVTiMaWLnZZRxSexfaStPR9DAXbMj5Qs479Dm8PqqYCNEpUTVAe/GpLC3vH16hI64zkLuB1XQVsdFkED8ps40oLjj2sMAdbFwGlKRjbW6UHAFZaRJVegIpeWVafZhQ4yHahUm+5VyfOwXYFHTX8DKUNSn+fCcsN3qOd8AT3GGPEs4EYnxho9YlOnU1WTUj98GbLKWCawI5wk71DiBMoh+qjYfgXUc+nNlW+rXuqjOrknPAs4sRoHcvvNguDZNEChYOoBUUZ175z9nMBZnQ6cnncgS7uDnt3BJ49Y8axqPYLZ0gVEb2DaICyHtOUM5t2eP7AJexWaGWYBVzcdsqneoAAViyzzo3ZsC1Jeq2qBKVhlkIxDsuSRrSY6/6S6eaaFjD+B4BGmMo9X9M06kcAdMq0qU5eT+lBBc8+GqaVmCc989iHP6yVvOcr4qE8ZLijVZ8VleC/5xWDWFmN6ow6aIKX75EfdL5rfKxBJgAcwwV/zeXrFjyqqo3uy52dnMa5oU4O7svo7YMNgWrFKdsk6WBXmmS82HuKsuADjHZFGi5iBIv+9qnn/qt+qSh3JTFNjPvWDiqpnA0SexYB/ijm6q5qP85wFnIZrXQHgillpVesHh9QVaAWWAJccfo/VNrOcbmrbYn/vCR9gy2m1aUH2WOa/rv4UoKnhPODowC2Gx6jQo4Nox4ZinDL392ssIHFSZWa1rTZJD/wSy0Kn34eDpwZvP1w96+dmH25zrsQs4KSLP4GAawWSjhnFZZQFmUZxOZSTj/ne2yUhIHCjRIlFKcIU0x852RjZTGGlDdaQrkxk7MPrJr/gzg17r4vgJ3rMAk4/wmQDE7wJhg+fFV1xaMGiMqnXaFc5jd4FjCCIRAEmAO5aPE7lzsw0ZelHYJB0PCWscErqOJcsrbllGmhmzE/7mAXcPof544Wlqg6wTuORtvKQzjV2gVC+shaNMhc24v8iIloGmS3ogc7bD9sS884Oi0kEP89jFnDX++/hCtPVtT7kwaxOkZpmxQ/L9vgdj1r+NCtAwQ6/A9DXMXnBqZgoHDdXP7Wna/Id6PRCum7DiREqcg1UPw9Yp6MsLv/HwlM4Hp7WQ1/CGQhcgDsDNJtcgLsAdyYCZza7MO4C3JkInNnswrgLcGcicGazC+POBO7/AH5zPa/ivytzAAAAAElFTkSuQmCC" - ), - ] - ) - ], - model_parameters={"temperature": 0.1, "num_predict": 100}, - stream=False, - ) - - assert isinstance(result, LLMResult) - assert len(result.message.content) > 0 - - -def test_invoke_chat_model_with_vision(): - model = 
OllamaLargeLanguageModel() - - result = model.invoke( - model="llava", - credentials={ - "base_url": os.environ.get("OLLAMA_BASE_URL"), - "mode": "chat", - "context_size": 2048, - "max_tokens": 2048, - }, - prompt_messages=[ - UserPromptMessage( - content=[ - TextPromptMessageContent( - data="What is this in this picture?", - ), - ImagePromptMessageContent( - data="data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAE4AAABMCAYAAADDYoEWAAAMQGlDQ1BJQ0MgUHJvZmlsZQAASImVVwdYU8kWnluSkEBoAQSkhN4EkRpASggt9I4gKiEJEEqMgaBiRxcVXLuIgA1dFVGwAmJBETuLYu+LBRVlXSzYlTcpoOu+8r35vrnz33/O/OfMmbllAFA7zhGJclF1APKEBeLYYH/6uOQUOukpIAEdoAy0gA2Hmy9iRkeHA1iG2r+Xd9cBIm2v2Eu1/tn/X4sGj5/PBQCJhjidl8/Ng/gAAHg1VyQuAIAo5c2mFoikGFagJYYBQrxIijPluFqK0+V4j8wmPpYFcTsASiocjjgTANVLkKcXcjOhhmo/xI5CnkAIgBodYp+8vMk8iNMgtoY2Ioil+oz0H3Qy/6aZPqzJ4WQOY/lcZEUpQJAvyuVM/z/T8b9LXq5kyIclrCpZ4pBY6Zxh3m7mTA6TYhWI+4TpkVEQa0L8QcCT2UOMUrIkIQlye9SAm8+COYMrDVBHHicgDGIDiIOEuZHhCj49QxDEhhjuEHSaoIAdD7EuxIv4+YFxCptN4smxCl9oY4aYxVTwZzlimV+pr/uSnASmQv91Fp+t0MdUi7LikyCmQGxeKEiMhFgVYof8nLgwhc3YoixW5JCNWBIrjd8c4li+MNhfro8VZoiDYhX2pXn5Q/PFNmUJ2JEKvK8gKz5Enh+sncuRxQ/ngl3iC5kJQzr8/HHhQ3Ph8QMC5XPHnvGFCXEKnQ+iAv9Y+VicIsqNVtjjpvzcYClvCrFLfmGcYiyeWAA3pFwfzxAVRMfL48SLsjmh0fJ48OUgHLBAAKADCazpYDLIBoLOvqY+eCfvCQIcIAaZgA/sFczQiCRZjxBe40AR+BMiPsgfHucv6+WDQsh/HWblV3uQIestlI3IAU8gzgNhIBfeS2SjhMPeEsFjyAj+4Z0DKxfGmwurtP/f80Psd4YJmXAFIxnySFcbsiQGEgOIIcQgog2uj/vgXng4vPrB6oQzcI+heXy3JzwhdBEeEq4Rugm3JgmKxT9FGQG6oX6QIhfpP+YCt4Sarrg/7g3VoTKug+sDe9wF+mHivtCzK2RZirilWaH/pP23GfywGgo7siMZJY8g+5Gtfx6paqvqOqwizfWP+ZHHmj6cb9Zwz8/+WT9knwfbsJ8tsUXYfuwMdgI7hx3BmgAda8WasQ7sqBQP767Hst015C1WFk8O1BH8w9/Qykozme9Y59jr+EXeV8CfJn1HA9Zk0XSxIDOrgM6EXwQ+nS3kOoyiOzk6OQMg/b7IX19vYmTfDUSn4zs3/w8AvFsHBwcPf+dCWwHY6w4f/0PfOWsG/HQoA3D2EFciLpRzuPRCgG8JNfik6QEjYAas4XycgBvwAn4gEISCKBAPksFEGH0W3OdiMBXMBPNACSgDy8EaUAk2gi1gB9gN9oEmcAScAKfBBXAJXAN34O7pAS9AP3gHPiMIQkKoCA3RQ4wRC8QOcUIYiA8SiIQjsUgykoZkIkJEgsxE5iNlyEqkEtmM1CJ7kUPICeQc0oXcQh4gvchr5BOKoSqoFmqIWqKjUQbKRMPQeHQCmolOQYvQBehStAKtQXehjegJ9AJ6De1GX6ADGMCUMR3MBLPHGBgLi8JSsAxMjM3GSrFyrAarx1rgOl/BurE+7CNOxGk4HbeHOzgET8C5+BR8Nr4Er8R34I14O34Ff4D3498IVIIBwY7gSWATxhEyCVMJJYRywjbCQcIp+Cz1EN4RiUQdohXRHT6LycRs4gziEuJ6YgPxOLGL+Ig4QCKR9Eh2JG9SFIlDKiCVkNaRdpFaSZdJPaQPSspKxkpOSkFKKUpCpWKlcqWdSseULis9VfpMVidbkD3JUWQeeTp5GXkruYV8kdxD/kzRoFhRvCnxlGzKPEoFpZ5yinKX8kZZWdlU2UM5RlmgPFe5QnmP8lnlB8ofVTRVbFVYKqkqEpWlKttVjqvcUnlDpVItqX7UFGoBdSm1lnqSep/6QZWm6qDKVuWpzlGtUm1Uvaz6Uo2sZqHGVJuoVqRWrrZf7aJanzpZ3VKdpc5Rn61epX5I/Yb6gAZNY4xGlEaexhKNnRrnNJ5pkjQtNQM1eZoLNLdontR8RMNoZjQWjUubT9tKO0Xr0SJqWWmxtbK1yrR2a3Vq9WtrartoJ2pP067SPqrdrYPpWOqwdXJ1luns07mu82mE4QjmCP6IxSPqR1we8V53pK6fLl+3VLdB95ruJz26XqBejt4KvSa9e/q4vq1+jP5U/Q36p/T7RmqN9BrJHVk6ct/I2waoga1BrMEMgy0GHQYDhkaGwYYiw3WGJw37jHSM/IyyjVYbHTPqNaYZ+xgLjFcbtxo/p2vTmfRcegW9nd5vYmASYiIx2WzSafLZ1Mo0wbTYtMH0nhnFjGGWYbbarM2s39zYPMJ8pnmd+W0LsgXDIstircUZi/eWVpZJlgstmyyfWelasa2KrOqs7lpTrX2tp1jXWF+1IdowbHJs1ttcskVtXW2zbKtsL9qhdm52Arv1dl2jCKM8RglH1Yy6Ya9iz7QvtK+zf+Cg4xDuUOzQ5PBytPnolNErRp8Z/c3R1THXcavjnTGaY0LHFI9pGfPaydaJ61TldNWZ6hzkPMe52fmVi50L32WDy01XmmuE60LXNtevbu5uYrd6t153c/c092r3GwwtRjRjCeOsB8HD32OOxxGPj55ungWe+zz/8rL3yvHa6fVsrNVY/titYx95m3pzvDd7d/vQfdJ8Nvl0+5r4cnxrfB/6mfnx/Lb5PWXaMLOZu5gv/R39xf4H/d+zPFmzWMcDsIDggNKAzkDNwITAysD7QaZBmUF1Qf3BrsEzgo+HEELCQlaE3GAbsrnsWnZ/qHvorND2MJWwuLDKsIfhtuHi8JYINCI0YlXE3UiLSGFkUxSIYketiroXbRU9JfpwDDEmOqYq5knsmNiZsWfiaHGT4nbGvYv3j18WfyfBOkGS0JaolpiaWJv4PikgaWVS97jR42aNu5CsnyxIbk4hpSSmbEsZGB84fs34nlTX1JLU6xOsJkybcG6i/sTciUcnqU3iTNqfRkhLStuZ9oUTxanhDKSz06vT+7ks7lruC54fbzWvl+/NX8l/muGdsTLjWaZ35qrM3izfrPKsPgFLUCl4lR2SvTH7fU5Uz
vacwdyk3IY8pby0vENCTWGOsH2y0eRpk7tEdqISUfcUzylrpvSLw8Tb8pH8CfnNBVrwR75DYi35RfKg0KewqvDD1MSp+6dpTBNO65huO33x9KdFQUW/zcBncGe0zTSZOW/mg1nMWZtnI7PTZ7fNMZuzYE7P3OC5O+ZR5uXM+73YsXhl8dv5SfNbFhgumLvg0S/Bv9SVqJaIS24s9Fq4cRG+SLCoc7Hz4nWLv5XySs+XOZaVl31Zwl1y/tcxv1b8Org0Y2nnMrdlG5YTlwuXX1/hu2LHSo2VRSsfrYpY1biavrp09ds1k9acK3cp37iWslaytrsivKJ5nfm65eu+VGZVXqvyr2qoNqheXP1+PW/95Q1+G+o3Gm4s2/hpk2DTzc3BmxtrLGvKtxC3FG55sjVx65nfGL/VbtPfVrbt63bh9u4dsTvaa91ra3ca7FxWh9ZJ6np3pe66tDtgd3O9ff3mBp2Gsj1gj2TP871pe6/vC9vXtp+xv/6AxYHqg7SDpY1I4/TG/qaspu7m5OauQ6GH2lq8Wg4edji8/YjJkaqj2keXHaMcW3BssLWodeC46HjficwTj9omtd05Oe7k1faY9s5TYafOng46ffIM80zrWe+zR855njt0nnG+6YLbhcYO146Dv7v+frDTrbPxovvF5ksel1q6xnYdu+x7+cSVgCunr7KvXrgWea3resL1mzdSb3Tf5N18div31qvbhbc/35l7l3C39J76vfL7Bvdr/rD5o6Hbrfvog4AHHQ/jHt55xH304nH+4y89C55Qn5Q/NX5a+8zp2ZHeoN5Lz8c/73khevG5r+RPjT+rX1q/PPCX318d/eP6e16JXw2+XvJG7832ty5v2waiB+6/y3v3+X3pB70POz4yPp75lPTp6eepX0hfKr7afG35Fvbt7mDe4KCII+bIfgUwWNGMDABebweAmgwADZ7PKOPl5z9ZQeRnVhkC/wnLz4iy4gZAPfx/j+mDfzc3ANizFR6/oL5aKgDRVADiPQDq7Dxch85qsnOltBDhOWBT5Nf0vHTwb4r8zPlD3D+3QKrqAn5u/wWdZ3xtG7qP3QAAADhlWElmTU0AKgAAAAgAAYdpAAQAAAABAAAAGgAAAAAAAqACAAQAAAABAAAATqADAAQAAAABAAAATAAAAADhTXUdAAARnUlEQVR4Ae2c245bR3aGi4fulizFHgUzQAYIggBB5klymfeaZ8hDBYjvAiRxkMAGkowRWx7JktjcZL7vX1Uku62Burkl5YbV5q7Tqqq1/v3XqgMpL95tbvftEh6NwPLRLS4NgsAFuDOJcAHuAtyZCJzZ7MK4C3BnInBmswvjLsCdicCZzS6MOxO49Znt0uz3//CPbbv6srXFrq0W9Q6Wi0VbLPn4R8x/jSLiu3nrl8s9dcartlwtKdmTbm21XranN6v27Mm6XV8t25fP1+3Pn1+1r4if3Czbk+t9u1rR6f9jmAXc1P6sbaevQGbfdgGJeA8ke0AQsCYYgiYgPR1QyVO+3wvcMm2WO0G2PeWkX79btp839AG4//UjYC62gDsB2rI9f7pov3q2bX/9F1ftBWAufTufOcwCrnTtR90dOdHoNgCJeAbUkuM5TsWAW5W9gfkE83ZkUHg0oAyAwbm927a2ebVoP/xx2f7jD1uYuG9/89tF+/VXK1hq+88TZgG32O1g2r7tpRdBM8fUTM7pyR8SYddgxkJErUszHti7U44CpzyEo16syNtx+qgy+1og7RMetpev9+3rb3bt+c2u/ebFsv3uL1ftiqn+qcMs4HY7jNQpEfadNU5VqeHUTJkgUbaPDxRADdZ8jU9LHoJYnwLUtgWN4ObDC7Kdr8Hp7d9qMTW8gt23V1zyvPrD1H56e9t+99vr9uJLprBDfaIw69U4dQRCIw2JdVIjbUzecj+7qYyPpZHiAbDaJwsXyMhQEQ0pq6sAp7hMS2XGqykdA2iy4EUtF6v206ur9k/fbNo//+frtt2OaW/rjxtmAaeNGqihBY5xfVQzQEZfoSH0KHgkrbD/CX6vPIqlSTU61vVCovRSbEwbIS851vj23Q+tff3vu/bzu5I7tvs4qVnADTa5FCbNC86qCLN2E1MxKKroYB2pgSz2RLbbVcVkSJhOKxIDjGxn+nSuqes2JlKuG8fA/IzPXazbj68X7et/27UfX7GifORwOuSju47h/c3beKfRFO74CNA04YP0ZT2/YzERFGojc9pmDG47/wyDZwJjiX4wwJNer1dZPJbs5/xzK5Ppzp7SQZBszNy22U7tX7/dtFdvJrv8aGE2cDJLoPycBgHSgICJUQLo8nmUo6y7oH0S5Lu/FGhDQULCfIooATw3yyOQQ46eYVpYiaBMTFtAFPR307r9y3fbdvsRfd5Rg6HJI2Lt1qaAF6TEqoxWdVdYSHawezCvAHLjW7Jh2QGcUkDDT4Og2OfSFRVkxipcAJUZARC5FVRbeRpB1hVY6r25XQHexIZ96Hfa++PTs4Dbi8rQg7imWQG27/uEgCTCssk/WWg7GwJWwDQ36PceGzQ+x7jOtgNogkIIpsZiFMdXoEfOPUlh3l5ulu2/X6bJ7Mc84Bw+xgOKzJqM0VKm8WYlVMqt61gFKNtQKeZ6o7Ls/aqEeYooJXDIZ9uiT0uZ5UxPUJNlYdoAK62qHfM7unz3/bb9/Ha+v3u/tn3AD0XOrnxAZdpNYZILgoxyGk4BqMCbssq66dXv6RdFkiB6Rj2u3N1npiMw1dQjF4oJW/kzy6VdMRFA9Xd8VvhCLxCyYUYkvhHZb7+fotvdUR6XmwXcYI1DangAA6yspgBj/dRjp6L+RbmSPaaxuuMnGEeVAhBF4pSapAFG5gUo60rAHmpVtcz0sR2aBZW8NAB9+W7dXr9N0dmPmUcu10pWrq7kQQvBQXn1dUsgoM4ej12TtyBknG51PEMGOV2TLLVZ/GLvLMBYHsYJhg7fuMBx6tq3LFu7aBxxD9jKFiO7Thbwcv7n5dS+/ML0eWEWcBqoptk+mEQp2aTG+rbmBYA+D6MyMwMAdepKsX5QpnglFZyZ5k4tDYsI/Y1pF7CRq22HoHXgGEOwgodvgH79INnW3tlFIVVQvkBXg1dvF3z27fkTGzw+zALOPZluVoVkV4yLHoBB3VBJUNyo6uEWXAyIkruC2OQjbVeppxkm8+iti2mySsM1EPYGKBcEyul3LKTW1+pr+wLRstwP0J8a2K95Txf/+6q1ZzeUDEXt/oFhHnA4fJYCBtawYlWmlsrJBEHhP43bi9Rq1Z0ymlK3Z/QCRqA5YfaNLZJWEACn929eluXlUGO8CgMrHWYi441S2tsFebLRL5RWL0e0nL64SEEf2sjMR4ZZwA0Ddfziclz1eN8yDn1qAaHSq3G0FEQXjABDo51sJVNyGnA0QlAPL4LOApzMo0mY1sUFbQBj8xTzYhKrROYF5VGIftR1uW3+3uiWU8XnBw7l3HIYVG/P/djYgMZoyrTJrci0n2qPZVnNFV913viW6btGzsXBT6aW3VKmsauVTFOc2DxpP5YJYLBBeCUixE71IlGBR2EF+6Ou
[remainder of base64-encoded PNG image data elided]" - ), - ] - ) - ], - model_parameters={"temperature": 0.1, "num_predict": 100}, - stream=False, - ) - - assert isinstance(result, LLMResult) - assert len(result.message.content) > 0 - - -def test_get_num_tokens(): - model = OllamaLargeLanguageModel() - - num_tokens = model.get_num_tokens( - model="mistral:text", - credentials={ - "base_url": os.environ.get("OLLAMA_BASE_URL"), - "mode": "chat", - "context_size": 2048, - "max_tokens": 2048, - }, - prompt_messages=[UserPromptMessage(content="Hello World!")], - ) - - assert isinstance(num_tokens, int) - assert num_tokens == 6 diff --git a/api/tests/integration_tests/model_runtime/ollama/test_text_embedding.py b/api/tests/integration_tests/model_runtime/ollama/test_text_embedding.py deleted file mode 100644 index 3c4f740a4f..0000000000 --- a/api/tests/integration_tests/model_runtime/ollama/test_text_embedding.py +++ /dev/null @@ -1,65 +0,0 @@ -import os - -import pytest - -from core.model_runtime.entities.text_embedding_entities import TextEmbeddingResult -from core.model_runtime.errors.validate import CredentialsValidateFailedError -from core.model_runtime.model_providers.ollama.text_embedding.text_embedding import OllamaEmbeddingModel - - -def test_validate_credentials(): - model = OllamaEmbeddingModel() - - with pytest.raises(CredentialsValidateFailedError): - model.validate_credentials( - model="mistral:text", - credentials={ - "base_url": "http://localhost:21434", - "mode": "chat", - "context_size": 4096, - }, - ) - - model.validate_credentials( - model="mistral:text", - credentials={ - "base_url": os.environ.get("OLLAMA_BASE_URL"), - "mode": "chat", - "context_size": 4096, - }, - ) - - -def test_invoke_model(): - model = OllamaEmbeddingModel() - - result = model.invoke( - model="mistral:text", - credentials={ - "base_url": os.environ.get("OLLAMA_BASE_URL"), - "mode": "chat", - "context_size": 4096, - }, - texts=["hello", "world"], - user="abc-123", - ) - - assert isinstance(result, TextEmbeddingResult) - assert len(result.embeddings) == 2 - assert result.usage.total_tokens == 2 - - -def test_get_num_tokens(): - model = OllamaEmbeddingModel() - - num_tokens = model.get_num_tokens( - model="mistral:text", - credentials={ - "base_url": os.environ.get("OLLAMA_BASE_URL"), - "mode": "chat", - "context_size": 4096, - }, - texts=["hello", "world"], - ) - - assert num_tokens == 2 diff --git a/api/tests/integration_tests/model_runtime/openai/test_llm.py b/api/tests/integration_tests/model_runtime/openai/test_llm.py deleted file mode 100644 index 3b3ea9ec80..0000000000 --- a/api/tests/integration_tests/model_runtime/openai/test_llm.py +++ /dev/null @@ -1,313 +0,0 @@ -import os -from collections.abc import Generator - -import pytest - -from core.model_runtime.entities.llm_entities import LLMResult, LLMResultChunk, LLMResultChunkDelta -from core.model_runtime.entities.message_entities import ( - AssistantPromptMessage, - ImagePromptMessageContent, - PromptMessageTool, - SystemPromptMessage, - TextPromptMessageContent, - UserPromptMessage, ) - -from core.model_runtime.entities.model_entities import AIModelEntity, ModelType -from core.model_runtime.errors.validate import CredentialsValidateFailedError -from core.model_runtime.model_providers.__base.large_language_model import LargeLanguageModel -from core.model_runtime.model_providers.openai.llm.llm import OpenAILargeLanguageModel - -"""FOR MOCK FIXTURES, DO NOT REMOVE""" -from 
tests.integration_tests.model_runtime.__mock.openai import setup_openai_mock - - -def test_predefined_models(): - model = OpenAILargeLanguageModel() - model_schemas = model.predefined_models() - - assert len(model_schemas) >= 1 - assert isinstance(model_schemas[0], AIModelEntity) - - -@pytest.mark.parametrize("setup_openai_mock", [["chat"]], indirect=True) -def test_validate_credentials_for_chat_model(setup_openai_mock): - model = OpenAILargeLanguageModel() - - with pytest.raises(CredentialsValidateFailedError): - model.validate_credentials(model="gpt-3.5-turbo", credentials={"openai_api_key": "invalid_key"}) - - model.validate_credentials(model="gpt-3.5-turbo", credentials={"openai_api_key": os.environ.get("OPENAI_API_KEY")}) - - -@pytest.mark.parametrize("setup_openai_mock", [["completion"]], indirect=True) -def test_validate_credentials_for_completion_model(setup_openai_mock): - model = OpenAILargeLanguageModel() - - with pytest.raises(CredentialsValidateFailedError): - model.validate_credentials(model="text-davinci-003", credentials={"openai_api_key": "invalid_key"}) - - model.validate_credentials( - model="text-davinci-003", credentials={"openai_api_key": os.environ.get("OPENAI_API_KEY")} - ) - - -@pytest.mark.parametrize("setup_openai_mock", [["completion"]], indirect=True) -def test_invoke_completion_model(setup_openai_mock): - model = OpenAILargeLanguageModel() - - result = model.invoke( - model="gpt-3.5-turbo-instruct", - credentials={"openai_api_key": os.environ.get("OPENAI_API_KEY"), "openai_api_base": "https://api.openai.com"}, - prompt_messages=[UserPromptMessage(content="Hello World!")], - model_parameters={"temperature": 0.0, "max_tokens": 1}, - stream=False, - user="abc-123", - ) - - assert isinstance(result, LLMResult) - assert len(result.message.content) > 0 - assert model._num_tokens_from_string("gpt-3.5-turbo-instruct", result.message.content) == 1 - - -@pytest.mark.parametrize("setup_openai_mock", [["completion"]], indirect=True) -def test_invoke_stream_completion_model(setup_openai_mock): - model = OpenAILargeLanguageModel() - - result = model.invoke( - model="gpt-3.5-turbo-instruct", - credentials={ - "openai_api_key": os.environ.get("OPENAI_API_KEY"), - "openai_organization": os.environ.get("OPENAI_ORGANIZATION"), - }, - prompt_messages=[UserPromptMessage(content="Hello World!")], - model_parameters={"temperature": 0.0, "max_tokens": 100}, - stream=True, - user="abc-123", - ) - - assert isinstance(result, Generator) - - for chunk in result: - assert isinstance(chunk, LLMResultChunk) - assert isinstance(chunk.delta, LLMResultChunkDelta) - assert isinstance(chunk.delta.message, AssistantPromptMessage) - assert len(chunk.delta.message.content) > 0 if chunk.delta.finish_reason is None else True - - -@pytest.mark.parametrize("setup_openai_mock", [["chat"]], indirect=True) -def test_invoke_chat_model(setup_openai_mock): - model = OpenAILargeLanguageModel() - - result = model.invoke( - model="gpt-3.5-turbo", - credentials={"openai_api_key": os.environ.get("OPENAI_API_KEY")}, - prompt_messages=[ - SystemPromptMessage( - content="You are a helpful AI assistant.", - ), - UserPromptMessage(content="Hello World!"), - ], - model_parameters={ - "temperature": 0.0, - "top_p": 1.0, - "presence_penalty": 0.0, - "frequency_penalty": 0.0, - "max_tokens": 10, - }, - stop=["How"], - stream=False, - user="abc-123", - ) - - assert isinstance(result, LLMResult) - assert len(result.message.content) > 0 - - -@pytest.mark.parametrize("setup_openai_mock", [["chat"]], indirect=True) -def 
test_invoke_chat_model_with_vision(setup_openai_mock): - model = OpenAILargeLanguageModel() - - result = model.invoke( - model="gpt-4-vision-preview", - credentials={"openai_api_key": os.environ.get("OPENAI_API_KEY")}, - prompt_messages=[ - SystemPromptMessage( - content="You are a helpful AI assistant.", - ), - UserPromptMessage( - content=[ - TextPromptMessageContent( - data="Hello World!", - ), - ImagePromptMessageContent( - data="data:image/png;base64,[base64-encoded PNG image data elided]" - ), - ] - ), - ], - model_parameters={"temperature": 0.0, "max_tokens": 100}, - stream=False, - user="abc-123", - ) - - assert isinstance(result, LLMResult) - assert len(result.message.content) > 0 - - -@pytest.mark.parametrize("setup_openai_mock", [["chat"]], indirect=True) -def test_invoke_chat_model_with_tools(setup_openai_mock): - model = OpenAILargeLanguageModel() - - result = model.invoke( - model="gpt-3.5-turbo", - credentials={"openai_api_key": os.environ.get("OPENAI_API_KEY")}, - prompt_messages=[ - SystemPromptMessage( - content="You are a helpful AI assistant.", - ), - UserPromptMessage( - content="what's the weather today in London?", - ), - ], - model_parameters={"temperature": 0.0, "max_tokens": 100}, - tools=[ - PromptMessageTool( - name="get_weather", - description="Determine weather in my location", - parameters={ - "type": "object", - "properties": { - "location": {"type": "string", "description": "The city and state e.g. San Francisco, CA"}, - "unit": {"type": "string", "enum": ["c", "f"]}, - }, - "required": ["location"], - }, - ), - PromptMessageTool( - name="get_stock_price", - description="Get the current stock price", - parameters={ - "type": "object", - "properties": {"symbol": {"type": "string", "description": "The stock symbol"}}, - "required": ["symbol"], - }, - ), - ], - stream=False, - user="abc-123", - ) - - assert isinstance(result, LLMResult) - assert isinstance(result.message, AssistantPromptMessage) - assert len(result.message.tool_calls) > 0 - - -@pytest.mark.parametrize("setup_openai_mock", [["chat"]], indirect=True) -def test_invoke_stream_chat_model(setup_openai_mock): - model = OpenAILargeLanguageModel() - - result = model.invoke( - model="gpt-3.5-turbo", - credentials={"openai_api_key": os.environ.get("OPENAI_API_KEY")}, - prompt_messages=[ - SystemPromptMessage( - content="You are a helpful AI assistant.", - ), - UserPromptMessage(content="Hello World!"), - ], - model_parameters={"temperature": 0.0, "max_tokens": 100}, - stream=True, - user="abc-123", - ) - - assert isinstance(result, Generator) - - for chunk in result: - assert isinstance(chunk, LLMResultChunk) - assert isinstance(chunk.delta, LLMResultChunkDelta) - assert isinstance(chunk.delta.message, AssistantPromptMessage) - assert len(chunk.delta.message.content) > 0 if chunk.delta.finish_reason is None else True - if chunk.delta.finish_reason is not None: - assert chunk.delta.usage is not None - assert chunk.delta.usage.completion_tokens > 0 - - -def test_get_num_tokens(): - model = OpenAILargeLanguageModel() - - num_tokens = model.get_num_tokens( - model="gpt-3.5-turbo-instruct", - credentials={"openai_api_key": os.environ.get("OPENAI_API_KEY")}, - prompt_messages=[UserPromptMessage(content="Hello World!")], - ) - - assert num_tokens == 3 - - num_tokens = model.get_num_tokens( - model="gpt-3.5-turbo", - credentials={"openai_api_key": os.environ.get("OPENAI_API_KEY")}, - prompt_messages=[ - SystemPromptMessage( - content="You are a helpful AI assistant.", - ), - UserPromptMessage(content="Hello World!"), - ], - tools=[ - PromptMessageTool( - name="get_weather", - description="Determine weather in my location", - parameters={ - "type": "object", - "properties": { - "location": {"type": "string", "description": "The city and state e.g. 
San Francisco, CA"}, - "unit": {"type": "string", "enum": ["c", "f"]}, - }, - "required": ["location"], - }, - ), - ], - ) - - assert num_tokens == 72 - - -@pytest.mark.parametrize("setup_openai_mock", [["chat", "remote"]], indirect=True) -def test_fine_tuned_models(setup_openai_mock): - model = OpenAILargeLanguageModel() - - remote_models = model.remote_models(credentials={"openai_api_key": os.environ.get("OPENAI_API_KEY")}) - - if not remote_models: - assert isinstance(remote_models, list) - else: - assert isinstance(remote_models[0], AIModelEntity) - - for llm_model in remote_models: - if llm_model.model_type == ModelType.LLM: - break - - assert isinstance(llm_model, AIModelEntity) - - # test invoke - result = model.invoke( - model=llm_model.model, - credentials={"openai_api_key": os.environ.get("OPENAI_API_KEY")}, - prompt_messages=[ - SystemPromptMessage( - content="You are a helpful AI assistant.", - ), - UserPromptMessage(content="Hello World!"), - ], - model_parameters={"temperature": 0.0, "max_tokens": 100}, - stream=False, - user="abc-123", - ) - - assert isinstance(result, LLMResult) - - -def test__get_num_tokens_by_gpt2(): - model = OpenAILargeLanguageModel() - num_tokens = model._get_num_tokens_by_gpt2("Hello World!") - - assert num_tokens == 3 diff --git a/api/tests/integration_tests/model_runtime/openai/test_provider.py b/api/tests/integration_tests/model_runtime/openai/test_provider.py deleted file mode 100644 index 4d56cfcf3c..0000000000 --- a/api/tests/integration_tests/model_runtime/openai/test_provider.py +++ /dev/null @@ -1,17 +0,0 @@ -import os - -import pytest - -from core.model_runtime.errors.validate import CredentialsValidateFailedError -from core.model_runtime.model_providers.openai.openai import OpenAIProvider -from tests.integration_tests.model_runtime.__mock.openai import setup_openai_mock - - -@pytest.mark.parametrize("setup_openai_mock", [["chat"]], indirect=True) -def test_validate_provider_credentials(setup_openai_mock): - provider = OpenAIProvider() - - with pytest.raises(CredentialsValidateFailedError): - provider.validate_provider_credentials(credentials={}) - - provider.validate_provider_credentials(credentials={"openai_api_key": os.environ.get("OPENAI_API_KEY")}) diff --git a/api/tests/integration_tests/model_runtime/openai/test_speech2text.py b/api/tests/integration_tests/model_runtime/openai/test_speech2text.py deleted file mode 100644 index aa92c8b61f..0000000000 --- a/api/tests/integration_tests/model_runtime/openai/test_speech2text.py +++ /dev/null @@ -1,45 +0,0 @@ -import os - -import pytest - -from core.model_runtime.errors.validate import CredentialsValidateFailedError -from core.model_runtime.model_providers.openai.speech2text.speech2text import OpenAISpeech2TextModel -from tests.integration_tests.model_runtime.__mock.openai import setup_openai_mock - - -@pytest.mark.parametrize("setup_openai_mock", [["speech2text"]], indirect=True) -def test_validate_credentials(setup_openai_mock): - model = OpenAISpeech2TextModel() - - with pytest.raises(CredentialsValidateFailedError): - model.validate_credentials(model="whisper-1", credentials={"openai_api_key": "invalid_key"}) - - model.validate_credentials(model="whisper-1", credentials={"openai_api_key": os.environ.get("OPENAI_API_KEY")}) - - -@pytest.mark.parametrize("setup_openai_mock", [["speech2text"]], indirect=True) -def test_invoke_model(setup_openai_mock): - model = OpenAISpeech2TextModel() - - # Get the directory of the current file - current_dir = os.path.dirname(os.path.abspath(__file__)) - 
- # Get assets directory - assets_dir = os.path.join(os.path.dirname(current_dir), "assets") - - # Construct the path to the audio file - audio_file_path = os.path.join(assets_dir, "audio.mp3") - - # Open the file and get the file object - with open(audio_file_path, "rb") as audio_file: - file = audio_file - - result = model.invoke( - model="whisper-1", - credentials={"openai_api_key": os.environ.get("OPENAI_API_KEY")}, - file=file, - user="abc-123", - ) - - assert isinstance(result, str) - assert result == "1, 2, 3, 4, 5, 6, 7, 8, 9, 10" diff --git a/api/tests/integration_tests/model_runtime/openai/test_text_embedding.py b/api/tests/integration_tests/model_runtime/openai/test_text_embedding.py deleted file mode 100644 index f5dd73f2d4..0000000000 --- a/api/tests/integration_tests/model_runtime/openai/test_text_embedding.py +++ /dev/null @@ -1,48 +0,0 @@ -import os - -import pytest - -from core.model_runtime.entities.text_embedding_entities import TextEmbeddingResult -from core.model_runtime.errors.validate import CredentialsValidateFailedError -from core.model_runtime.model_providers.openai.text_embedding.text_embedding import OpenAITextEmbeddingModel -from tests.integration_tests.model_runtime.__mock.openai import setup_openai_mock - - -@pytest.mark.parametrize("setup_openai_mock", [["text_embedding"]], indirect=True) -def test_validate_credentials(setup_openai_mock): - model = OpenAITextEmbeddingModel() - - with pytest.raises(CredentialsValidateFailedError): - model.validate_credentials(model="text-embedding-ada-002", credentials={"openai_api_key": "invalid_key"}) - - model.validate_credentials( - model="text-embedding-ada-002", credentials={"openai_api_key": os.environ.get("OPENAI_API_KEY")} - ) - - -@pytest.mark.parametrize("setup_openai_mock", [["text_embedding"]], indirect=True) -def test_invoke_model(setup_openai_mock): - model = OpenAITextEmbeddingModel() - - result = model.invoke( - model="text-embedding-ada-002", - credentials={"openai_api_key": os.environ.get("OPENAI_API_KEY"), "openai_api_base": "https://api.openai.com"}, - texts=["hello", "world", " ".join(["long_text"] * 100), " ".join(["another_long_text"] * 100)], - user="abc-123", - ) - - assert isinstance(result, TextEmbeddingResult) - assert len(result.embeddings) == 4 - assert result.usage.total_tokens == 2 - - -def test_get_num_tokens(): - model = OpenAITextEmbeddingModel() - - num_tokens = model.get_num_tokens( - model="text-embedding-ada-002", - credentials={"openai_api_key": os.environ.get("OPENAI_API_KEY"), "openai_api_base": "https://api.openai.com"}, - texts=["hello", "world"], - ) - - assert num_tokens == 2 diff --git a/api/tests/integration_tests/model_runtime/openai_api_compatible/__init__.py b/api/tests/integration_tests/model_runtime/openai_api_compatible/__init__.py deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/api/tests/integration_tests/model_runtime/openai_api_compatible/test_llm.py b/api/tests/integration_tests/model_runtime/openai_api_compatible/test_llm.py deleted file mode 100644 index f2302ef05e..0000000000 --- a/api/tests/integration_tests/model_runtime/openai_api_compatible/test_llm.py +++ /dev/null @@ -1,197 +0,0 @@ -import os -from collections.abc import Generator - -import pytest - -from core.model_runtime.entities.llm_entities import LLMResult, LLMResultChunk, LLMResultChunkDelta -from core.model_runtime.entities.message_entities import ( - AssistantPromptMessage, - PromptMessageTool, - SystemPromptMessage, - UserPromptMessage, -) -from core.model_runtime.errors.validate 
import CredentialsValidateFailedError -from core.model_runtime.model_providers.openai_api_compatible.llm.llm import OAIAPICompatLargeLanguageModel - -""" -Using Together.ai's OpenAI-compatible API as testing endpoint -""" - - -def test_validate_credentials(): - model = OAIAPICompatLargeLanguageModel() - - with pytest.raises(CredentialsValidateFailedError): - model.validate_credentials( - model="mistralai/Mixtral-8x7B-Instruct-v0.1", - credentials={"api_key": "invalid_key", "endpoint_url": "https://api.together.xyz/v1/", "mode": "chat"}, - ) - - model.validate_credentials( - model="mistralai/Mixtral-8x7B-Instruct-v0.1", - credentials={ - "api_key": os.environ.get("TOGETHER_API_KEY"), - "endpoint_url": "https://api.together.xyz/v1/", - "mode": "chat", - }, - ) - - -def test_invoke_model(): - model = OAIAPICompatLargeLanguageModel() - - response = model.invoke( - model="mistralai/Mixtral-8x7B-Instruct-v0.1", - credentials={ - "api_key": os.environ.get("TOGETHER_API_KEY"), - "endpoint_url": "https://api.together.xyz/v1/", - "mode": "completion", - }, - prompt_messages=[ - SystemPromptMessage( - content="You are a helpful AI assistant.", - ), - UserPromptMessage(content="Who are you?"), - ], - model_parameters={ - "temperature": 1.0, - "top_k": 2, - "top_p": 0.5, - }, - stop=["How"], - stream=False, - user="abc-123", - ) - - assert isinstance(response, LLMResult) - assert len(response.message.content) > 0 - - -def test_invoke_stream_model(): - model = OAIAPICompatLargeLanguageModel() - - response = model.invoke( - model="mistralai/Mixtral-8x7B-Instruct-v0.1", - credentials={ - "api_key": os.environ.get("TOGETHER_API_KEY"), - "endpoint_url": "https://api.together.xyz/v1/", - "mode": "chat", - "stream_mode_delimiter": "\\n\\n", - }, - prompt_messages=[ - SystemPromptMessage( - content="You are a helpful AI assistant.", - ), - UserPromptMessage(content="Who are you?"), - ], - model_parameters={ - "temperature": 1.0, - "top_k": 2, - "top_p": 0.5, - }, - stop=["How"], - stream=True, - user="abc-123", - ) - - assert isinstance(response, Generator) - - for chunk in response: - assert isinstance(chunk, LLMResultChunk) - assert isinstance(chunk.delta, LLMResultChunkDelta) - assert isinstance(chunk.delta.message, AssistantPromptMessage) - - -def test_invoke_stream_model_without_delimiter(): - model = OAIAPICompatLargeLanguageModel() - - response = model.invoke( - model="mistralai/Mixtral-8x7B-Instruct-v0.1", - credentials={ - "api_key": os.environ.get("TOGETHER_API_KEY"), - "endpoint_url": "https://api.together.xyz/v1/", - "mode": "chat", - }, - prompt_messages=[ - SystemPromptMessage( - content="You are a helpful AI assistant.", - ), - UserPromptMessage(content="Who are you?"), - ], - model_parameters={ - "temperature": 1.0, - "top_k": 2, - "top_p": 0.5, - }, - stop=["How"], - stream=True, - user="abc-123", - ) - - assert isinstance(response, Generator) - - for chunk in response: - assert isinstance(chunk, LLMResultChunk) - assert isinstance(chunk.delta, LLMResultChunkDelta) - assert isinstance(chunk.delta.message, AssistantPromptMessage) - - -# using OpenAI's ChatGPT-3.5 as testing endpoint -def test_invoke_chat_model_with_tools(): - model = OAIAPICompatLargeLanguageModel() - - result = model.invoke( - model="gpt-3.5-turbo", - credentials={ - "api_key": os.environ.get("OPENAI_API_KEY"), - "endpoint_url": "https://api.openai.com/v1/", - "mode": "chat", - }, - prompt_messages=[ - SystemPromptMessage( - content="You are a helpful AI assistant.", - ), - UserPromptMessage( - content="what's the weather 
today in London?", - ), - ], - tools=[ - PromptMessageTool( - name="get_weather", - description="Determine weather in my location", - parameters={ - "type": "object", - "properties": { - "location": {"type": "string", "description": "The city and state e.g. San Francisco, CA"}, - "unit": {"type": "string", "enum": ["celsius", "fahrenheit"]}, - }, - "required": ["location"], - }, - ), - ], - model_parameters={"temperature": 0.0, "max_tokens": 1024}, - stream=False, - user="abc-123", - ) - - assert isinstance(result, LLMResult) - assert isinstance(result.message, AssistantPromptMessage) - assert len(result.message.tool_calls) > 0 - - -def test_get_num_tokens(): - model = OAIAPICompatLargeLanguageModel() - - num_tokens = model.get_num_tokens( - model="mistralai/Mixtral-8x7B-Instruct-v0.1", - credentials={"api_key": os.environ.get("OPENAI_API_KEY"), "endpoint_url": "https://api.openai.com/v1/"}, - prompt_messages=[ - SystemPromptMessage( - content="You are a helpful AI assistant.", - ), - UserPromptMessage(content="Hello World!"), - ], - ) - - assert isinstance(num_tokens, int) - assert num_tokens == 21 diff --git a/api/tests/integration_tests/model_runtime/openai_api_compatible/test_speech2text.py b/api/tests/integration_tests/model_runtime/openai_api_compatible/test_speech2text.py deleted file mode 100644 index cf805eafff..0000000000 --- a/api/tests/integration_tests/model_runtime/openai_api_compatible/test_speech2text.py +++ /dev/null @@ -1,50 +0,0 @@ -import os - -import pytest - -from core.model_runtime.errors.validate import CredentialsValidateFailedError -from core.model_runtime.model_providers.openai_api_compatible.speech2text.speech2text import ( - OAICompatSpeech2TextModel, -) - - -def test_validate_credentials(): - model = OAICompatSpeech2TextModel() - - with pytest.raises(CredentialsValidateFailedError): - model.validate_credentials( - model="whisper-1", - credentials={"api_key": "invalid_key", "endpoint_url": "https://api.openai.com/v1/"}, - ) - - model.validate_credentials( - model="whisper-1", - credentials={"api_key": os.environ.get("OPENAI_API_KEY"), "endpoint_url": "https://api.openai.com/v1/"}, - ) - - -def test_invoke_model(): - model = OAICompatSpeech2TextModel() - - # Get the directory of the current file - current_dir = os.path.dirname(os.path.abspath(__file__)) - - # Get assets directory - assets_dir = os.path.join(os.path.dirname(current_dir), "assets") - - # Construct the path to the audio file - audio_file_path = os.path.join(assets_dir, "audio.mp3") - - # Open the file and get the file object - with open(audio_file_path, "rb") as audio_file: - file = audio_file - - result = model.invoke( - model="whisper-1", - credentials={"api_key": os.environ.get("OPENAI_API_KEY"), "endpoint_url": "https://api.openai.com/v1/"}, - file=file, - user="abc-123", - ) - - assert isinstance(result, str) - assert result == "1, 2, 3, 4, 5, 6, 7, 8, 9, 10" diff --git a/api/tests/integration_tests/model_runtime/openai_api_compatible/test_text_embedding.py b/api/tests/integration_tests/model_runtime/openai_api_compatible/test_text_embedding.py deleted file mode 100644 index 052b41605f..0000000000 --- a/api/tests/integration_tests/model_runtime/openai_api_compatible/test_text_embedding.py +++ /dev/null @@ -1,67 +0,0 @@ -import os - -import pytest - -from core.model_runtime.entities.text_embedding_entities import TextEmbeddingResult -from core.model_runtime.errors.validate import CredentialsValidateFailedError -from 
core.model_runtime.model_providers.openai_api_compatible.text_embedding.text_embedding import ( - OAICompatEmbeddingModel, -) - -""" -Using OpenAI's API as testing endpoint -""" - - -def test_validate_credentials(): - model = OAICompatEmbeddingModel() - - with pytest.raises(CredentialsValidateFailedError): - model.validate_credentials( - model="text-embedding-ada-002", - credentials={"api_key": "invalid_key", "endpoint_url": "https://api.openai.com/v1/", "context_size": 8184}, - ) - - model.validate_credentials( - model="text-embedding-ada-002", - credentials={ - "api_key": os.environ.get("OPENAI_API_KEY"), - "endpoint_url": "https://api.openai.com/v1/", - "context_size": 8184, - }, - ) - - -def test_invoke_model(): - model = OAICompatEmbeddingModel() - - result = model.invoke( - model="text-embedding-ada-002", - credentials={ - "api_key": os.environ.get("OPENAI_API_KEY"), - "endpoint_url": "https://api.openai.com/v1/", - "context_size": 8184, - }, - texts=["hello", "world", " ".join(["long_text"] * 100), " ".join(["another_long_text"] * 100)], - user="abc-123", - ) - - assert isinstance(result, TextEmbeddingResult) - assert len(result.embeddings) == 4 - assert result.usage.total_tokens == 502 - - -def test_get_num_tokens(): - model = OAICompatEmbeddingModel() - - num_tokens = model.get_num_tokens( - model="text-embedding-ada-002", - credentials={ - "api_key": os.environ.get("OPENAI_API_KEY"), - "endpoint_url": "https://api.openai.com/v1/embeddings", - "context_size": 8184, - }, - texts=["hello", "world"], - ) - - assert num_tokens == 2 diff --git a/api/tests/integration_tests/model_runtime/openllm/__init__.py b/api/tests/integration_tests/model_runtime/openllm/__init__.py deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/api/tests/integration_tests/model_runtime/openllm/test_embedding.py b/api/tests/integration_tests/model_runtime/openllm/test_embedding.py deleted file mode 100644 index 14d47217af..0000000000 --- a/api/tests/integration_tests/model_runtime/openllm/test_embedding.py +++ /dev/null @@ -1,57 +0,0 @@ -import os - -import pytest - -from core.model_runtime.entities.text_embedding_entities import TextEmbeddingResult -from core.model_runtime.errors.validate import CredentialsValidateFailedError -from core.model_runtime.model_providers.openllm.text_embedding.text_embedding import OpenLLMTextEmbeddingModel - - -def test_validate_credentials(): - model = OpenLLMTextEmbeddingModel() - - with pytest.raises(CredentialsValidateFailedError): - model.validate_credentials( - model="NOT IMPORTANT", - credentials={ - "server_url": "ww" + os.environ.get("OPENLLM_SERVER_URL"), - }, - ) - - model.validate_credentials( - model="NOT IMPORTANT", - credentials={ - "server_url": os.environ.get("OPENLLM_SERVER_URL"), - }, - ) - - -def test_invoke_model(): - model = OpenLLMTextEmbeddingModel() - - result = model.invoke( - model="NOT IMPORTANT", - credentials={ - "server_url": os.environ.get("OPENLLM_SERVER_URL"), - }, - texts=["hello", "world"], - user="abc-123", - ) - - assert isinstance(result, TextEmbeddingResult) - assert len(result.embeddings) == 2 - assert result.usage.total_tokens > 0 - - -def test_get_num_tokens(): - model = OpenLLMTextEmbeddingModel() - - num_tokens = model.get_num_tokens( - model="NOT IMPORTANT", - credentials={ - "server_url": os.environ.get("OPENLLM_SERVER_URL"), - }, - texts=["hello", "world"], - ) - - assert num_tokens == 2 diff --git a/api/tests/integration_tests/model_runtime/openllm/test_llm.py 
b/api/tests/integration_tests/model_runtime/openllm/test_llm.py deleted file mode 100644 index 35939e3cfe..0000000000 --- a/api/tests/integration_tests/model_runtime/openllm/test_llm.py +++ /dev/null @@ -1,95 +0,0 @@ -import os -from collections.abc import Generator - -import pytest - -from core.model_runtime.entities.llm_entities import LLMResult, LLMResultChunk, LLMResultChunkDelta -from core.model_runtime.entities.message_entities import AssistantPromptMessage, UserPromptMessage -from core.model_runtime.errors.validate import CredentialsValidateFailedError -from core.model_runtime.model_providers.openllm.llm.llm import OpenLLMLargeLanguageModel - - -def test_validate_credentials_for_chat_model(): - model = OpenLLMLargeLanguageModel() - - with pytest.raises(CredentialsValidateFailedError): - model.validate_credentials( - model="NOT IMPORTANT", - credentials={ - "server_url": "invalid_key", - }, - ) - - model.validate_credentials( - model="NOT IMPORTANT", - credentials={ - "server_url": os.environ.get("OPENLLM_SERVER_URL"), - }, - ) - - -def test_invoke_model(): - model = OpenLLMLargeLanguageModel() - - response = model.invoke( - model="NOT IMPORTANT", - credentials={ - "server_url": os.environ.get("OPENLLM_SERVER_URL"), - }, - prompt_messages=[UserPromptMessage(content="Hello World!")], - model_parameters={ - "temperature": 0.7, - "top_p": 1.0, - "top_k": 1, - }, - stop=["you"], - user="abc-123", - stream=False, - ) - - assert isinstance(response, LLMResult) - assert len(response.message.content) > 0 - assert response.usage.total_tokens > 0 - - -def test_invoke_stream_model(): - model = OpenLLMLargeLanguageModel() - - response = model.invoke( - model="NOT IMPORTANT", - credentials={ - "server_url": os.environ.get("OPENLLM_SERVER_URL"), - }, - prompt_messages=[UserPromptMessage(content="Hello World!")], - model_parameters={ - "temperature": 0.7, - "top_p": 1.0, - "top_k": 1, - }, - stop=["you"], - stream=True, - user="abc-123", - ) - - assert isinstance(response, Generator) - for chunk in response: - assert isinstance(chunk, LLMResultChunk) - assert isinstance(chunk.delta, LLMResultChunkDelta) - assert isinstance(chunk.delta.message, AssistantPromptMessage) - assert len(chunk.delta.message.content) > 0 if chunk.delta.finish_reason is None else True - - -def test_get_num_tokens(): - model = OpenLLMLargeLanguageModel() - - response = model.get_num_tokens( - model="NOT IMPORTANT", - credentials={ - "server_url": os.environ.get("OPENLLM_SERVER_URL"), - }, - prompt_messages=[UserPromptMessage(content="Hello World!")], - tools=[], - ) - - assert isinstance(response, int) - assert response == 3 diff --git a/api/tests/integration_tests/model_runtime/openrouter/__init__.py b/api/tests/integration_tests/model_runtime/openrouter/__init__.py deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/api/tests/integration_tests/model_runtime/openrouter/test_llm.py b/api/tests/integration_tests/model_runtime/openrouter/test_llm.py deleted file mode 100644 index ce4876a73a..0000000000 --- a/api/tests/integration_tests/model_runtime/openrouter/test_llm.py +++ /dev/null @@ -1,104 +0,0 @@ -import os -from collections.abc import Generator - -import pytest - -from core.model_runtime.entities.llm_entities import LLMResult, LLMResultChunk, LLMResultChunkDelta -from core.model_runtime.entities.message_entities import ( - AssistantPromptMessage, - PromptMessageTool, - SystemPromptMessage, - UserPromptMessage, -) -from core.model_runtime.errors.validate import CredentialsValidateFailedError -from 
core.model_runtime.model_providers.openrouter.llm.llm import OpenRouterLargeLanguageModel - - -def test_validate_credentials(): - model = OpenRouterLargeLanguageModel() - - with pytest.raises(CredentialsValidateFailedError): - model.validate_credentials( - model="mistralai/mixtral-8x7b-instruct", credentials={"api_key": "invalid_key", "mode": "chat"} - ) - - model.validate_credentials( - model="mistralai/mixtral-8x7b-instruct", - credentials={"api_key": os.environ.get("TOGETHER_API_KEY"), "mode": "chat"}, - ) - - -def test_invoke_model(): - model = OpenRouterLargeLanguageModel() - - response = model.invoke( - model="mistralai/mixtral-8x7b-instruct", - credentials={"api_key": os.environ.get("TOGETHER_API_KEY"), "mode": "completion"}, - prompt_messages=[ - SystemPromptMessage( - content="You are a helpful AI assistant.", - ), - UserPromptMessage(content="Who are you?"), - ], - model_parameters={ - "temperature": 1.0, - "top_k": 2, - "top_p": 0.5, - }, - stop=["How"], - stream=False, - user="abc-123", - ) - - assert isinstance(response, LLMResult) - assert len(response.message.content) > 0 - - -def test_invoke_stream_model(): - model = OpenRouterLargeLanguageModel() - - response = model.invoke( - model="mistralai/mixtral-8x7b-instruct", - credentials={"api_key": os.environ.get("TOGETHER_API_KEY"), "mode": "chat"}, - prompt_messages=[ - SystemPromptMessage( - content="You are a helpful AI assistant.", - ), - UserPromptMessage(content="Who are you?"), - ], - model_parameters={ - "temperature": 1.0, - "top_k": 2, - "top_p": 0.5, - }, - stop=["How"], - stream=True, - user="abc-123", - ) - - assert isinstance(response, Generator) - - for chunk in response: - assert isinstance(chunk, LLMResultChunk) - assert isinstance(chunk.delta, LLMResultChunkDelta) - assert isinstance(chunk.delta.message, AssistantPromptMessage) - - -def test_get_num_tokens(): - model = OpenRouterLargeLanguageModel() - - num_tokens = model.get_num_tokens( - model="mistralai/mixtral-8x7b-instruct", - credentials={ - "api_key": os.environ.get("TOGETHER_API_KEY"), - }, - prompt_messages=[ - SystemPromptMessage( - content="You are a helpful AI assistant.", - ), - UserPromptMessage(content="Hello World!"), - ], - ) - - assert isinstance(num_tokens, int) - assert num_tokens == 21 diff --git a/api/tests/integration_tests/model_runtime/replicate/__init__.py b/api/tests/integration_tests/model_runtime/replicate/__init__.py deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/api/tests/integration_tests/model_runtime/replicate/test_llm.py b/api/tests/integration_tests/model_runtime/replicate/test_llm.py deleted file mode 100644 index b940005b71..0000000000 --- a/api/tests/integration_tests/model_runtime/replicate/test_llm.py +++ /dev/null @@ -1,112 +0,0 @@ -import os -from collections.abc import Generator - -import pytest - -from core.model_runtime.entities.llm_entities import LLMResult, LLMResultChunk, LLMResultChunkDelta -from core.model_runtime.entities.message_entities import AssistantPromptMessage, SystemPromptMessage, UserPromptMessage -from core.model_runtime.errors.validate import CredentialsValidateFailedError -from core.model_runtime.model_providers.replicate.llm.llm import ReplicateLargeLanguageModel - - -def test_validate_credentials(): - model = ReplicateLargeLanguageModel() - - with pytest.raises(CredentialsValidateFailedError): - model.validate_credentials( - model="meta/llama-2-13b-chat", - credentials={ - "replicate_api_token": "invalid_key", - "model_version": 
"f4e2de70d66816a838a89eeeb621910adffb0dd0baba3976c96980970978018d", - }, - ) - - model.validate_credentials( - model="meta/llama-2-13b-chat", - credentials={ - "replicate_api_token": os.environ.get("REPLICATE_API_KEY"), - "model_version": "f4e2de70d66816a838a89eeeb621910adffb0dd0baba3976c96980970978018d", - }, - ) - - -def test_invoke_model(): - model = ReplicateLargeLanguageModel() - - response = model.invoke( - model="meta/llama-2-13b-chat", - credentials={ - "replicate_api_token": os.environ.get("REPLICATE_API_KEY"), - "model_version": "f4e2de70d66816a838a89eeeb621910adffb0dd0baba3976c96980970978018d", - }, - prompt_messages=[ - SystemPromptMessage( - content="You are a helpful AI assistant.", - ), - UserPromptMessage(content="Who are you?"), - ], - model_parameters={ - "temperature": 1.0, - "top_k": 2, - "top_p": 0.5, - }, - stop=["How"], - stream=False, - user="abc-123", - ) - - assert isinstance(response, LLMResult) - assert len(response.message.content) > 0 - - -def test_invoke_stream_model(): - model = ReplicateLargeLanguageModel() - - response = model.invoke( - model="mistralai/mixtral-8x7b-instruct-v0.1", - credentials={ - "replicate_api_token": os.environ.get("REPLICATE_API_KEY"), - "model_version": "2b56576fcfbe32fa0526897d8385dd3fb3d36ba6fd0dbe033c72886b81ade93e", - }, - prompt_messages=[ - SystemPromptMessage( - content="You are a helpful AI assistant.", - ), - UserPromptMessage(content="Who are you?"), - ], - model_parameters={ - "temperature": 1.0, - "top_k": 2, - "top_p": 0.5, - }, - stop=["How"], - stream=True, - user="abc-123", - ) - - assert isinstance(response, Generator) - - for chunk in response: - assert isinstance(chunk, LLMResultChunk) - assert isinstance(chunk.delta, LLMResultChunkDelta) - assert isinstance(chunk.delta.message, AssistantPromptMessage) - - -def test_get_num_tokens(): - model = ReplicateLargeLanguageModel() - - num_tokens = model.get_num_tokens( - model="", - credentials={ - "replicate_api_token": os.environ.get("REPLICATE_API_KEY"), - "model_version": "2b56576fcfbe32fa0526897d8385dd3fb3d36ba6fd0dbe033c72886b81ade93e", - }, - prompt_messages=[ - SystemPromptMessage( - content="You are a helpful AI assistant.", - ), - UserPromptMessage(content="Hello World!"), - ], - ) - - assert num_tokens == 14 diff --git a/api/tests/integration_tests/model_runtime/replicate/test_text_embedding.py b/api/tests/integration_tests/model_runtime/replicate/test_text_embedding.py deleted file mode 100644 index 397715f225..0000000000 --- a/api/tests/integration_tests/model_runtime/replicate/test_text_embedding.py +++ /dev/null @@ -1,136 +0,0 @@ -import os - -import pytest - -from core.model_runtime.entities.text_embedding_entities import TextEmbeddingResult -from core.model_runtime.errors.validate import CredentialsValidateFailedError -from core.model_runtime.model_providers.replicate.text_embedding.text_embedding import ReplicateEmbeddingModel - - -def test_validate_credentials_one(): - model = ReplicateEmbeddingModel() - - with pytest.raises(CredentialsValidateFailedError): - model.validate_credentials( - model="replicate/all-mpnet-base-v2", - credentials={ - "replicate_api_token": "invalid_key", - "model_version": "b6b7585c9640cd7a9572c6e129c9549d79c9c31f0d3fdce7baac7c67ca38f305", - }, - ) - - model.validate_credentials( - model="replicate/all-mpnet-base-v2", - credentials={ - "replicate_api_token": os.environ.get("REPLICATE_API_KEY"), - "model_version": "b6b7585c9640cd7a9572c6e129c9549d79c9c31f0d3fdce7baac7c67ca38f305", - }, - ) - - -def 
test_validate_credentials_two(): - model = ReplicateEmbeddingModel() - - with pytest.raises(CredentialsValidateFailedError): - model.validate_credentials( - model="nateraw/bge-large-en-v1.5", - credentials={ - "replicate_api_token": "invalid_key", - "model_version": "9cf9f015a9cb9c61d1a2610659cdac4a4ca222f2d3707a68517b18c198a9add1", - }, - ) - - model.validate_credentials( - model="nateraw/bge-large-en-v1.5", - credentials={ - "replicate_api_token": os.environ.get("REPLICATE_API_KEY"), - "model_version": "9cf9f015a9cb9c61d1a2610659cdac4a4ca222f2d3707a68517b18c198a9add1", - }, - ) - - -def test_invoke_model_one(): - model = ReplicateEmbeddingModel() - - result = model.invoke( - model="nateraw/bge-large-en-v1.5", - credentials={ - "replicate_api_token": os.environ.get("REPLICATE_API_KEY"), - "model_version": "9cf9f015a9cb9c61d1a2610659cdac4a4ca222f2d3707a68517b18c198a9add1", - }, - texts=["hello", "world"], - user="abc-123", - ) - - assert isinstance(result, TextEmbeddingResult) - assert len(result.embeddings) == 2 - assert result.usage.total_tokens == 2 - - -def test_invoke_model_two(): - model = ReplicateEmbeddingModel() - - result = model.invoke( - model="andreasjansson/clip-features", - credentials={ - "replicate_api_token": os.environ.get("REPLICATE_API_KEY"), - "model_version": "75b33f253f7714a281ad3e9b28f63e3232d583716ef6718f2e46641077ea040a", - }, - texts=["hello", "world"], - user="abc-123", - ) - - assert isinstance(result, TextEmbeddingResult) - assert len(result.embeddings) == 2 - assert result.usage.total_tokens == 2 - - -def test_invoke_model_three(): - model = ReplicateEmbeddingModel() - - result = model.invoke( - model="replicate/all-mpnet-base-v2", - credentials={ - "replicate_api_token": os.environ.get("REPLICATE_API_KEY"), - "model_version": "b6b7585c9640cd7a9572c6e129c9549d79c9c31f0d3fdce7baac7c67ca38f305", - }, - texts=["hello", "world"], - user="abc-123", - ) - - assert isinstance(result, TextEmbeddingResult) - assert len(result.embeddings) == 2 - assert result.usage.total_tokens == 2 - - -def test_invoke_model_four(): - model = ReplicateEmbeddingModel() - - result = model.invoke( - model="nateraw/jina-embeddings-v2-base-en", - credentials={ - "replicate_api_token": os.environ.get("REPLICATE_API_KEY"), - "model_version": "f8367a1c072ba2bc28af549d1faeacfe9b88b3f0e475add7a75091dac507f79e", - }, - texts=["hello", "world"], - user="abc-123", - ) - - assert isinstance(result, TextEmbeddingResult) - assert len(result.embeddings) == 2 - assert result.usage.total_tokens == 2 - - -def test_get_num_tokens(): - model = ReplicateEmbeddingModel() - - num_tokens = model.get_num_tokens( - model="nateraw/jina-embeddings-v2-base-en", - credentials={ - "replicate_api_token": os.environ.get("REPLICATE_API_KEY"), - "model_version": "f8367a1c072ba2bc28af549d1faeacfe9b88b3f0e475add7a75091dac507f79e", - }, - texts=["hello", "world"], - ) - - assert num_tokens == 2 diff --git a/api/tests/integration_tests/model_runtime/sagemaker/__init__.py b/api/tests/integration_tests/model_runtime/sagemaker/__init__.py deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/api/tests/integration_tests/model_runtime/sagemaker/test_provider.py b/api/tests/integration_tests/model_runtime/sagemaker/test_provider.py deleted file mode 100644 index 9f0b439d6c..0000000000 --- a/api/tests/integration_tests/model_runtime/sagemaker/test_provider.py +++ /dev/null @@ -1,15 +0,0 @@ -import os - -import pytest - -from core.model_runtime.errors.validate import CredentialsValidateFailedError -from 
core.model_runtime.model_providers.sagemaker.sagemaker import SageMakerProvider - - -def test_validate_provider_credentials(): - provider = SageMakerProvider() - - with pytest.raises(CredentialsValidateFailedError): - provider.validate_provider_credentials(credentials={}) - - provider.validate_provider_credentials(credentials={}) diff --git a/api/tests/integration_tests/model_runtime/sagemaker/test_rerank.py b/api/tests/integration_tests/model_runtime/sagemaker/test_rerank.py deleted file mode 100644 index d5a6798a1e..0000000000 --- a/api/tests/integration_tests/model_runtime/sagemaker/test_rerank.py +++ /dev/null @@ -1,55 +0,0 @@ -import os - -import pytest - -from core.model_runtime.entities.rerank_entities import RerankResult -from core.model_runtime.errors.validate import CredentialsValidateFailedError -from core.model_runtime.model_providers.sagemaker.rerank.rerank import SageMakerRerankModel - - -def test_validate_credentials(): - model = SageMakerRerankModel() - - with pytest.raises(CredentialsValidateFailedError): - model.validate_credentials( - model="bge-m3-rerank-v2", - credentials={ - "aws_region": os.getenv("AWS_REGION"), - "aws_access_key": os.getenv("AWS_ACCESS_KEY"), - "aws_secret_access_key": os.getenv("AWS_SECRET_ACCESS_KEY"), - }, - query="What is the capital of the United States?", - docs=[ - "Carson City is the capital city of the American state of Nevada. At the 2010 United States " - "Census, Carson City had a population of 55,274.", - "The Commonwealth of the Northern Mariana Islands is a group of islands in the Pacific Ocean that " - "are a political division controlled by the United States. Its capital is Saipan.", - ], - score_threshold=0.8, - ) - - -def test_invoke_model(): - model = SageMakerRerankModel() - - result = model.invoke( - model="bge-m3-rerank-v2", - credentials={ - "aws_region": os.getenv("AWS_REGION"), - "aws_access_key": os.getenv("AWS_ACCESS_KEY"), - "aws_secret_access_key": os.getenv("AWS_SECRET_ACCESS_KEY"), - }, - query="What is the capital of the United States?", - docs=[ - "Carson City is the capital city of the American state of Nevada. At the 2010 United States " - "Census, Carson City had a population of 55,274.", - "The Commonwealth of the Northern Mariana Islands is a group of islands in the Pacific Ocean that " - "are a political division controlled by the United States. 
Its capital is Saipan.", - ], - score_threshold=0.8, - ) - - assert isinstance(result, RerankResult) - assert len(result.docs) == 1 - assert result.docs[0].index == 1 - assert result.docs[0].score >= 0.8 diff --git a/api/tests/integration_tests/model_runtime/sagemaker/test_text_embedding.py b/api/tests/integration_tests/model_runtime/sagemaker/test_text_embedding.py deleted file mode 100644 index e4e404c7a8..0000000000 --- a/api/tests/integration_tests/model_runtime/sagemaker/test_text_embedding.py +++ /dev/null @@ -1,33 +0,0 @@ -import os - -import pytest - -from core.model_runtime.entities.text_embedding_entities import TextEmbeddingResult -from core.model_runtime.errors.validate import CredentialsValidateFailedError -from core.model_runtime.model_providers.sagemaker.text_embedding.text_embedding import SageMakerEmbeddingModel - - -def test_validate_credentials(): - model = SageMakerEmbeddingModel() - - with pytest.raises(CredentialsValidateFailedError): - model.validate_credentials(model="bge-m3", credentials={}) - - model.validate_credentials(model="bge-m3-embedding", credentials={}) - - -def test_invoke_model(): - model = SageMakerEmbeddingModel() - - result = model.invoke(model="bge-m3-embedding", credentials={}, texts=["hello", "world"], user="abc-123") - - assert isinstance(result, TextEmbeddingResult) - assert len(result.embeddings) == 2 - - -def test_get_num_tokens(): - model = SageMakerEmbeddingModel() - - num_tokens = model.get_num_tokens(model="bge-m3-embedding", credentials={}, texts=[]) - - assert num_tokens == 0 diff --git a/api/tests/integration_tests/model_runtime/siliconflow/__init__.py b/api/tests/integration_tests/model_runtime/siliconflow/__init__.py deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/api/tests/integration_tests/model_runtime/siliconflow/test_llm.py b/api/tests/integration_tests/model_runtime/siliconflow/test_llm.py deleted file mode 100644 index f47c9c5588..0000000000 --- a/api/tests/integration_tests/model_runtime/siliconflow/test_llm.py +++ /dev/null @@ -1,73 +0,0 @@ -import os -from collections.abc import Generator - -import pytest - -from core.model_runtime.entities.llm_entities import LLMResult, LLMResultChunk, LLMResultChunkDelta -from core.model_runtime.entities.message_entities import AssistantPromptMessage, SystemPromptMessage, UserPromptMessage -from core.model_runtime.errors.validate import CredentialsValidateFailedError -from core.model_runtime.model_providers.siliconflow.llm.llm import SiliconflowLargeLanguageModel - - -def test_validate_credentials(): - model = SiliconflowLargeLanguageModel() - - with pytest.raises(CredentialsValidateFailedError): - model.validate_credentials(model="deepseek-ai/DeepSeek-V2-Chat", credentials={"api_key": "invalid_key"}) - - model.validate_credentials(model="deepseek-ai/DeepSeek-V2-Chat", credentials={"api_key": os.environ.get("API_KEY")}) - - -def test_invoke_model(): - model = SiliconflowLargeLanguageModel() - - response = model.invoke( - model="deepseek-ai/DeepSeek-V2-Chat", - credentials={"api_key": os.environ.get("API_KEY")}, - prompt_messages=[UserPromptMessage(content="Who are you?")], - model_parameters={"temperature": 0.5, "max_tokens": 10}, - stop=["How"], - stream=False, - user="abc-123", - ) - - assert isinstance(response, LLMResult) - assert len(response.message.content) > 0 - - -def test_invoke_stream_model(): - model = SiliconflowLargeLanguageModel() - - response = model.invoke( - model="deepseek-ai/DeepSeek-V2-Chat", - credentials={"api_key": os.environ.get("API_KEY")}, - 
prompt_messages=[UserPromptMessage(content="Hello World!")], - model_parameters={"temperature": 0.5, "max_tokens": 100, "seed": 1234}, - stream=True, - user="abc-123", - ) - - assert isinstance(response, Generator) - - for chunk in response: - assert isinstance(chunk, LLMResultChunk) - assert isinstance(chunk.delta, LLMResultChunkDelta) - assert isinstance(chunk.delta.message, AssistantPromptMessage) - assert len(chunk.delta.message.content) > 0 if chunk.delta.finish_reason is None else True - - -def test_get_num_tokens(): - model = SiliconflowLargeLanguageModel() - - num_tokens = model.get_num_tokens( - model="deepseek-ai/DeepSeek-V2-Chat", - credentials={"api_key": os.environ.get("API_KEY")}, - prompt_messages=[ - SystemPromptMessage( - content="You are a helpful AI assistant.", - ), - UserPromptMessage(content="Hello World!"), - ], - ) - - assert num_tokens == 12 diff --git a/api/tests/integration_tests/model_runtime/siliconflow/test_provider.py b/api/tests/integration_tests/model_runtime/siliconflow/test_provider.py deleted file mode 100644 index 8f70210b7a..0000000000 --- a/api/tests/integration_tests/model_runtime/siliconflow/test_provider.py +++ /dev/null @@ -1,15 +0,0 @@ -import os - -import pytest - -from core.model_runtime.errors.validate import CredentialsValidateFailedError -from core.model_runtime.model_providers.siliconflow.siliconflow import SiliconflowProvider - - -def test_validate_provider_credentials(): - provider = SiliconflowProvider() - - with pytest.raises(CredentialsValidateFailedError): - provider.validate_provider_credentials(credentials={}) - - provider.validate_provider_credentials(credentials={"api_key": os.environ.get("API_KEY")}) diff --git a/api/tests/integration_tests/model_runtime/siliconflow/test_rerank.py b/api/tests/integration_tests/model_runtime/siliconflow/test_rerank.py deleted file mode 100644 index ad794613f9..0000000000 --- a/api/tests/integration_tests/model_runtime/siliconflow/test_rerank.py +++ /dev/null @@ -1,47 +0,0 @@ -import os - -import pytest - -from core.model_runtime.entities.rerank_entities import RerankResult -from core.model_runtime.errors.validate import CredentialsValidateFailedError -from core.model_runtime.model_providers.siliconflow.rerank.rerank import SiliconflowRerankModel - - -def test_validate_credentials(): - model = SiliconflowRerankModel() - - with pytest.raises(CredentialsValidateFailedError): - model.validate_credentials( - model="BAAI/bge-reranker-v2-m3", - credentials={"api_key": "invalid_key"}, - ) - - model.validate_credentials( - model="BAAI/bge-reranker-v2-m3", - credentials={ - "api_key": os.environ.get("API_KEY"), - }, - ) - - -def test_invoke_model(): - model = SiliconflowRerankModel() - - result = model.invoke( - model="BAAI/bge-reranker-v2-m3", - credentials={ - "api_key": os.environ.get("API_KEY"), - }, - query="Who is Kasumi?", - docs=[ - 'Kasumi is a girl\'s name of Japanese origin meaning "mist".', - "Her music is a kawaii bass, a mix of future bass, pop, and kawaii music ", - "and she leads a team named PopiParty.", - ], - score_threshold=0.8, - ) - - assert isinstance(result, RerankResult) - assert len(result.docs) == 1 - assert result.docs[0].index == 0 - assert result.docs[0].score >= 0.8 diff --git a/api/tests/integration_tests/model_runtime/siliconflow/test_speech2text.py b/api/tests/integration_tests/model_runtime/siliconflow/test_speech2text.py deleted file mode 100644 index 0502ba5ab4..0000000000 --- a/api/tests/integration_tests/model_runtime/siliconflow/test_speech2text.py +++ /dev/null @@ 
-1,45 +0,0 @@ -import os - -import pytest - -from core.model_runtime.errors.validate import CredentialsValidateFailedError -from core.model_runtime.model_providers.siliconflow.speech2text.speech2text import SiliconflowSpeech2TextModel - - -def test_validate_credentials(): - model = SiliconflowSpeech2TextModel() - - with pytest.raises(CredentialsValidateFailedError): - model.validate_credentials( - model="iic/SenseVoiceSmall", - credentials={"api_key": "invalid_key"}, - ) - - model.validate_credentials( - model="iic/SenseVoiceSmall", - credentials={"api_key": os.environ.get("API_KEY")}, - ) - - -def test_invoke_model(): - model = SiliconflowSpeech2TextModel() - - # Get the directory of the current file - current_dir = os.path.dirname(os.path.abspath(__file__)) - - # Get assets directory - assets_dir = os.path.join(os.path.dirname(current_dir), "assets") - - # Construct the path to the audio file - audio_file_path = os.path.join(assets_dir, "audio.mp3") - - # Open the file and get the file object - with open(audio_file_path, "rb") as audio_file: - file = audio_file - - result = model.invoke( - model="iic/SenseVoiceSmall", credentials={"api_key": os.environ.get("API_KEY")}, file=file - ) - - assert isinstance(result, str) - assert result == "1,2,3,4,5,6,7,8,9,10." diff --git a/api/tests/integration_tests/model_runtime/siliconflow/test_text_embedding.py b/api/tests/integration_tests/model_runtime/siliconflow/test_text_embedding.py deleted file mode 100644 index ab143c1061..0000000000 --- a/api/tests/integration_tests/model_runtime/siliconflow/test_text_embedding.py +++ /dev/null @@ -1,60 +0,0 @@ -import os - -import pytest - -from core.model_runtime.entities.text_embedding_entities import TextEmbeddingResult -from core.model_runtime.errors.validate import CredentialsValidateFailedError -from core.model_runtime.model_providers.siliconflow.text_embedding.text_embedding import ( - SiliconflowTextEmbeddingModel, -) - - -def test_validate_credentials(): - model = SiliconflowTextEmbeddingModel() - - with pytest.raises(CredentialsValidateFailedError): - model.validate_credentials( - model="BAAI/bge-large-zh-v1.5", - credentials={"api_key": "invalid_key"}, - ) - - model.validate_credentials( - model="BAAI/bge-large-zh-v1.5", - credentials={ - "api_key": os.environ.get("API_KEY"), - }, - ) - - -def test_invoke_model(): - model = SiliconflowTextEmbeddingModel() - - result = model.invoke( - model="BAAI/bge-large-zh-v1.5", - credentials={ - "api_key": os.environ.get("API_KEY"), - }, - texts=[ - "hello", - "world", - ], - user="abc-123", - ) - - assert isinstance(result, TextEmbeddingResult) - assert len(result.embeddings) == 2 - assert result.usage.total_tokens == 6 - - -def test_get_num_tokens(): - model = SiliconflowTextEmbeddingModel() - - num_tokens = model.get_num_tokens( - model="BAAI/bge-large-zh-v1.5", - credentials={ - "api_key": os.environ.get("API_KEY"), - }, - texts=["hello", "world"], - ) - - assert num_tokens == 2 diff --git a/api/tests/integration_tests/model_runtime/spark/__init__.py b/api/tests/integration_tests/model_runtime/spark/__init__.py deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/api/tests/integration_tests/model_runtime/spark/test_llm.py b/api/tests/integration_tests/model_runtime/spark/test_llm.py deleted file mode 100644 index 4fe2fd8c0a..0000000000 --- a/api/tests/integration_tests/model_runtime/spark/test_llm.py +++ /dev/null @@ -1,92 +0,0 @@ -import os -from collections.abc import Generator - -import pytest - -from 
-from core.model_runtime.entities.llm_entities import LLMResult, LLMResultChunk, LLMResultChunkDelta
-from core.model_runtime.entities.message_entities import AssistantPromptMessage, SystemPromptMessage, UserPromptMessage
-from core.model_runtime.errors.validate import CredentialsValidateFailedError
-from core.model_runtime.model_providers.spark.llm.llm import SparkLargeLanguageModel
-
-
-def test_validate_credentials():
-    model = SparkLargeLanguageModel()
-
-    with pytest.raises(CredentialsValidateFailedError):
-        model.validate_credentials(model="spark-1.5", credentials={"app_id": "invalid_key"})
-
-    model.validate_credentials(
-        model="spark-1.5",
-        credentials={
-            "app_id": os.environ.get("SPARK_APP_ID"),
-            "api_secret": os.environ.get("SPARK_API_SECRET"),
-            "api_key": os.environ.get("SPARK_API_KEY"),
-        },
-    )
-
-
-def test_invoke_model():
-    model = SparkLargeLanguageModel()
-
-    response = model.invoke(
-        model="spark-1.5",
-        credentials={
-            "app_id": os.environ.get("SPARK_APP_ID"),
-            "api_secret": os.environ.get("SPARK_API_SECRET"),
-            "api_key": os.environ.get("SPARK_API_KEY"),
-        },
-        prompt_messages=[UserPromptMessage(content="Who are you?")],
-        model_parameters={"temperature": 0.5, "max_tokens": 10},
-        stop=["How"],
-        stream=False,
-        user="abc-123",
-    )
-
-    assert isinstance(response, LLMResult)
-    assert len(response.message.content) > 0
-
-
-def test_invoke_stream_model():
-    model = SparkLargeLanguageModel()
-
-    response = model.invoke(
-        model="spark-1.5",
-        credentials={
-            "app_id": os.environ.get("SPARK_APP_ID"),
-            "api_secret": os.environ.get("SPARK_API_SECRET"),
-            "api_key": os.environ.get("SPARK_API_KEY"),
-        },
-        prompt_messages=[UserPromptMessage(content="Hello World!")],
-        model_parameters={"temperature": 0.5, "max_tokens": 100},
-        stream=True,
-        user="abc-123",
-    )
-
-    assert isinstance(response, Generator)
-
-    for chunk in response:
-        assert isinstance(chunk, LLMResultChunk)
-        assert isinstance(chunk.delta, LLMResultChunkDelta)
-        assert isinstance(chunk.delta.message, AssistantPromptMessage)
-        assert len(chunk.delta.message.content) > 0 if chunk.delta.finish_reason is None else True
-
-
-def test_get_num_tokens():
-    model = SparkLargeLanguageModel()
-
-    num_tokens = model.get_num_tokens(
-        model="spark-1.5",
-        credentials={
-            "app_id": os.environ.get("SPARK_APP_ID"),
-            "api_secret": os.environ.get("SPARK_API_SECRET"),
-            "api_key": os.environ.get("SPARK_API_KEY"),
-        },
-        prompt_messages=[
-            SystemPromptMessage(
-                content="You are a helpful AI assistant.",
-            ),
-            UserPromptMessage(content="Hello World!"),
-        ],
-    )
-
-    assert num_tokens == 14
diff --git a/api/tests/integration_tests/model_runtime/spark/test_provider.py b/api/tests/integration_tests/model_runtime/spark/test_provider.py
deleted file mode 100644
index 9da0df6bb3..0000000000
--- a/api/tests/integration_tests/model_runtime/spark/test_provider.py
+++ /dev/null
@@ -1,21 +0,0 @@
-import os
-
-import pytest
-
-from core.model_runtime.errors.validate import CredentialsValidateFailedError
-from core.model_runtime.model_providers.spark.spark import SparkProvider
-
-
-def test_validate_provider_credentials():
-    provider = SparkProvider()
-
-    with pytest.raises(CredentialsValidateFailedError):
-        provider.validate_provider_credentials(credentials={})
-
-    provider.validate_provider_credentials(
-        credentials={
-            "app_id": os.environ.get("SPARK_APP_ID"),
-            "api_secret": os.environ.get("SPARK_API_SECRET"),
-            "api_key": os.environ.get("SPARK_API_KEY"),
-        }
-    )
diff --git a/api/tests/integration_tests/model_runtime/stepfun/__init__.py b/api/tests/integration_tests/model_runtime/stepfun/__init__.py
deleted file mode 100644
index e69de29bb2..0000000000
diff --git a/api/tests/integration_tests/model_runtime/stepfun/test_llm.py b/api/tests/integration_tests/model_runtime/stepfun/test_llm.py
deleted file mode 100644
index c03b1bae1f..0000000000
--- a/api/tests/integration_tests/model_runtime/stepfun/test_llm.py
+++ /dev/null
@@ -1,125 +0,0 @@
-import os
-from collections.abc import Generator
-
-import pytest
-
-from core.model_runtime.entities.llm_entities import LLMResult, LLMResultChunk, LLMResultChunkDelta
-from core.model_runtime.entities.message_entities import (
-    AssistantPromptMessage,
-    ImagePromptMessageContent,
-    PromptMessageTool,
-    SystemPromptMessage,
-    TextPromptMessageContent,
-    UserPromptMessage,
-)
-from core.model_runtime.entities.model_entities import AIModelEntity, ModelType
-from core.model_runtime.errors.validate import CredentialsValidateFailedError
-from core.model_runtime.model_providers.stepfun.llm.llm import StepfunLargeLanguageModel
-
-
-def test_validate_credentials():
-    model = StepfunLargeLanguageModel()
-
-    with pytest.raises(CredentialsValidateFailedError):
-        model.validate_credentials(model="step-1-8k", credentials={"api_key": "invalid_key"})
-
-    model.validate_credentials(model="step-1-8k", credentials={"api_key": os.environ.get("STEPFUN_API_KEY")})
-
-
-def test_invoke_model():
-    model = StepfunLargeLanguageModel()
-
-    response = model.invoke(
-        model="step-1-8k",
-        credentials={"api_key": os.environ.get("STEPFUN_API_KEY")},
-        prompt_messages=[UserPromptMessage(content="Hello World!")],
-        model_parameters={"temperature": 0.9, "top_p": 0.7},
-        stop=["Hi"],
-        stream=False,
-        user="abc-123",
-    )
-
-    assert isinstance(response, LLMResult)
-    assert len(response.message.content) > 0
-
-
-def test_invoke_stream_model():
-    model = StepfunLargeLanguageModel()
-
-    response = model.invoke(
-        model="step-1-8k",
-        credentials={"api_key": os.environ.get("STEPFUN_API_KEY")},
-        prompt_messages=[
-            SystemPromptMessage(
-                content="You are a helpful AI assistant.",
-            ),
-            UserPromptMessage(content="Hello World!"),
-        ],
-        model_parameters={"temperature": 0.9, "top_p": 0.7},
-        stream=True,
-        user="abc-123",
-    )
-
-    assert isinstance(response, Generator)
-
-    for chunk in response:
-        assert isinstance(chunk, LLMResultChunk)
-        assert isinstance(chunk.delta, LLMResultChunkDelta)
-        assert isinstance(chunk.delta.message, AssistantPromptMessage)
-        assert len(chunk.delta.message.content) > 0 if chunk.delta.finish_reason is None else True
-
-
-def test_get_customizable_model_schema():
-    model = StepfunLargeLanguageModel()
-
-    schema = model.get_customizable_model_schema(
-        model="step-1-8k", credentials={"api_key": os.environ.get("STEPFUN_API_KEY")}
-    )
-    assert isinstance(schema, AIModelEntity)
-
-
-def test_invoke_chat_model_with_tools():
-    model = StepfunLargeLanguageModel()
-
-    result = model.invoke(
-        model="step-1-8k",
-        credentials={"api_key": os.environ.get("STEPFUN_API_KEY")},
-        prompt_messages=[
-            SystemPromptMessage(
-                content="You are a helpful AI assistant.",
-            ),
-            UserPromptMessage(
-                content="what's the weather today in Shanghai?",
-            ),
-        ],
-        model_parameters={"temperature": 0.9, "max_tokens": 100},
-        tools=[
-            PromptMessageTool(
-                name="get_weather",
-                description="Determine weather in my location",
-                parameters={
-                    "type": "object",
-                    "properties": {
San Francisco, CA"}, - "unit": {"type": "string", "enum": ["c", "f"]}, - }, - "required": ["location"], - }, - ), - PromptMessageTool( - name="get_stock_price", - description="Get the current stock price", - parameters={ - "type": "object", - "properties": {"symbol": {"type": "string", "description": "The stock symbol"}}, - "required": ["symbol"], - }, - ), - ], - stream=False, - user="abc-123", - ) - - assert isinstance(result, LLMResult) - assert isinstance(result.message, AssistantPromptMessage) - assert len(result.message.tool_calls) > 0 diff --git a/api/tests/integration_tests/model_runtime/test_tiktoken.py b/api/tests/integration_tests/model_runtime/test_tiktoken.py new file mode 100644 index 0000000000..f92d9dc603 --- /dev/null +++ b/api/tests/integration_tests/model_runtime/test_tiktoken.py @@ -0,0 +1,11 @@ +import os + +import tiktoken + +from core.model_runtime.model_providers.__base.tokenizers.gpt2_tokenzier import GPT2Tokenizer + + +def test_tiktoken(): + os.environ["TIKTOKEN_CACHE_DIR"] = "/tmp/.tiktoken_cache" + GPT2Tokenizer.get_num_tokens("Hello, world!") + assert tiktoken.registry.ENCODING_CONSTRUCTORS is not None diff --git a/api/tests/integration_tests/model_runtime/togetherai/__init__.py b/api/tests/integration_tests/model_runtime/togetherai/__init__.py deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/api/tests/integration_tests/model_runtime/togetherai/test_llm.py b/api/tests/integration_tests/model_runtime/togetherai/test_llm.py deleted file mode 100644 index 06ebc2a82d..0000000000 --- a/api/tests/integration_tests/model_runtime/togetherai/test_llm.py +++ /dev/null @@ -1,104 +0,0 @@ -import os -from collections.abc import Generator - -import pytest - -from core.model_runtime.entities.llm_entities import LLMResult, LLMResultChunk, LLMResultChunkDelta -from core.model_runtime.entities.message_entities import ( - AssistantPromptMessage, - PromptMessageTool, - SystemPromptMessage, - UserPromptMessage, -) -from core.model_runtime.errors.validate import CredentialsValidateFailedError -from core.model_runtime.model_providers.togetherai.llm.llm import TogetherAILargeLanguageModel - - -def test_validate_credentials(): - model = TogetherAILargeLanguageModel() - - with pytest.raises(CredentialsValidateFailedError): - model.validate_credentials( - model="mistralai/Mixtral-8x7B-Instruct-v0.1", credentials={"api_key": "invalid_key", "mode": "chat"} - ) - - model.validate_credentials( - model="mistralai/Mixtral-8x7B-Instruct-v0.1", - credentials={"api_key": os.environ.get("TOGETHER_API_KEY"), "mode": "chat"}, - ) - - -def test_invoke_model(): - model = TogetherAILargeLanguageModel() - - response = model.invoke( - model="mistralai/Mixtral-8x7B-Instruct-v0.1", - credentials={"api_key": os.environ.get("TOGETHER_API_KEY"), "mode": "completion"}, - prompt_messages=[ - SystemPromptMessage( - content="You are a helpful AI assistant.", - ), - UserPromptMessage(content="Who are you?"), - ], - model_parameters={ - "temperature": 1.0, - "top_k": 2, - "top_p": 0.5, - }, - stop=["How"], - stream=False, - user="abc-123", - ) - - assert isinstance(response, LLMResult) - assert len(response.message.content) > 0 - - -def test_invoke_stream_model(): - model = TogetherAILargeLanguageModel() - - response = model.invoke( - model="mistralai/Mixtral-8x7B-Instruct-v0.1", - credentials={"api_key": os.environ.get("TOGETHER_API_KEY"), "mode": "chat"}, - prompt_messages=[ - SystemPromptMessage( - content="You are a helpful AI assistant.", - ), - UserPromptMessage(content="Who are you?"), - ], 
-        model_parameters={
-            "temperature": 1.0,
-            "top_k": 2,
-            "top_p": 0.5,
-        },
-        stop=["How"],
-        stream=True,
-        user="abc-123",
-    )
-
-    assert isinstance(response, Generator)
-
-    for chunk in response:
-        assert isinstance(chunk, LLMResultChunk)
-        assert isinstance(chunk.delta, LLMResultChunkDelta)
-        assert isinstance(chunk.delta.message, AssistantPromptMessage)
-
-
-def test_get_num_tokens():
-    model = TogetherAILargeLanguageModel()
-
-    num_tokens = model.get_num_tokens(
-        model="mistralai/Mixtral-8x7B-Instruct-v0.1",
-        credentials={
-            "api_key": os.environ.get("TOGETHER_API_KEY"),
-        },
-        prompt_messages=[
-            SystemPromptMessage(
-                content="You are a helpful AI assistant.",
-            ),
-            UserPromptMessage(content="Hello World!"),
-        ],
-    )
-
-    assert isinstance(num_tokens, int)
-    assert num_tokens == 21
diff --git a/api/tests/integration_tests/model_runtime/tongyi/__init__.py b/api/tests/integration_tests/model_runtime/tongyi/__init__.py
deleted file mode 100644
index e69de29bb2..0000000000
diff --git a/api/tests/integration_tests/model_runtime/tongyi/test_llm.py b/api/tests/integration_tests/model_runtime/tongyi/test_llm.py
deleted file mode 100644
index 61650735f2..0000000000
--- a/api/tests/integration_tests/model_runtime/tongyi/test_llm.py
+++ /dev/null
@@ -1,75 +0,0 @@
-import os
-from collections.abc import Generator
-
-import pytest
-
-from core.model_runtime.entities.llm_entities import LLMResult, LLMResultChunk, LLMResultChunkDelta
-from core.model_runtime.entities.message_entities import AssistantPromptMessage, SystemPromptMessage, UserPromptMessage
-from core.model_runtime.errors.validate import CredentialsValidateFailedError
-from core.model_runtime.model_providers.tongyi.llm.llm import TongyiLargeLanguageModel
-
-
-def test_validate_credentials():
-    model = TongyiLargeLanguageModel()
-
-    with pytest.raises(CredentialsValidateFailedError):
-        model.validate_credentials(model="qwen-turbo", credentials={"dashscope_api_key": "invalid_key"})
-
-    model.validate_credentials(
-        model="qwen-turbo", credentials={"dashscope_api_key": os.environ.get("TONGYI_DASHSCOPE_API_KEY")}
-    )
-
-
-def test_invoke_model():
-    model = TongyiLargeLanguageModel()
-
-    response = model.invoke(
-        model="qwen-turbo",
-        credentials={"dashscope_api_key": os.environ.get("TONGYI_DASHSCOPE_API_KEY")},
-        prompt_messages=[UserPromptMessage(content="Who are you?")],
-        model_parameters={"temperature": 0.5, "max_tokens": 10},
-        stop=["How"],
-        stream=False,
-        user="abc-123",
-    )
-
-    assert isinstance(response, LLMResult)
-    assert len(response.message.content) > 0
-
-
-def test_invoke_stream_model():
-    model = TongyiLargeLanguageModel()
-
-    response = model.invoke(
-        model="qwen-turbo",
-        credentials={"dashscope_api_key": os.environ.get("TONGYI_DASHSCOPE_API_KEY")},
-        prompt_messages=[UserPromptMessage(content="Hello World!")],
-        model_parameters={"temperature": 0.5, "max_tokens": 100, "seed": 1234},
-        stream=True,
-        user="abc-123",
-    )
-
-    assert isinstance(response, Generator)
-
-    for chunk in response:
-        assert isinstance(chunk, LLMResultChunk)
-        assert isinstance(chunk.delta, LLMResultChunkDelta)
-        assert isinstance(chunk.delta.message, AssistantPromptMessage)
-        assert len(chunk.delta.message.content) > 0 if chunk.delta.finish_reason is None else True
-
-
-def test_get_num_tokens():
-    model = TongyiLargeLanguageModel()
-
-    num_tokens = model.get_num_tokens(
-        model="qwen-turbo",
-        credentials={"dashscope_api_key": os.environ.get("TONGYI_DASHSCOPE_API_KEY")},
-        prompt_messages=[
-            SystemPromptMessage(
-                content="You are a helpful AI assistant.",
-            ),
-            UserPromptMessage(content="Hello World!"),
-        ],
-    )
-
-    assert num_tokens == 12
diff --git a/api/tests/integration_tests/model_runtime/tongyi/test_provider.py b/api/tests/integration_tests/model_runtime/tongyi/test_provider.py
deleted file mode 100644
index 0bc96c84e7..0000000000
--- a/api/tests/integration_tests/model_runtime/tongyi/test_provider.py
+++ /dev/null
@@ -1,17 +0,0 @@
-import os
-
-import pytest
-
-from core.model_runtime.errors.validate import CredentialsValidateFailedError
-from core.model_runtime.model_providers.tongyi.tongyi import TongyiProvider
-
-
-def test_validate_provider_credentials():
-    provider = TongyiProvider()
-
-    with pytest.raises(CredentialsValidateFailedError):
-        provider.validate_provider_credentials(credentials={})
-
-    provider.validate_provider_credentials(
-        credentials={"dashscope_api_key": os.environ.get("TONGYI_DASHSCOPE_API_KEY")}
-    )
diff --git a/api/tests/integration_tests/model_runtime/tongyi/test_response_format.py b/api/tests/integration_tests/model_runtime/tongyi/test_response_format.py
deleted file mode 100644
index 905e7907fd..0000000000
--- a/api/tests/integration_tests/model_runtime/tongyi/test_response_format.py
+++ /dev/null
@@ -1,80 +0,0 @@
-import json
-import os
-from collections.abc import Generator
-
-from core.model_runtime.entities.llm_entities import LLMResultChunk, LLMResultChunkDelta
-from core.model_runtime.entities.message_entities import AssistantPromptMessage, UserPromptMessage
-from core.model_runtime.model_providers.tongyi.llm.llm import TongyiLargeLanguageModel
-
-
-def test_invoke_model_with_json_response():
-    """
-    Test the invocation of a model with JSON response.
-    """
-    model_list = [
-        "qwen-max-0403",
-        "qwen-max-1201",
-        "qwen-max-longcontext",
-        "qwen-max",
-        "qwen-plus-chat",
-        "qwen-plus",
-        "qwen-turbo-chat",
-        "qwen-turbo",
-    ]
-    for model_name in model_list:
-        print("testing model: ", model_name)
-        invoke_model_with_json_response(model_name)
-
-
-def invoke_model_with_json_response(model_name="qwen-max-0403"):
-    """
-    Method to invoke the model with JSON response format.
-    Args:
-        model_name (str): The name of the model to invoke. Defaults to "qwen-max-0403".
-
-    Returns:
-        None
-    """
-    model = TongyiLargeLanguageModel()
-
-    response = model.invoke(
-        model=model_name,
-        credentials={"dashscope_api_key": os.environ.get("TONGYI_DASHSCOPE_API_KEY")},
-        prompt_messages=[
-            UserPromptMessage(content='output json data with format `{"data": "test", "code": 200, "msg": "success"}')
-        ],
-        model_parameters={
-            "temperature": 0.5,
-            "max_tokens": 50,
-            "response_format": "JSON",
-        },
-        stream=True,
-        user="abc-123",
-    )
-    print("=====================================")
-    print(response)
-    assert isinstance(response, Generator)
-    output = ""
-    for chunk in response:
-        assert isinstance(chunk, LLMResultChunk)
-        assert isinstance(chunk.delta, LLMResultChunkDelta)
-        assert isinstance(chunk.delta.message, AssistantPromptMessage)
-        output += chunk.delta.message.content
-    assert is_json(output)
-
-
-def is_json(s):
-    """
-    Check if a string is a valid JSON.
-
-    Args:
-        s (str): The string to check.
-
-    Returns:
-        bool: True if the string is a valid JSON, False otherwise.
- """ - try: - json.loads(s) - except ValueError: - return False - return True diff --git a/api/tests/integration_tests/model_runtime/upstage/__init__.py b/api/tests/integration_tests/model_runtime/upstage/__init__.py deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/api/tests/integration_tests/model_runtime/upstage/test_llm.py b/api/tests/integration_tests/model_runtime/upstage/test_llm.py deleted file mode 100644 index bc7517acbe..0000000000 --- a/api/tests/integration_tests/model_runtime/upstage/test_llm.py +++ /dev/null @@ -1,186 +0,0 @@ -import os -from collections.abc import Generator - -import pytest - -from core.model_runtime.entities.llm_entities import LLMResult, LLMResultChunk, LLMResultChunkDelta -from core.model_runtime.entities.message_entities import ( - AssistantPromptMessage, - PromptMessageTool, - SystemPromptMessage, - UserPromptMessage, -) -from core.model_runtime.entities.model_entities import AIModelEntity, ModelType -from core.model_runtime.errors.validate import CredentialsValidateFailedError -from core.model_runtime.model_providers.__base.large_language_model import LargeLanguageModel -from core.model_runtime.model_providers.upstage.llm.llm import UpstageLargeLanguageModel - -"""FOR MOCK FIXTURES, DO NOT REMOVE""" -from tests.integration_tests.model_runtime.__mock.openai import setup_openai_mock - - -def test_predefined_models(): - model = UpstageLargeLanguageModel() - model_schemas = model.predefined_models() - - assert len(model_schemas) >= 1 - assert isinstance(model_schemas[0], AIModelEntity) - - -@pytest.mark.parametrize("setup_openai_mock", [["chat"]], indirect=True) -def test_validate_credentials_for_chat_model(setup_openai_mock): - model = UpstageLargeLanguageModel() - - with pytest.raises(CredentialsValidateFailedError): - # model name to gpt-3.5-turbo because of mocking - model.validate_credentials(model="gpt-3.5-turbo", credentials={"upstage_api_key": "invalid_key"}) - - model.validate_credentials( - model="solar-1-mini-chat", credentials={"upstage_api_key": os.environ.get("UPSTAGE_API_KEY")} - ) - - -@pytest.mark.parametrize("setup_openai_mock", [["chat"]], indirect=True) -def test_invoke_chat_model(setup_openai_mock): - model = UpstageLargeLanguageModel() - - result = model.invoke( - model="solar-1-mini-chat", - credentials={"upstage_api_key": os.environ.get("UPSTAGE_API_KEY")}, - prompt_messages=[ - SystemPromptMessage( - content="You are a helpful AI assistant.", - ), - UserPromptMessage(content="Hello World!"), - ], - model_parameters={ - "temperature": 0.0, - "top_p": 1.0, - "presence_penalty": 0.0, - "frequency_penalty": 0.0, - "max_tokens": 10, - }, - stop=["How"], - stream=False, - user="abc-123", - ) - - assert isinstance(result, LLMResult) - assert len(result.message.content) > 0 - - -@pytest.mark.parametrize("setup_openai_mock", [["chat"]], indirect=True) -def test_invoke_chat_model_with_tools(setup_openai_mock): - model = UpstageLargeLanguageModel() - - result = model.invoke( - model="solar-1-mini-chat", - credentials={"upstage_api_key": os.environ.get("UPSTAGE_API_KEY")}, - prompt_messages=[ - SystemPromptMessage( - content="You are a helpful AI assistant.", - ), - UserPromptMessage( - content="what's the weather today in London?", - ), - ], - model_parameters={"temperature": 0.0, "max_tokens": 100}, - tools=[ - PromptMessageTool( - name="get_weather", - description="Determine weather in my location", - parameters={ - "type": "object", - "properties": { - "location": {"type": "string", "description": "The city and state 
e.g. San Francisco, CA"}, - "unit": {"type": "string", "enum": ["c", "f"]}, - }, - "required": ["location"], - }, - ), - PromptMessageTool( - name="get_stock_price", - description="Get the current stock price", - parameters={ - "type": "object", - "properties": {"symbol": {"type": "string", "description": "The stock symbol"}}, - "required": ["symbol"], - }, - ), - ], - stream=False, - user="abc-123", - ) - - assert isinstance(result, LLMResult) - assert isinstance(result.message, AssistantPromptMessage) - assert len(result.message.tool_calls) > 0 - - -@pytest.mark.parametrize("setup_openai_mock", [["chat"]], indirect=True) -def test_invoke_stream_chat_model(setup_openai_mock): - model = UpstageLargeLanguageModel() - - result = model.invoke( - model="solar-1-mini-chat", - credentials={"upstage_api_key": os.environ.get("UPSTAGE_API_KEY")}, - prompt_messages=[ - SystemPromptMessage( - content="You are a helpful AI assistant.", - ), - UserPromptMessage(content="Hello World!"), - ], - model_parameters={"temperature": 0.0, "max_tokens": 100}, - stream=True, - user="abc-123", - ) - - assert isinstance(result, Generator) - - for chunk in result: - assert isinstance(chunk, LLMResultChunk) - assert isinstance(chunk.delta, LLMResultChunkDelta) - assert isinstance(chunk.delta.message, AssistantPromptMessage) - assert len(chunk.delta.message.content) > 0 if chunk.delta.finish_reason is None else True - if chunk.delta.finish_reason is not None: - assert chunk.delta.usage is not None - assert chunk.delta.usage.completion_tokens > 0 - - -def test_get_num_tokens(): - model = UpstageLargeLanguageModel() - - num_tokens = model.get_num_tokens( - model="solar-1-mini-chat", - credentials={"upstage_api_key": os.environ.get("UPSTAGE_API_KEY")}, - prompt_messages=[UserPromptMessage(content="Hello World!")], - ) - - assert num_tokens == 13 - - num_tokens = model.get_num_tokens( - model="solar-1-mini-chat", - credentials={"upstage_api_key": os.environ.get("UPSTAGE_API_KEY")}, - prompt_messages=[ - SystemPromptMessage( - content="You are a helpful AI assistant.", - ), - UserPromptMessage(content="Hello World!"), - ], - tools=[ - PromptMessageTool( - name="get_weather", - description="Determine weather in my location", - parameters={ - "type": "object", - "properties": { - "location": {"type": "string", "description": "The city and state e.g. 
San Francisco, CA"}, - "unit": {"type": "string", "enum": ["c", "f"]}, - }, - "required": ["location"], - }, - ), - ], - ) - - assert num_tokens == 106 diff --git a/api/tests/integration_tests/model_runtime/upstage/test_provider.py b/api/tests/integration_tests/model_runtime/upstage/test_provider.py deleted file mode 100644 index 9d83779aa0..0000000000 --- a/api/tests/integration_tests/model_runtime/upstage/test_provider.py +++ /dev/null @@ -1,17 +0,0 @@ -import os - -import pytest - -from core.model_runtime.errors.validate import CredentialsValidateFailedError -from core.model_runtime.model_providers.upstage.upstage import UpstageProvider -from tests.integration_tests.model_runtime.__mock.openai import setup_openai_mock - - -@pytest.mark.parametrize("setup_openai_mock", [["chat"]], indirect=True) -def test_validate_provider_credentials(setup_openai_mock): - provider = UpstageProvider() - - with pytest.raises(CredentialsValidateFailedError): - provider.validate_provider_credentials(credentials={}) - - provider.validate_provider_credentials(credentials={"upstage_api_key": os.environ.get("UPSTAGE_API_KEY")}) diff --git a/api/tests/integration_tests/model_runtime/upstage/test_text_embedding.py b/api/tests/integration_tests/model_runtime/upstage/test_text_embedding.py deleted file mode 100644 index 8c83172fa3..0000000000 --- a/api/tests/integration_tests/model_runtime/upstage/test_text_embedding.py +++ /dev/null @@ -1,54 +0,0 @@ -import os - -import pytest - -from core.model_runtime.entities.text_embedding_entities import TextEmbeddingResult -from core.model_runtime.errors.validate import CredentialsValidateFailedError -from core.model_runtime.model_providers.upstage.text_embedding.text_embedding import UpstageTextEmbeddingModel -from tests.integration_tests.model_runtime.__mock.openai import setup_openai_mock - - -@pytest.mark.parametrize("setup_openai_mock", [["text_embedding"]], indirect=True) -def test_validate_credentials(setup_openai_mock): - model = UpstageTextEmbeddingModel() - - with pytest.raises(CredentialsValidateFailedError): - model.validate_credentials( - model="solar-embedding-1-large-passage", credentials={"upstage_api_key": "invalid_key"} - ) - - model.validate_credentials( - model="solar-embedding-1-large-passage", credentials={"upstage_api_key": os.environ.get("UPSTAGE_API_KEY")} - ) - - -@pytest.mark.parametrize("setup_openai_mock", [["text_embedding"]], indirect=True) -def test_invoke_model(setup_openai_mock): - model = UpstageTextEmbeddingModel() - - result = model.invoke( - model="solar-embedding-1-large-passage", - credentials={ - "upstage_api_key": os.environ.get("UPSTAGE_API_KEY"), - }, - texts=["hello", "world", " ".join(["long_text"] * 100), " ".join(["another_long_text"] * 100)], - user="abc-123", - ) - - assert isinstance(result, TextEmbeddingResult) - assert len(result.embeddings) == 4 - assert result.usage.total_tokens == 2 - - -def test_get_num_tokens(): - model = UpstageTextEmbeddingModel() - - num_tokens = model.get_num_tokens( - model="solar-embedding-1-large-passage", - credentials={ - "upstage_api_key": os.environ.get("UPSTAGE_API_KEY"), - }, - texts=["hello", "world"], - ) - - assert num_tokens == 5 diff --git a/api/tests/integration_tests/model_runtime/volcengine_maas/__init__.py b/api/tests/integration_tests/model_runtime/volcengine_maas/__init__.py deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/api/tests/integration_tests/model_runtime/volcengine_maas/test_embedding.py 
diff --git a/api/tests/integration_tests/model_runtime/volcengine_maas/test_embedding.py b/api/tests/integration_tests/model_runtime/volcengine_maas/test_embedding.py
deleted file mode 100644
index f831c063a4..0000000000
--- a/api/tests/integration_tests/model_runtime/volcengine_maas/test_embedding.py
+++ /dev/null
@@ -1,79 +0,0 @@
-import os
-
-import pytest
-
-from core.model_runtime.entities.text_embedding_entities import TextEmbeddingResult
-from core.model_runtime.errors.validate import CredentialsValidateFailedError
-from core.model_runtime.model_providers.volcengine_maas.text_embedding.text_embedding import (
-    VolcengineMaaSTextEmbeddingModel,
-)
-
-
-def test_validate_credentials():
-    model = VolcengineMaaSTextEmbeddingModel()
-
-    with pytest.raises(CredentialsValidateFailedError):
-        model.validate_credentials(
-            model="NOT IMPORTANT",
-            credentials={
-                "api_endpoint_host": "maas-api.ml-platform-cn-beijing.volces.com",
-                "volc_region": "cn-beijing",
-                "volc_access_key_id": "INVALID",
-                "volc_secret_access_key": "INVALID",
-                "endpoint_id": "INVALID",
-                "base_model_name": "Doubao-embedding",
-            },
-        )
-
-    model.validate_credentials(
-        model="NOT IMPORTANT",
-        credentials={
-            "api_endpoint_host": "maas-api.ml-platform-cn-beijing.volces.com",
-            "volc_region": "cn-beijing",
-            "volc_access_key_id": os.environ.get("VOLC_API_KEY"),
-            "volc_secret_access_key": os.environ.get("VOLC_SECRET_KEY"),
-            "endpoint_id": os.environ.get("VOLC_EMBEDDING_ENDPOINT_ID"),
-            "base_model_name": "Doubao-embedding",
-        },
-    )
-
-
-def test_invoke_model():
-    model = VolcengineMaaSTextEmbeddingModel()
-
-    result = model.invoke(
-        model="NOT IMPORTANT",
-        credentials={
-            "api_endpoint_host": "maas-api.ml-platform-cn-beijing.volces.com",
-            "volc_region": "cn-beijing",
-            "volc_access_key_id": os.environ.get("VOLC_API_KEY"),
-            "volc_secret_access_key": os.environ.get("VOLC_SECRET_KEY"),
-            "endpoint_id": os.environ.get("VOLC_EMBEDDING_ENDPOINT_ID"),
-            "base_model_name": "Doubao-embedding",
-        },
-        texts=["hello", "world"],
-        user="abc-123",
-    )
-
-    assert isinstance(result, TextEmbeddingResult)
-    assert len(result.embeddings) == 2
-    assert result.usage.total_tokens > 0
-
-
-def test_get_num_tokens():
-    model = VolcengineMaaSTextEmbeddingModel()
-
-    num_tokens = model.get_num_tokens(
-        model="NOT IMPORTANT",
-        credentials={
-            "api_endpoint_host": "maas-api.ml-platform-cn-beijing.volces.com",
-            "volc_region": "cn-beijing",
-            "volc_access_key_id": os.environ.get("VOLC_API_KEY"),
-            "volc_secret_access_key": os.environ.get("VOLC_SECRET_KEY"),
-            "endpoint_id": os.environ.get("VOLC_EMBEDDING_ENDPOINT_ID"),
-            "base_model_name": "Doubao-embedding",
-        },
-        texts=["hello", "world"],
-    )
-
-    assert num_tokens == 2
diff --git a/api/tests/integration_tests/model_runtime/volcengine_maas/test_llm.py b/api/tests/integration_tests/model_runtime/volcengine_maas/test_llm.py
deleted file mode 100644
index 8ff9c41404..0000000000
--- a/api/tests/integration_tests/model_runtime/volcengine_maas/test_llm.py
+++ /dev/null
@@ -1,118 +0,0 @@
-import os
-from collections.abc import Generator
-
-import pytest
-
-from core.model_runtime.entities.llm_entities import LLMResult, LLMResultChunk, LLMResultChunkDelta
-from core.model_runtime.entities.message_entities import AssistantPromptMessage, UserPromptMessage
-from core.model_runtime.errors.validate import CredentialsValidateFailedError
-from core.model_runtime.model_providers.volcengine_maas.llm.llm import VolcengineMaaSLargeLanguageModel
-
-
-def test_validate_credentials_for_chat_model():
-    model = VolcengineMaaSLargeLanguageModel()
-
-    with pytest.raises(CredentialsValidateFailedError):
-        model.validate_credentials(
-            model="NOT IMPORTANT",
-            credentials={
-                "api_endpoint_host": "maas-api.ml-platform-cn-beijing.volces.com",
-                "volc_region": "cn-beijing",
-                "volc_access_key_id": "INVALID",
-                "volc_secret_access_key": "INVALID",
-                "endpoint_id": "INVALID",
-            },
-        )
-
-    model.validate_credentials(
-        model="NOT IMPORTANT",
-        credentials={
-            "api_endpoint_host": "maas-api.ml-platform-cn-beijing.volces.com",
-            "volc_region": "cn-beijing",
-            "volc_access_key_id": os.environ.get("VOLC_API_KEY"),
-            "volc_secret_access_key": os.environ.get("VOLC_SECRET_KEY"),
-            "endpoint_id": os.environ.get("VOLC_MODEL_ENDPOINT_ID"),
-        },
-    )
-
-
-def test_invoke_model():
-    model = VolcengineMaaSLargeLanguageModel()
-
-    response = model.invoke(
-        model="NOT IMPORTANT",
-        credentials={
-            "api_endpoint_host": "maas-api.ml-platform-cn-beijing.volces.com",
-            "volc_region": "cn-beijing",
-            "volc_access_key_id": os.environ.get("VOLC_API_KEY"),
-            "volc_secret_access_key": os.environ.get("VOLC_SECRET_KEY"),
-            "endpoint_id": os.environ.get("VOLC_MODEL_ENDPOINT_ID"),
-            "base_model_name": "Skylark2-pro-4k",
-        },
-        prompt_messages=[UserPromptMessage(content="Hello World!")],
-        model_parameters={
-            "temperature": 0.7,
-            "top_p": 1.0,
-            "top_k": 1,
-        },
-        stop=["you"],
-        user="abc-123",
-        stream=False,
-    )
-
-    assert isinstance(response, LLMResult)
-    assert len(response.message.content) > 0
-    assert response.usage.total_tokens > 0
-
-
-def test_invoke_stream_model():
-    model = VolcengineMaaSLargeLanguageModel()
-
-    response = model.invoke(
-        model="NOT IMPORTANT",
-        credentials={
-            "api_endpoint_host": "maas-api.ml-platform-cn-beijing.volces.com",
-            "volc_region": "cn-beijing",
-            "volc_access_key_id": os.environ.get("VOLC_API_KEY"),
-            "volc_secret_access_key": os.environ.get("VOLC_SECRET_KEY"),
-            "endpoint_id": os.environ.get("VOLC_MODEL_ENDPOINT_ID"),
-            "base_model_name": "Skylark2-pro-4k",
-        },
-        prompt_messages=[UserPromptMessage(content="Hello World!")],
-        model_parameters={
-            "temperature": 0.7,
-            "top_p": 1.0,
-            "top_k": 1,
-        },
-        stop=["you"],
-        stream=True,
-        user="abc-123",
-    )
-
-    assert isinstance(response, Generator)
-    for chunk in response:
-        assert isinstance(chunk, LLMResultChunk)
-        assert isinstance(chunk.delta, LLMResultChunkDelta)
-        assert isinstance(chunk.delta.message, AssistantPromptMessage)
-        assert len(chunk.delta.message.content) > 0 if chunk.delta.finish_reason is None else True
-
-
-def test_get_num_tokens():
-    model = VolcengineMaaSLargeLanguageModel()
-
-    response = model.get_num_tokens(
-        model="NOT IMPORTANT",
-        credentials={
-            "api_endpoint_host": "maas-api.ml-platform-cn-beijing.volces.com",
-            "volc_region": "cn-beijing",
-            "volc_access_key_id": os.environ.get("VOLC_API_KEY"),
-            "volc_secret_access_key": os.environ.get("VOLC_SECRET_KEY"),
-            "endpoint_id": os.environ.get("VOLC_MODEL_ENDPOINT_ID"),
-            "base_model_name": "Skylark2-pro-4k",
-        },
-        prompt_messages=[UserPromptMessage(content="Hello World!")],
-        tools=[],
-    )
-
-    assert isinstance(response, int)
-    assert response == 6
diff --git a/api/tests/integration_tests/model_runtime/wenxin/__init__.py b/api/tests/integration_tests/model_runtime/wenxin/__init__.py
deleted file mode 100644
index e69de29bb2..0000000000
diff --git a/api/tests/integration_tests/model_runtime/wenxin/test_embedding.py b/api/tests/integration_tests/model_runtime/wenxin/test_embedding.py
deleted file mode 100644
index ac38340aec..0000000000
--- a/api/tests/integration_tests/model_runtime/wenxin/test_embedding.py
+++ /dev/null
@@ -1,69 +0,0 @@
-import os
-from time import sleep
-
-from core.model_runtime.entities.text_embedding_entities import TextEmbeddingResult
-from core.model_runtime.model_providers.wenxin.text_embedding.text_embedding import WenxinTextEmbeddingModel
-
-
-def test_invoke_embedding_v1():
-    sleep(3)
-    model = WenxinTextEmbeddingModel()
-
-    response = model.invoke(
-        model="embedding-v1",
-        credentials={"api_key": os.environ.get("WENXIN_API_KEY"), "secret_key": os.environ.get("WENXIN_SECRET_KEY")},
-        texts=["hello", "你好", "xxxxx"],
-        user="abc-123",
-    )
-
-    assert isinstance(response, TextEmbeddingResult)
-    assert len(response.embeddings) == 3
-    assert isinstance(response.embeddings[0], list)
-
-
-def test_invoke_embedding_bge_large_en():
-    sleep(3)
-    model = WenxinTextEmbeddingModel()
-
-    response = model.invoke(
-        model="bge-large-en",
-        credentials={"api_key": os.environ.get("WENXIN_API_KEY"), "secret_key": os.environ.get("WENXIN_SECRET_KEY")},
-        texts=["hello", "你好", "xxxxx"],
-        user="abc-123",
-    )
-
-    assert isinstance(response, TextEmbeddingResult)
-    assert len(response.embeddings) == 3
-    assert isinstance(response.embeddings[0], list)
-
-
-def test_invoke_embedding_bge_large_zh():
-    sleep(3)
-    model = WenxinTextEmbeddingModel()
-
-    response = model.invoke(
-        model="bge-large-zh",
-        credentials={"api_key": os.environ.get("WENXIN_API_KEY"), "secret_key": os.environ.get("WENXIN_SECRET_KEY")},
-        texts=["hello", "你好", "xxxxx"],
-        user="abc-123",
-    )
-
-    assert isinstance(response, TextEmbeddingResult)
-    assert len(response.embeddings) == 3
-    assert isinstance(response.embeddings[0], list)
-
-
-def test_invoke_embedding_tao_8k():
-    sleep(3)
-    model = WenxinTextEmbeddingModel()
-
-    response = model.invoke(
-        model="tao-8k",
-        credentials={"api_key": os.environ.get("WENXIN_API_KEY"), "secret_key": os.environ.get("WENXIN_SECRET_KEY")},
-        texts=["hello", "你好", "xxxxx"],
-        user="abc-123",
-    )
-
-    assert isinstance(response, TextEmbeddingResult)
-    assert len(response.embeddings) == 3
-    assert isinstance(response.embeddings[0], list)
diff --git a/api/tests/integration_tests/model_runtime/wenxin/test_llm.py b/api/tests/integration_tests/model_runtime/wenxin/test_llm.py
deleted file mode 100644
index e2e58f15e0..0000000000
--- a/api/tests/integration_tests/model_runtime/wenxin/test_llm.py
+++ /dev/null
@@ -1,214 +0,0 @@
-import os
-from collections.abc import Generator
-from time import sleep
-
-import pytest
-
-from core.model_runtime.entities.llm_entities import LLMResult, LLMResultChunk, LLMResultChunkDelta
-from core.model_runtime.entities.message_entities import AssistantPromptMessage, SystemPromptMessage, UserPromptMessage
-from core.model_runtime.entities.model_entities import AIModelEntity
-from core.model_runtime.errors.validate import CredentialsValidateFailedError
-from core.model_runtime.model_providers.wenxin.llm.llm import ErnieBotLargeLanguageModel
-
-
-def test_predefined_models():
-    model = ErnieBotLargeLanguageModel()
-    model_schemas = model.predefined_models()
-    assert len(model_schemas) >= 1
-    assert isinstance(model_schemas[0], AIModelEntity)
-
-
-def test_validate_credentials_for_chat_model():
-    sleep(3)
-    model = ErnieBotLargeLanguageModel()
-
-    with pytest.raises(CredentialsValidateFailedError):
-        model.validate_credentials(
-            model="ernie-bot", credentials={"api_key": "invalid_key", "secret_key": "invalid_key"}
-        )
-
-    model.validate_credentials(
-        model="ernie-bot",
-        credentials={"api_key": os.environ.get("WENXIN_API_KEY"), "secret_key": os.environ.get("WENXIN_SECRET_KEY")},
-    )
-
-
-def test_invoke_model_ernie_bot():
-    sleep(3)
-    model = ErnieBotLargeLanguageModel()
-
-    response = model.invoke(
-        model="ernie-bot",
-        credentials={"api_key": os.environ.get("WENXIN_API_KEY"), "secret_key": os.environ.get("WENXIN_SECRET_KEY")},
-        prompt_messages=[UserPromptMessage(content="Hello World!")],
-        model_parameters={
-            "temperature": 0.7,
-            "top_p": 1.0,
-        },
-        stop=["you"],
-        user="abc-123",
-        stream=False,
-    )
-
-    assert isinstance(response, LLMResult)
-    assert len(response.message.content) > 0
-    assert response.usage.total_tokens > 0
-
-
-def test_invoke_model_ernie_bot_turbo():
-    sleep(3)
-    model = ErnieBotLargeLanguageModel()
-
-    response = model.invoke(
-        model="ernie-bot-turbo",
-        credentials={"api_key": os.environ.get("WENXIN_API_KEY"), "secret_key": os.environ.get("WENXIN_SECRET_KEY")},
-        prompt_messages=[UserPromptMessage(content="Hello World!")],
-        model_parameters={
-            "temperature": 0.7,
-            "top_p": 1.0,
-        },
-        stop=["you"],
-        user="abc-123",
-        stream=False,
-    )
-
-    assert isinstance(response, LLMResult)
-    assert len(response.message.content) > 0
-    assert response.usage.total_tokens > 0
-
-
-def test_invoke_model_ernie_8k():
-    sleep(3)
-    model = ErnieBotLargeLanguageModel()
-
-    response = model.invoke(
-        model="ernie-bot-8k",
-        credentials={"api_key": os.environ.get("WENXIN_API_KEY"), "secret_key": os.environ.get("WENXIN_SECRET_KEY")},
-        prompt_messages=[UserPromptMessage(content="Hello World!")],
-        model_parameters={
-            "temperature": 0.7,
-            "top_p": 1.0,
-        },
-        stop=["you"],
-        user="abc-123",
-        stream=False,
-    )
-
-    assert isinstance(response, LLMResult)
-    assert len(response.message.content) > 0
-    assert response.usage.total_tokens > 0
-
-
-def test_invoke_model_ernie_bot_4():
-    sleep(3)
-    model = ErnieBotLargeLanguageModel()
-
-    response = model.invoke(
-        model="ernie-bot-4",
-        credentials={"api_key": os.environ.get("WENXIN_API_KEY"), "secret_key": os.environ.get("WENXIN_SECRET_KEY")},
-        prompt_messages=[UserPromptMessage(content="Hello World!")],
-        model_parameters={
-            "temperature": 0.7,
-            "top_p": 1.0,
-        },
-        stop=["you"],
-        user="abc-123",
-        stream=False,
-    )
-
-    assert isinstance(response, LLMResult)
-    assert len(response.message.content) > 0
-    assert response.usage.total_tokens > 0
-
-
-def test_invoke_stream_model():
-    sleep(3)
-    model = ErnieBotLargeLanguageModel()
-
-    response = model.invoke(
-        model="ernie-3.5-8k",
-        credentials={"api_key": os.environ.get("WENXIN_API_KEY"), "secret_key": os.environ.get("WENXIN_SECRET_KEY")},
-        prompt_messages=[UserPromptMessage(content="Hello World!")],
-        model_parameters={
-            "temperature": 0.7,
-            "top_p": 1.0,
-        },
-        stop=["you"],
-        stream=True,
-        user="abc-123",
-    )
-
-    assert isinstance(response, Generator)
-    for chunk in response:
-        assert isinstance(chunk, LLMResultChunk)
-        assert isinstance(chunk.delta, LLMResultChunkDelta)
-        assert isinstance(chunk.delta.message, AssistantPromptMessage)
-        assert len(chunk.delta.message.content) > 0 if chunk.delta.finish_reason is None else True
-
-
-def test_invoke_model_with_system():
-    sleep(3)
-    model = ErnieBotLargeLanguageModel()
-
-    response = model.invoke(
-        model="ernie-bot",
-        credentials={"api_key": os.environ.get("WENXIN_API_KEY"), "secret_key": os.environ.get("WENXIN_SECRET_KEY")},
-        prompt_messages=[SystemPromptMessage(content="你是Kasumi"), UserPromptMessage(content="你是谁?")],
-        model_parameters={
-            "temperature": 0.7,
-            "top_p": 1.0,
-        },
-        stop=["you"],
-        stream=False,
-        user="abc-123",
-    )
-
-    assert isinstance(response, LLMResult)
-    assert "kasumi" in response.message.content.lower()
-
-
-def test_invoke_with_search():
-    sleep(3)
-    model = ErnieBotLargeLanguageModel()
-
-    response = model.invoke(
-        model="ernie-bot",
-        credentials={"api_key": os.environ.get("WENXIN_API_KEY"), "secret_key": os.environ.get("WENXIN_SECRET_KEY")},
-        prompt_messages=[UserPromptMessage(content="北京今天的天气怎么样")],
-        model_parameters={
-            "temperature": 0.7,
-            "top_p": 1.0,
-            "disable_search": True,
-        },
-        stop=[],
-        stream=True,
-        user="abc-123",
-    )
-
-    assert isinstance(response, Generator)
-    total_message = ""
-    for chunk in response:
-        assert isinstance(chunk, LLMResultChunk)
-        assert isinstance(chunk.delta, LLMResultChunkDelta)
-        assert isinstance(chunk.delta.message, AssistantPromptMessage)
-        total_message += chunk.delta.message.content
-        print(chunk.delta.message.content)
-        assert len(chunk.delta.message.content) > 0 if not chunk.delta.finish_reason else True
-
-    # the response should contain a refusal such as 对不起, 我不能, or 不支持
-    assert "不" in total_message or "抱歉" in total_message or "无法" in total_message
-
-
-def test_get_num_tokens():
-    sleep(3)
-    model = ErnieBotLargeLanguageModel()
-
-    response = model.get_num_tokens(
-        model="ernie-bot",
-        credentials={"api_key": os.environ.get("WENXIN_API_KEY"), "secret_key": os.environ.get("WENXIN_SECRET_KEY")},
-        prompt_messages=[UserPromptMessage(content="Hello World!")],
-        tools=[],
-    )
-
-    assert isinstance(response, int)
-    assert response == 10
diff --git a/api/tests/integration_tests/model_runtime/wenxin/test_provider.py b/api/tests/integration_tests/model_runtime/wenxin/test_provider.py
deleted file mode 100644
index 337c3d2a80..0000000000
--- a/api/tests/integration_tests/model_runtime/wenxin/test_provider.py
+++ /dev/null
@@ -1,17 +0,0 @@
-import os
-
-import pytest
-
-from core.model_runtime.errors.validate import CredentialsValidateFailedError
-from core.model_runtime.model_providers.wenxin.wenxin import WenxinProvider
-
-
-def test_validate_provider_credentials():
-    provider = WenxinProvider()
-
-    with pytest.raises(CredentialsValidateFailedError):
-        provider.validate_provider_credentials(credentials={"api_key": "hahahaha", "secret_key": "hahahaha"})
-
-    provider.validate_provider_credentials(
-        credentials={"api_key": os.environ.get("WENXIN_API_KEY"), "secret_key": os.environ.get("WENXIN_SECRET_KEY")}
-    )
diff --git a/api/tests/integration_tests/model_runtime/xinference/__init__.py b/api/tests/integration_tests/model_runtime/xinference/__init__.py
deleted file mode 100644
index e69de29bb2..0000000000
diff --git a/api/tests/integration_tests/model_runtime/xinference/test_embeddings.py b/api/tests/integration_tests/model_runtime/xinference/test_embeddings.py
deleted file mode 100644
index 8e778d005a..0000000000
--- a/api/tests/integration_tests/model_runtime/xinference/test_embeddings.py
+++ /dev/null
@@ -1,64 +0,0 @@
-import os
-
-import pytest
-
-from core.model_runtime.entities.text_embedding_entities import TextEmbeddingResult
-from core.model_runtime.errors.validate import CredentialsValidateFailedError
-from core.model_runtime.model_providers.xinference.text_embedding.text_embedding import XinferenceTextEmbeddingModel
-from tests.integration_tests.model_runtime.__mock.xinference import MOCK, setup_xinference_mock
-
-
-@pytest.mark.parametrize("setup_xinference_mock", [["none"]], indirect=True)
-def test_validate_credentials(setup_xinference_mock):
-    model = XinferenceTextEmbeddingModel()
-
-    with pytest.raises(CredentialsValidateFailedError):
-        model.validate_credentials(
-            model="bge-base-en",
-            credentials={
-                "server_url": os.environ.get("XINFERENCE_SERVER_URL"),
-                "model_uid": "www " + os.environ.get("XINFERENCE_EMBEDDINGS_MODEL_UID"),
-            },
-        )
-
-    model.validate_credentials(
-        model="bge-base-en",
-        credentials={
-            "server_url": os.environ.get("XINFERENCE_SERVER_URL"),
-            "model_uid": os.environ.get("XINFERENCE_EMBEDDINGS_MODEL_UID"),
-        },
-    )
-
-
-@pytest.mark.parametrize("setup_xinference_mock", [["none"]], indirect=True)
-def test_invoke_model(setup_xinference_mock):
-    model = XinferenceTextEmbeddingModel()
-
-    result = model.invoke(
-        model="bge-base-en",
-        credentials={
-            "server_url": os.environ.get("XINFERENCE_SERVER_URL"),
-            "model_uid": os.environ.get("XINFERENCE_EMBEDDINGS_MODEL_UID"),
-        },
-        texts=["hello", "world"],
-        user="abc-123",
-    )
-
-    assert isinstance(result, TextEmbeddingResult)
-    assert len(result.embeddings) == 2
-    assert result.usage.total_tokens > 0
-
-
-def test_get_num_tokens():
-    model = XinferenceTextEmbeddingModel()
-
-    num_tokens = model.get_num_tokens(
-        model="bge-base-en",
-        credentials={
-            "server_url": os.environ.get("XINFERENCE_SERVER_URL"),
-            "model_uid": os.environ.get("XINFERENCE_EMBEDDINGS_MODEL_UID"),
-        },
-        texts=["hello", "world"],
-    )
-
-    assert num_tokens == 2
diff --git a/api/tests/integration_tests/model_runtime/xinference/test_llm.py b/api/tests/integration_tests/model_runtime/xinference/test_llm.py
deleted file mode 100644
index fb5e03855d..0000000000
--- a/api/tests/integration_tests/model_runtime/xinference/test_llm.py
+++ /dev/null
@@ -1,366 +0,0 @@
-import os
-from collections.abc import Generator
-
-import pytest
-
-from core.model_runtime.entities.llm_entities import LLMResult, LLMResultChunk, LLMResultChunkDelta
-from core.model_runtime.entities.message_entities import (
-    AssistantPromptMessage,
-    PromptMessageTool,
-    SystemPromptMessage,
-    TextPromptMessageContent,
-    UserPromptMessage,
-)
-from core.model_runtime.entities.model_entities import AIModelEntity
-from core.model_runtime.errors.validate import CredentialsValidateFailedError
-from core.model_runtime.model_providers.xinference.llm.llm import XinferenceAILargeLanguageModel
-
-"""FOR MOCK FIXTURES, DO NOT REMOVE"""
-from tests.integration_tests.model_runtime.__mock.openai import setup_openai_mock
-from tests.integration_tests.model_runtime.__mock.xinference import setup_xinference_mock
-
-
-@pytest.mark.parametrize(("setup_openai_mock", "setup_xinference_mock"), [("chat", "none")], indirect=True)
-def test_validate_credentials_for_chat_model(setup_openai_mock, setup_xinference_mock):
-    model = XinferenceAILargeLanguageModel()
-
-    with pytest.raises(CredentialsValidateFailedError):
-        model.validate_credentials(
-            model="ChatGLM3",
-            credentials={
-                "server_url": os.environ.get("XINFERENCE_SERVER_URL"),
-                "model_uid": "www " + os.environ.get("XINFERENCE_CHAT_MODEL_UID"),
-            },
-        )
-
-    with pytest.raises(CredentialsValidateFailedError):
-        model.validate_credentials(model="aaaaa", credentials={"server_url": "", "model_uid": ""})
-
-    model.validate_credentials(
-        model="ChatGLM3",
-        credentials={
-            "server_url": os.environ.get("XINFERENCE_SERVER_URL"),
-            "model_uid": os.environ.get("XINFERENCE_CHAT_MODEL_UID"),
-        },
-    )
-
-
-@pytest.mark.parametrize(("setup_openai_mock", "setup_xinference_mock"), [("chat", "none")], indirect=True)
-def test_invoke_chat_model(setup_openai_mock, setup_xinference_mock):
-    model = XinferenceAILargeLanguageModel()
-
-    response = model.invoke(
-        model="ChatGLM3",
-        credentials={
-            "server_url": os.environ.get("XINFERENCE_SERVER_URL"),
-            "model_uid": os.environ.get("XINFERENCE_CHAT_MODEL_UID"),
-        },
-        prompt_messages=[
-            SystemPromptMessage(
-                content="You are a helpful AI assistant.",
-            ),
-            UserPromptMessage(content="Hello World!"),
-        ],
-        model_parameters={
-            "temperature": 0.7,
-            "top_p": 1.0,
-        },
-        stop=["you"],
-        user="abc-123",
-        stream=False,
-    )
-
-    assert isinstance(response, LLMResult)
-    assert len(response.message.content) > 0
-    assert response.usage.total_tokens > 0
-
-
-@pytest.mark.parametrize(("setup_openai_mock", "setup_xinference_mock"), [("chat", "none")], indirect=True)
-def test_invoke_stream_chat_model(setup_openai_mock, setup_xinference_mock):
-    model = XinferenceAILargeLanguageModel()
-
-    response = model.invoke(
-        model="ChatGLM3",
-        credentials={
-            "server_url": os.environ.get("XINFERENCE_SERVER_URL"),
-            "model_uid": os.environ.get("XINFERENCE_CHAT_MODEL_UID"),
-        },
-        prompt_messages=[
-            SystemPromptMessage(
-                content="You are a helpful AI assistant.",
-            ),
-            UserPromptMessage(content="Hello World!"),
-        ],
-        model_parameters={
-            "temperature": 0.7,
-            "top_p": 1.0,
-        },
-        stop=["you"],
-        stream=True,
-        user="abc-123",
-    )
-
-    assert isinstance(response, Generator)
-    for chunk in response:
-        assert isinstance(chunk, LLMResultChunk)
-        assert isinstance(chunk.delta, LLMResultChunkDelta)
-        assert isinstance(chunk.delta.message, AssistantPromptMessage)
-        assert len(chunk.delta.message.content) > 0 if chunk.delta.finish_reason is None else True
-
-
-"""
-    Function calling of xinference does not support stream mode currently
-"""
-# def test_invoke_stream_chat_model_with_functions():
-#     model = XinferenceAILargeLanguageModel()
-
-#     response = model.invoke(
-#         model='ChatGLM3-6b',
-#         credentials={
-#             'server_url': os.environ.get('XINFERENCE_SERVER_URL'),
-#             'model_type': 'text-generation',
-#             'model_name': 'ChatGLM3',
-#             'model_uid': os.environ.get('XINFERENCE_CHAT_MODEL_UID')
-#         },
-#         prompt_messages=[
-#             SystemPromptMessage(
-#                 content='你是一个天气机器人,可以通过调用函数来获取天气信息',
-#             ),
-#             UserPromptMessage(
-#                 content='波士顿天气如何?'
-#             )
-#         ],
-#         model_parameters={
-#             'temperature': 0,
-#             'top_p': 1.0,
-#         },
-#         stop=['you'],
-#         user='abc-123',
-#         stream=True,
-#         tools=[
-#             PromptMessageTool(
-#                 name='get_current_weather',
-#                 description='Get the current weather in a given location',
-#                 parameters={
-#                     "type": "object",
-#                     "properties": {
-#                         "location": {
-#                             "type": "string",
-#                             "description": "The city and state e.g. San Francisco, CA"
-#                         },
-#                         "unit": {
-#                             "type": "string",
-#                             "enum": ["celsius", "fahrenheit"]
-#                         }
-#                     },
-#                     "required": [
-#                         "location"
-#                     ]
-#                 }
-#             )
-#         ]
-#     )
-
-#     assert isinstance(response, Generator)
-
-#     call: LLMResultChunk = None
-#     chunks = []
-
-#     for chunk in response:
-#         chunks.append(chunk)
-#         assert isinstance(chunk, LLMResultChunk)
-#         assert isinstance(chunk.delta, LLMResultChunkDelta)
-#         assert isinstance(chunk.delta.message, AssistantPromptMessage)
-#         assert len(chunk.delta.message.content) > 0 if chunk.delta.finish_reason is None else True
-
-#         if chunk.delta.message.tool_calls and len(chunk.delta.message.tool_calls) > 0:
-#             call = chunk
-#             break
-
-#     assert call is not None
-#     assert call.delta.message.tool_calls[0].function.name == 'get_current_weather'
-
-# def test_invoke_chat_model_with_functions():
-#     model = XinferenceAILargeLanguageModel()
-
-#     response = model.invoke(
-#         model='ChatGLM3-6b',
-#         credentials={
-#             'server_url': os.environ.get('XINFERENCE_SERVER_URL'),
-#             'model_type': 'text-generation',
-#             'model_name': 'ChatGLM3',
-#             'model_uid': os.environ.get('XINFERENCE_CHAT_MODEL_UID')
-#         },
-#         prompt_messages=[
-#             UserPromptMessage(
-#                 content='What is the weather like in San Francisco?'
-#             )
-#         ],
-#         model_parameters={
-#             'temperature': 0.7,
-#             'top_p': 1.0,
-#         },
-#         stop=['you'],
-#         user='abc-123',
-#         stream=False,
-#         tools=[
-#             PromptMessageTool(
-#                 name='get_current_weather',
-#                 description='Get the current weather in a given location',
-#                 parameters={
-#                     "type": "object",
-#                     "properties": {
-#                         "location": {
-#                             "type": "string",
-#                             "description": "The city and state e.g. San Francisco, CA"
-#                         },
-#                         "unit": {
-#                             "type": "string",
-#                             "enum": [
-#                                 "c",
-#                                 "f"
-#                             ]
-#                         }
-#                     },
-#                     "required": [
-#                         "location"
-#                     ]
-#                 }
-#             )
-#         ]
-#     )
-
-#     assert isinstance(response, LLMResult)
-#     assert len(response.message.content) > 0
-#     assert response.usage.total_tokens > 0
-#     assert response.message.tool_calls[0].function.name == 'get_current_weather'
-
-
-@pytest.mark.parametrize(("setup_openai_mock", "setup_xinference_mock"), [("completion", "none")], indirect=True)
-def test_validate_credentials_for_generation_model(setup_openai_mock, setup_xinference_mock):
-    model = XinferenceAILargeLanguageModel()
-
-    with pytest.raises(CredentialsValidateFailedError):
-        model.validate_credentials(
-            model="alapaca",
-            credentials={
-                "server_url": os.environ.get("XINFERENCE_SERVER_URL"),
-                "model_uid": "www " + os.environ.get("XINFERENCE_GENERATION_MODEL_UID"),
-            },
-        )
-
-    with pytest.raises(CredentialsValidateFailedError):
-        model.validate_credentials(model="alapaca", credentials={"server_url": "", "model_uid": ""})
-
-    model.validate_credentials(
-        model="alapaca",
-        credentials={
-            "server_url": os.environ.get("XINFERENCE_SERVER_URL"),
-            "model_uid": os.environ.get("XINFERENCE_GENERATION_MODEL_UID"),
-        },
-    )
-
-
-@pytest.mark.parametrize(("setup_openai_mock", "setup_xinference_mock"), [("completion", "none")], indirect=True)
-def test_invoke_generation_model(setup_openai_mock, setup_xinference_mock):
-    model = XinferenceAILargeLanguageModel()
-
-    response = model.invoke(
-        model="alapaca",
-        credentials={
-            "server_url": os.environ.get("XINFERENCE_SERVER_URL"),
-            "model_uid": os.environ.get("XINFERENCE_GENERATION_MODEL_UID"),
-        },
-        prompt_messages=[UserPromptMessage(content="the United States is")],
-        model_parameters={
-            "temperature": 0.7,
-            "top_p": 1.0,
-        },
-        stop=["you"],
-        user="abc-123",
-        stream=False,
-    )
-
-    assert isinstance(response, LLMResult)
-    assert len(response.message.content) > 0
-    assert response.usage.total_tokens > 0
-
-
-@pytest.mark.parametrize(("setup_openai_mock", "setup_xinference_mock"), [("completion", "none")], indirect=True)
-def test_invoke_stream_generation_model(setup_openai_mock, setup_xinference_mock):
-    model = XinferenceAILargeLanguageModel()
-
-    response = model.invoke(
-        model="alapaca",
-        credentials={
-            "server_url": os.environ.get("XINFERENCE_SERVER_URL"),
-            "model_uid": os.environ.get("XINFERENCE_GENERATION_MODEL_UID"),
-        },
-        prompt_messages=[UserPromptMessage(content="the United States is")],
-        model_parameters={
-            "temperature": 0.7,
-            "top_p": 1.0,
-        },
-        stop=["you"],
-        stream=True,
-        user="abc-123",
-    )
-
-    assert isinstance(response, Generator)
-    for chunk in response:
-        assert isinstance(chunk, LLMResultChunk)
-        assert isinstance(chunk.delta, LLMResultChunkDelta)
-        assert isinstance(chunk.delta.message, AssistantPromptMessage)
-        assert len(chunk.delta.message.content) > 0 if chunk.delta.finish_reason is None else True
-
-
-def test_get_num_tokens():
-    model = XinferenceAILargeLanguageModel()
-
-    num_tokens = model.get_num_tokens(
-        model="ChatGLM3",
-        credentials={
-            "server_url": os.environ.get("XINFERENCE_SERVER_URL"),
-            "model_uid": os.environ.get("XINFERENCE_GENERATION_MODEL_UID"),
-        },
-        prompt_messages=[
-            SystemPromptMessage(
-                content="You are a helpful AI assistant.",
-            ),
-            UserPromptMessage(content="Hello World!"),
-        ],
-        tools=[
-            PromptMessageTool(
-                name="get_current_weather",
-                description="Get the current weather in a given location",
-                parameters={
-                    "type": "object",
-                    "properties": {
-                        "location": {"type": "string", "description": "The city and state e.g. San Francisco, CA"},
-                        "unit": {"type": "string", "enum": ["c", "f"]},
-                    },
-                    "required": ["location"],
-                },
-            )
-        ],
-    )
-
-    assert isinstance(num_tokens, int)
-    assert num_tokens == 77
-
-    num_tokens = model.get_num_tokens(
-        model="ChatGLM3",
-        credentials={
-            "server_url": os.environ.get("XINFERENCE_SERVER_URL"),
-            "model_uid": os.environ.get("XINFERENCE_GENERATION_MODEL_UID"),
-        },
-        prompt_messages=[
-            SystemPromptMessage(
-                content="You are a helpful AI assistant.",
-            ),
-            UserPromptMessage(content="Hello World!"),
-        ],
-    )
-
-    assert isinstance(num_tokens, int)
-    assert num_tokens == 21
diff --git a/api/tests/integration_tests/model_runtime/xinference/test_rerank.py b/api/tests/integration_tests/model_runtime/xinference/test_rerank.py
deleted file mode 100644
index 71ac4eef7c..0000000000
--- a/api/tests/integration_tests/model_runtime/xinference/test_rerank.py
+++ /dev/null
@@ -1,52 +0,0 @@
-import os
-
-import pytest
-
-from core.model_runtime.entities.rerank_entities import RerankResult
-from core.model_runtime.errors.validate import CredentialsValidateFailedError
-from core.model_runtime.model_providers.xinference.rerank.rerank import XinferenceRerankModel
-from tests.integration_tests.model_runtime.__mock.xinference import MOCK, setup_xinference_mock
-
-
-@pytest.mark.parametrize("setup_xinference_mock", [["none"]], indirect=True)
-def test_validate_credentials(setup_xinference_mock):
-    model = XinferenceRerankModel()
-
-    with pytest.raises(CredentialsValidateFailedError):
-        model.validate_credentials(
-            model="bge-reranker-base",
-            credentials={"server_url": "awdawdaw", "model_uid": os.environ.get("XINFERENCE_RERANK_MODEL_UID")},
-        )
-
-    model.validate_credentials(
-        model="bge-reranker-base",
-        credentials={
-            "server_url": os.environ.get("XINFERENCE_SERVER_URL"),
-            "model_uid":
os.environ.get("XINFERENCE_RERANK_MODEL_UID"), - }, - ) - - -@pytest.mark.parametrize("setup_xinference_mock", [["none"]], indirect=True) -def test_invoke_model(setup_xinference_mock): - model = XinferenceRerankModel() - - result = model.invoke( - model="bge-reranker-base", - credentials={ - "server_url": os.environ.get("XINFERENCE_SERVER_URL"), - "model_uid": os.environ.get("XINFERENCE_RERANK_MODEL_UID"), - }, - query="Who is Kasumi?", - docs=[ - 'Kasumi is a girl\'s name of Japanese origin meaning "mist".', - "Her music is a kawaii bass, a mix of future bass, pop, and kawaii music ", - "and she leads a team named PopiParty.", - ], - score_threshold=0.8, - ) - - assert isinstance(result, RerankResult) - assert len(result.docs) == 1 - assert result.docs[0].index == 0 - assert result.docs[0].score >= 0.8 diff --git a/api/tests/integration_tests/model_runtime/zhinao/__init__.py b/api/tests/integration_tests/model_runtime/zhinao/__init__.py deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/api/tests/integration_tests/model_runtime/zhinao/test_llm.py b/api/tests/integration_tests/model_runtime/zhinao/test_llm.py deleted file mode 100644 index 4ca1b86476..0000000000 --- a/api/tests/integration_tests/model_runtime/zhinao/test_llm.py +++ /dev/null @@ -1,73 +0,0 @@ -import os -from collections.abc import Generator - -import pytest - -from core.model_runtime.entities.llm_entities import LLMResult, LLMResultChunk, LLMResultChunkDelta -from core.model_runtime.entities.message_entities import AssistantPromptMessage, SystemPromptMessage, UserPromptMessage -from core.model_runtime.errors.validate import CredentialsValidateFailedError -from core.model_runtime.model_providers.zhinao.llm.llm import ZhinaoLargeLanguageModel - - -def test_validate_credentials(): - model = ZhinaoLargeLanguageModel() - - with pytest.raises(CredentialsValidateFailedError): - model.validate_credentials(model="360gpt2-pro", credentials={"api_key": "invalid_key"}) - - model.validate_credentials(model="360gpt2-pro", credentials={"api_key": os.environ.get("ZHINAO_API_KEY")}) - - -def test_invoke_model(): - model = ZhinaoLargeLanguageModel() - - response = model.invoke( - model="360gpt2-pro", - credentials={"api_key": os.environ.get("ZHINAO_API_KEY")}, - prompt_messages=[UserPromptMessage(content="Who are you?")], - model_parameters={"temperature": 0.5, "max_tokens": 10}, - stop=["How"], - stream=False, - user="abc-123", - ) - - assert isinstance(response, LLMResult) - assert len(response.message.content) > 0 - - -def test_invoke_stream_model(): - model = ZhinaoLargeLanguageModel() - - response = model.invoke( - model="360gpt2-pro", - credentials={"api_key": os.environ.get("ZHINAO_API_KEY")}, - prompt_messages=[UserPromptMessage(content="Hello World!")], - model_parameters={"temperature": 0.5, "max_tokens": 100, "seed": 1234}, - stream=True, - user="abc-123", - ) - - assert isinstance(response, Generator) - - for chunk in response: - assert isinstance(chunk, LLMResultChunk) - assert isinstance(chunk.delta, LLMResultChunkDelta) - assert isinstance(chunk.delta.message, AssistantPromptMessage) - assert len(chunk.delta.message.content) > 0 if chunk.delta.finish_reason is None else True - - -def test_get_num_tokens(): - model = ZhinaoLargeLanguageModel() - - num_tokens = model.get_num_tokens( - model="360gpt2-pro", - credentials={"api_key": os.environ.get("ZHINAO_API_KEY")}, - prompt_messages=[ - SystemPromptMessage( - content="You are a helpful AI assistant.", - ), - UserPromptMessage(content="Hello World!"), - ], - ) - 
- assert num_tokens == 21 diff --git a/api/tests/integration_tests/model_runtime/zhinao/test_provider.py b/api/tests/integration_tests/model_runtime/zhinao/test_provider.py deleted file mode 100644 index c22f797919..0000000000 --- a/api/tests/integration_tests/model_runtime/zhinao/test_provider.py +++ /dev/null @@ -1,15 +0,0 @@ -import os - -import pytest - -from core.model_runtime.errors.validate import CredentialsValidateFailedError -from core.model_runtime.model_providers.zhinao.zhinao import ZhinaoProvider - - -def test_validate_provider_credentials(): - provider = ZhinaoProvider() - - with pytest.raises(CredentialsValidateFailedError): - provider.validate_provider_credentials(credentials={}) - - provider.validate_provider_credentials(credentials={"api_key": os.environ.get("ZHINAO_API_KEY")}) diff --git a/api/tests/integration_tests/model_runtime/zhipuai/__init__.py b/api/tests/integration_tests/model_runtime/zhipuai/__init__.py deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/api/tests/integration_tests/model_runtime/zhipuai/test_llm.py b/api/tests/integration_tests/model_runtime/zhipuai/test_llm.py deleted file mode 100644 index 20380513ea..0000000000 --- a/api/tests/integration_tests/model_runtime/zhipuai/test_llm.py +++ /dev/null @@ -1,109 +0,0 @@ -import os -from collections.abc import Generator - -import pytest - -from core.model_runtime.entities.llm_entities import LLMResult, LLMResultChunk, LLMResultChunkDelta -from core.model_runtime.entities.message_entities import ( - AssistantPromptMessage, - PromptMessageTool, - SystemPromptMessage, - UserPromptMessage, -) -from core.model_runtime.errors.validate import CredentialsValidateFailedError -from core.model_runtime.model_providers.zhipuai.llm.llm import ZhipuAILargeLanguageModel - - -def test_validate_credentials(): - model = ZhipuAILargeLanguageModel() - - with pytest.raises(CredentialsValidateFailedError): - model.validate_credentials(model="chatglm_turbo", credentials={"api_key": "invalid_key"}) - - model.validate_credentials(model="chatglm_turbo", credentials={"api_key": os.environ.get("ZHIPUAI_API_KEY")}) - - -def test_invoke_model(): - model = ZhipuAILargeLanguageModel() - - response = model.invoke( - model="chatglm_turbo", - credentials={"api_key": os.environ.get("ZHIPUAI_API_KEY")}, - prompt_messages=[UserPromptMessage(content="Who are you?")], - model_parameters={"temperature": 0.9, "top_p": 0.7}, - stop=["How"], - stream=False, - user="abc-123", - ) - - assert isinstance(response, LLMResult) - assert len(response.message.content) > 0 - - -def test_invoke_stream_model(): - model = ZhipuAILargeLanguageModel() - - response = model.invoke( - model="chatglm_turbo", - credentials={"api_key": os.environ.get("ZHIPUAI_API_KEY")}, - prompt_messages=[UserPromptMessage(content="Hello World!")], - model_parameters={"temperature": 0.9, "top_p": 0.7}, - stream=True, - user="abc-123", - ) - - assert isinstance(response, Generator) - - for chunk in response: - assert isinstance(chunk, LLMResultChunk) - assert isinstance(chunk.delta, LLMResultChunkDelta) - assert isinstance(chunk.delta.message, AssistantPromptMessage) - assert len(chunk.delta.message.content) > 0 if chunk.delta.finish_reason is None else True - - -def test_get_num_tokens(): - model = ZhipuAILargeLanguageModel() - - num_tokens = model.get_num_tokens( - model="chatglm_turbo", - credentials={"api_key": os.environ.get("ZHIPUAI_API_KEY")}, - prompt_messages=[ - SystemPromptMessage( - content="You are a helpful AI assistant.", - ), - 
UserPromptMessage(content="Hello World!"), - ], - ) - - assert num_tokens == 14 - - -def test_get_tools_num_tokens(): - model = ZhipuAILargeLanguageModel() - - num_tokens = model.get_num_tokens( - model="tools", - credentials={"api_key": os.environ.get("ZHIPUAI_API_KEY")}, - tools=[ - PromptMessageTool( - name="get_current_weather", - description="Get the current weather in a given location", - parameters={ - "type": "object", - "properties": { - "location": {"type": "string", "description": "The city and state e.g. San Francisco, CA"}, - "unit": {"type": "string", "enum": ["c", "f"]}, - }, - "required": ["location"], - }, - ) - ], - prompt_messages=[ - SystemPromptMessage( - content="You are a helpful AI assistant.", - ), - UserPromptMessage(content="Hello World!"), - ], - ) - - assert num_tokens == 88 diff --git a/api/tests/integration_tests/model_runtime/zhipuai/test_provider.py b/api/tests/integration_tests/model_runtime/zhipuai/test_provider.py deleted file mode 100644 index cb5bc0b20a..0000000000 --- a/api/tests/integration_tests/model_runtime/zhipuai/test_provider.py +++ /dev/null @@ -1,15 +0,0 @@ -import os - -import pytest - -from core.model_runtime.errors.validate import CredentialsValidateFailedError -from core.model_runtime.model_providers.zhipuai.zhipuai import ZhipuaiProvider - - -def test_validate_provider_credentials(): - provider = ZhipuaiProvider() - - with pytest.raises(CredentialsValidateFailedError): - provider.validate_provider_credentials(credentials={}) - - provider.validate_provider_credentials(credentials={"api_key": os.environ.get("ZHIPUAI_API_KEY")}) diff --git a/api/tests/integration_tests/model_runtime/zhipuai/test_text_embedding.py b/api/tests/integration_tests/model_runtime/zhipuai/test_text_embedding.py deleted file mode 100644 index 9c97c91ecb..0000000000 --- a/api/tests/integration_tests/model_runtime/zhipuai/test_text_embedding.py +++ /dev/null @@ -1,41 +0,0 @@ -import os - -import pytest - -from core.model_runtime.entities.text_embedding_entities import TextEmbeddingResult -from core.model_runtime.errors.validate import CredentialsValidateFailedError -from core.model_runtime.model_providers.zhipuai.text_embedding.text_embedding import ZhipuAITextEmbeddingModel - - -def test_validate_credentials(): - model = ZhipuAITextEmbeddingModel() - - with pytest.raises(CredentialsValidateFailedError): - model.validate_credentials(model="text_embedding", credentials={"api_key": "invalid_key"}) - - model.validate_credentials(model="text_embedding", credentials={"api_key": os.environ.get("ZHIPUAI_API_KEY")}) - - -def test_invoke_model(): - model = ZhipuAITextEmbeddingModel() - - result = model.invoke( - model="text_embedding", - credentials={"api_key": os.environ.get("ZHIPUAI_API_KEY")}, - texts=["hello", "world"], - user="abc-123", - ) - - assert isinstance(result, TextEmbeddingResult) - assert len(result.embeddings) == 2 - assert result.usage.total_tokens > 0 - - -def test_get_num_tokens(): - model = ZhipuAITextEmbeddingModel() - - num_tokens = model.get_num_tokens( - model="text_embedding", credentials={"api_key": os.environ.get("ZHIPUAI_API_KEY")}, texts=["hello", "world"] - ) - - assert num_tokens == 2 diff --git a/api/tests/unit_tests/core/model_runtime/model_providers/__init__.py b/api/tests/unit_tests/core/model_runtime/model_providers/__init__.py deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/api/tests/unit_tests/core/model_runtime/model_providers/wenxin/__init__.py 
b/api/tests/unit_tests/core/model_runtime/model_providers/wenxin/__init__.py deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/api/tests/unit_tests/core/model_runtime/model_providers/wenxin/test_text_embedding.py b/api/tests/unit_tests/core/model_runtime/model_providers/wenxin/test_text_embedding.py deleted file mode 100644 index 5b159b49b6..0000000000 --- a/api/tests/unit_tests/core/model_runtime/model_providers/wenxin/test_text_embedding.py +++ /dev/null @@ -1,75 +0,0 @@ -import numpy as np - -from core.model_runtime.entities.text_embedding_entities import TextEmbeddingResult -from core.model_runtime.model_providers.__base.tokenizers.gpt2_tokenzier import GPT2Tokenizer -from core.model_runtime.model_providers.wenxin.text_embedding.text_embedding import ( - TextEmbedding, - WenxinTextEmbeddingModel, -) - - -def test_max_chunks(): - class _MockTextEmbedding(TextEmbedding): - def embed_documents(self, model: str, texts: list[str], user: str) -> (list[list[float]], int, int): - embeddings = [[1.0, 2.0, 3.0] for i in range(len(texts))] - tokens = 0 - for text in texts: - tokens += len(text) - - return embeddings, tokens, tokens - - def _create_text_embedding(api_key: str, secret_key: str) -> TextEmbedding: - return _MockTextEmbedding() - - model = "embedding-v1" - credentials = { - "api_key": "xxxx", - "secret_key": "yyyy", - } - embedding_model = WenxinTextEmbeddingModel() - context_size = embedding_model._get_context_size(model, credentials) - max_chunks = embedding_model._get_max_chunks(model, credentials) - embedding_model._create_text_embedding = _create_text_embedding - - texts = ["0123456789" for i in range(0, max_chunks * 2)] - result: TextEmbeddingResult = embedding_model.invoke(model, credentials, texts, "test") - assert len(result.embeddings) == max_chunks * 2 - - -def test_context_size(): - def get_num_tokens_by_gpt2(text: str) -> int: - return GPT2Tokenizer.get_num_tokens(text) - - def mock_text(token_size: int) -> str: - _text = "".join(["0" for i in range(token_size)]) - num_tokens = get_num_tokens_by_gpt2(_text) - ratio = int(np.floor(len(_text) / num_tokens)) - m_text = "".join([_text for i in range(ratio)]) - return m_text - - model = "embedding-v1" - credentials = { - "api_key": "xxxx", - "secret_key": "yyyy", - } - embedding_model = WenxinTextEmbeddingModel() - context_size = embedding_model._get_context_size(model, credentials) - - class _MockTextEmbedding(TextEmbedding): - def embed_documents(self, model: str, texts: list[str], user: str) -> (list[list[float]], int, int): - embeddings = [[1.0, 2.0, 3.0] for i in range(len(texts))] - tokens = 0 - for text in texts: - tokens += get_num_tokens_by_gpt2(text) - return embeddings, tokens, tokens - - def _create_text_embedding(api_key: str, secret_key: str) -> TextEmbedding: - return _MockTextEmbedding() - - embedding_model._create_text_embedding = _create_text_embedding - text = mock_text(context_size * 2) - assert get_num_tokens_by_gpt2(text) == context_size * 2 - - texts = [text] - result: TextEmbeddingResult = embedding_model.invoke(model, credentials, texts, "test") - assert result.usage.tokens == context_size
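The final hunk above removes the wenxin mock-embedding unit tests along with the rest of the per-provider suites. For anyone who wants to keep an equivalent regression check outside the integration tree, the sketch below reproduces the pattern those deleted tests used: stub the embedding transport, then assert that the wrapper honors the batch limit (`max_chunks`) and truncates over-long inputs to the context size. `MockClient`, `ChunkingEmbedder`, `MAX_CHUNKS`, and `CONTEXT_SIZE` are hypothetical stand-ins, not Dify APIs, and the char-level cap is a deliberate simplification of the GPT2-token counting the deleted test performed.

```python
# Minimal sketch of the mock-embedding test pattern from the deleted
# wenxin unit test. All names here are hypothetical stand-ins.
from dataclasses import dataclass

MAX_CHUNKS = 16     # assumed provider batch limit
CONTEXT_SIZE = 384  # assumed per-text token budget


@dataclass
class EmbeddingResult:
    embeddings: list[list[float]]
    tokens: int


class MockClient:
    """Stands in for the remote embedding endpoint; no network involved."""

    def embed_documents(self, texts: list[str]) -> EmbeddingResult:
        # One fixed vector per input; token usage approximated by text length.
        return EmbeddingResult(
            embeddings=[[1.0, 2.0, 3.0] for _ in texts],
            tokens=sum(len(t) for t in texts),
        )


class ChunkingEmbedder:
    """Batches inputs MAX_CHUNKS at a time and caps each text at
    CONTEXT_SIZE, mirroring what the deleted tests exercised."""

    def __init__(self, client: MockClient) -> None:
        self.client = client

    def invoke(self, texts: list[str]) -> EmbeddingResult:
        embeddings: list[list[float]] = []
        tokens = 0
        capped = [t[:CONTEXT_SIZE] for t in texts]  # crude char-level cap
        for i in range(0, len(capped), MAX_CHUNKS):
            result = self.client.embed_documents(capped[i : i + MAX_CHUNKS])
            embeddings.extend(result.embeddings)
            tokens += result.tokens
        return EmbeddingResult(embeddings, tokens)


def test_max_chunks_and_context_size():
    embedder = ChunkingEmbedder(MockClient())

    # More inputs than one batch allows: every text must still be embedded.
    result = embedder.invoke(["0123456789"] * (MAX_CHUNKS * 2))
    assert len(result.embeddings) == MAX_CHUNKS * 2

    # A single over-long input must be truncated to the context size.
    result = embedder.invoke(["0" * (CONTEXT_SIZE * 2)])
    assert result.tokens == CONTEXT_SIZE
```

Run with `pytest` as-is; swapping `MockClient` for a real transport turns the same assertions back into an integration test.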