From 45e51e7730f9db4ac5cffc8426144c5399f3e50d Mon Sep 17 00:00:00 2001 From: Su Yang Date: Mon, 18 Mar 2024 18:16:36 +0800 Subject: [PATCH 01/18] feat: AWS Bedrock Claude3 (#2864) Co-authored-by: crazywoola <427733928@qq.com> Co-authored-by: Chenhe Gu --- .../bedrock/llm/_position.yaml | 2 + .../llm/anthropic.claude-3-haiku-v1.yaml | 57 ++++ .../llm/anthropic.claude-3-sonnet-v1.yaml | 56 ++++ .../model_providers/bedrock/llm/llm.py | 294 +++++++++++++++++- api/requirements.txt | 2 +- 5 files changed, 407 insertions(+), 4 deletions(-) create mode 100644 api/core/model_runtime/model_providers/bedrock/llm/anthropic.claude-3-haiku-v1.yaml create mode 100644 api/core/model_runtime/model_providers/bedrock/llm/anthropic.claude-3-sonnet-v1.yaml diff --git a/api/core/model_runtime/model_providers/bedrock/llm/_position.yaml b/api/core/model_runtime/model_providers/bedrock/llm/_position.yaml index c4be732f2e..a4cfbd171e 100644 --- a/api/core/model_runtime/model_providers/bedrock/llm/_position.yaml +++ b/api/core/model_runtime/model_providers/bedrock/llm/_position.yaml @@ -4,6 +4,8 @@ - anthropic.claude-v1 - anthropic.claude-v2 - anthropic.claude-v2:1 +- anthropic.claude-3-sonnet-v1:0 +- anthropic.claude-3-haiku-v1:0 - cohere.command-light-text-v14 - cohere.command-text-v14 - meta.llama2-13b-chat-v1 diff --git a/api/core/model_runtime/model_providers/bedrock/llm/anthropic.claude-3-haiku-v1.yaml b/api/core/model_runtime/model_providers/bedrock/llm/anthropic.claude-3-haiku-v1.yaml new file mode 100644 index 0000000000..73fe5567fc --- /dev/null +++ b/api/core/model_runtime/model_providers/bedrock/llm/anthropic.claude-3-haiku-v1.yaml @@ -0,0 +1,57 @@ +model: anthropic.claude-3-haiku-20240307-v1:0 +label: + en_US: Claude 3 Haiku +model_type: llm +features: + - agent-thought + - vision +model_properties: + mode: chat + context_size: 200000 +# docs: https://docs.aws.amazon.com/bedrock/latest/userguide/model-parameters-anthropic-claude-messages.html +parameter_rules: + - name: 
max_tokens + use_template: max_tokens + required: true + type: int + default: 4096 + min: 1 + max: 4096 + help: + zh_Hans: 停止前生成的最大令牌数。请注意,Anthropic Claude 模型可能会在达到 max_tokens 的值之前停止生成令牌。不同的 Anthropic Claude 模型对此参数具有不同的最大值。 + en_US: The maximum number of tokens to generate before stopping. Note that Anthropic Claude models might stop generating tokens before reaching the value of max_tokens. Different Anthropic Claude models have different maximum values for this parameter. + # docs: https://docs.anthropic.com/claude/docs/system-prompts + - name: temperature + use_template: temperature + required: false + type: float + default: 1 + min: 0.0 + max: 1.0 + help: + zh_Hans: 生成内容的随机性。 + en_US: The amount of randomness injected into the response. + - name: top_p + required: false + type: float + default: 0.999 + min: 0.000 + max: 1.000 + help: + zh_Hans: 在核采样中,Anthropic Claude 按概率递减顺序计算每个后续标记的所有选项的累积分布,并在达到 top_p 指定的特定概率时将其切断。您应该更改温度或top_p,但不能同时更改两者。 + en_US: In nucleus sampling, Anthropic Claude computes the cumulative distribution over all the options for each subsequent token in decreasing probability order and cuts it off once it reaches a particular probability specified by top_p. You should alter either temperature or top_p, but not both. + - name: top_k + required: false + type: int + default: 0 + min: 0 + # tip docs from aws has error, max value is 500 + max: 500 + help: + zh_Hans: 对于每个后续标记,仅从前 K 个选项中进行采样。使用 top_k 删除长尾低概率响应。 + en_US: Only sample from the top K options for each subsequent token. Use top_k to remove long tail low probability responses. 
+pricing: + input: '0.003' + output: '0.015' + unit: '0.001' + currency: USD diff --git a/api/core/model_runtime/model_providers/bedrock/llm/anthropic.claude-3-sonnet-v1.yaml b/api/core/model_runtime/model_providers/bedrock/llm/anthropic.claude-3-sonnet-v1.yaml new file mode 100644 index 0000000000..cb11df0b60 --- /dev/null +++ b/api/core/model_runtime/model_providers/bedrock/llm/anthropic.claude-3-sonnet-v1.yaml @@ -0,0 +1,56 @@ +model: anthropic.claude-3-sonnet-20240229-v1:0 +label: + en_US: Claude 3 Sonnet +model_type: llm +features: + - agent-thought + - vision +model_properties: + mode: chat + context_size: 200000 +# docs: https://docs.aws.amazon.com/bedrock/latest/userguide/model-parameters-anthropic-claude-messages.html +parameter_rules: + - name: max_tokens + use_template: max_tokens + required: true + type: int + default: 4096 + min: 1 + max: 4096 + help: + zh_Hans: 停止前生成的最大令牌数。请注意,Anthropic Claude 模型可能会在达到 max_tokens 的值之前停止生成令牌。不同的 Anthropic Claude 模型对此参数具有不同的最大值。 + en_US: The maximum number of tokens to generate before stopping. Note that Anthropic Claude models might stop generating tokens before reaching the value of max_tokens. Different Anthropic Claude models have different maximum values for this parameter. + - name: temperature + use_template: temperature + required: false + type: float + default: 1 + min: 0.0 + max: 1.0 + help: + zh_Hans: 生成内容的随机性。 + en_US: The amount of randomness injected into the response. + - name: top_p + required: false + type: float + default: 0.999 + min: 0.000 + max: 1.000 + help: + zh_Hans: 在核采样中,Anthropic Claude 按概率递减顺序计算每个后续标记的所有选项的累积分布,并在达到 top_p 指定的特定概率时将其切断。您应该更改温度或top_p,但不能同时更改两者。 + en_US: In nucleus sampling, Anthropic Claude computes the cumulative distribution over all the options for each subsequent token in decreasing probability order and cuts it off once it reaches a particular probability specified by top_p. You should alter either temperature or top_p, but not both. 
+ - name: top_k + required: false + type: int + default: 0 + min: 0 + # tip docs from aws has error, max value is 500 + max: 500 + help: + zh_Hans: 对于每个后续标记,仅从前 K 个选项中进行采样。使用 top_k 删除长尾低概率响应。 + en_US: Only sample from the top K options for each subsequent token. Use top_k to remove long tail low probability responses. +pricing: + input: '0.00025' + output: '0.00125' + unit: '0.001' + currency: USD diff --git a/api/core/model_runtime/model_providers/bedrock/llm/llm.py b/api/core/model_runtime/model_providers/bedrock/llm/llm.py index c6aaa24ade..5745721ae8 100644 --- a/api/core/model_runtime/model_providers/bedrock/llm/llm.py +++ b/api/core/model_runtime/model_providers/bedrock/llm/llm.py @@ -1,9 +1,22 @@ +import base64 import json import logging +import mimetypes +import time from collections.abc import Generator -from typing import Optional, Union +from typing import Optional, Union, cast import boto3 +import requests +from anthropic import AnthropicBedrock, Stream +from anthropic.types import ( + ContentBlockDeltaEvent, + Message, + MessageDeltaEvent, + MessageStartEvent, + MessageStopEvent, + MessageStreamEvent, +) from botocore.config import Config from botocore.exceptions import ( ClientError, @@ -13,14 +26,18 @@ from botocore.exceptions import ( UnknownServiceError, ) -from core.model_runtime.entities.llm_entities import LLMResult, LLMResultChunk, LLMResultChunkDelta +from core.model_runtime.entities.llm_entities import LLMResult, LLMResultChunk, LLMResultChunkDelta, LLMUsage from core.model_runtime.entities.message_entities import ( AssistantPromptMessage, + ImagePromptMessageContent, PromptMessage, + PromptMessageContentType, PromptMessageTool, SystemPromptMessage, + TextPromptMessageContent, UserPromptMessage, ) +from core.model_runtime.entities.model_entities import PriceType from core.model_runtime.errors.invoke import ( InvokeAuthorizationError, InvokeBadRequestError, @@ -54,9 +71,268 @@ class BedrockLargeLanguageModel(LargeLanguageModel): :param user: 
unique user id :return: full response or stream response chunk generator result """ + + # invoke claude 3 models via anthropic official SDK + if "anthropic.claude-3" in model: + return self._invoke_claude3(model, credentials, prompt_messages, model_parameters, stop, stream) # invoke model return self._generate(model, credentials, prompt_messages, model_parameters, stop, stream, user) + def _invoke_claude3(self, model: str, credentials: dict, prompt_messages: list[PromptMessage], model_parameters: dict, + stop: Optional[list[str]] = None, stream: bool = True) -> Union[LLMResult, Generator]: + """ + Invoke Claude3 large language model + + :param model: model name + :param credentials: model credentials + :param prompt_messages: prompt messages + :param model_parameters: model parameters + :param stop: stop words + :param stream: is stream response + :return: full response or stream response chunk generator result + """ + # use Anthropic official SDK references + # - https://docs.anthropic.com/claude/reference/claude-on-amazon-bedrock + # - https://github.com/anthropics/anthropic-sdk-python + client = AnthropicBedrock( + aws_access_key=credentials["aws_access_key_id"], + aws_secret_key=credentials["aws_secret_access_key"], + aws_region=credentials["aws_region"], + ) + + system, prompt_message_dicts = self._convert_claude3_prompt_messages(prompt_messages) + + response = client.messages.create( + model=model, + messages=prompt_message_dicts, + stop_sequences=stop if stop else [], + system=system, + stream=stream, + **model_parameters, + ) + + if stream is False: + return self._handle_claude3_response(model, credentials, response, prompt_messages) + else: + return self._handle_claude3_stream_response(model, credentials, response, prompt_messages) + + def _handle_claude3_response(self, model: str, credentials: dict, response: Message, + prompt_messages: list[PromptMessage]) -> LLMResult: + """ + Handle llm chat response + + :param model: model name + :param credentials: 
credentials + :param response: response + :param prompt_messages: prompt messages + :return: full response chunk generator result + """ + + # transform assistant message to prompt message + assistant_prompt_message = AssistantPromptMessage( + content=response.content[0].text + ) + + # calculate num tokens + if response.usage: + # transform usage + prompt_tokens = response.usage.input_tokens + completion_tokens = response.usage.output_tokens + else: + # calculate num tokens + prompt_tokens = self.get_num_tokens(model, credentials, prompt_messages) + completion_tokens = self.get_num_tokens(model, credentials, [assistant_prompt_message]) + + # transform usage + usage = self._calc_response_usage(model, credentials, prompt_tokens, completion_tokens) + + # transform response + response = LLMResult( + model=response.model, + prompt_messages=prompt_messages, + message=assistant_prompt_message, + usage=usage + ) + + return response + + def _handle_claude3_stream_response(self, model: str, credentials: dict, response: Stream[MessageStreamEvent], + prompt_messages: list[PromptMessage], ) -> Generator: + """ + Handle llm chat stream response + + :param model: model name + :param credentials: credentials + :param response: response + :param prompt_messages: prompt messages + :return: full response or stream response chunk generator result + """ + + try: + full_assistant_content = '' + return_model = None + input_tokens = 0 + output_tokens = 0 + finish_reason = None + index = 0 + + for chunk in response: + if isinstance(chunk, MessageStartEvent): + return_model = chunk.message.model + input_tokens = chunk.message.usage.input_tokens + elif isinstance(chunk, MessageDeltaEvent): + output_tokens = chunk.usage.output_tokens + finish_reason = chunk.delta.stop_reason + elif isinstance(chunk, MessageStopEvent): + usage = self._calc_response_usage(model, credentials, input_tokens, output_tokens) + yield LLMResultChunk( + model=return_model, + prompt_messages=prompt_messages, + 
delta=LLMResultChunkDelta( + index=index + 1, + message=AssistantPromptMessage( + content='' + ), + finish_reason=finish_reason, + usage=usage + ) + ) + elif isinstance(chunk, ContentBlockDeltaEvent): + chunk_text = chunk.delta.text if chunk.delta.text else '' + full_assistant_content += chunk_text + assistant_prompt_message = AssistantPromptMessage( + content=chunk_text if chunk_text else '', + ) + index = chunk.index + yield LLMResultChunk( + model=model, + prompt_messages=prompt_messages, + delta=LLMResultChunkDelta( + index=index, + message=assistant_prompt_message, + ) + ) + except Exception as ex: + raise InvokeError(str(ex)) + + def _calc_claude3_response_usage(self, model: str, credentials: dict, prompt_tokens: int, completion_tokens: int) -> LLMUsage: + """ + Calculate response usage + + :param model: model name + :param credentials: model credentials + :param prompt_tokens: prompt tokens + :param completion_tokens: completion tokens + :return: usage + """ + # get prompt price info + prompt_price_info = self.get_price( + model=model, + credentials=credentials, + price_type=PriceType.INPUT, + tokens=prompt_tokens, + ) + + # get completion price info + completion_price_info = self.get_price( + model=model, + credentials=credentials, + price_type=PriceType.OUTPUT, + tokens=completion_tokens + ) + + # transform usage + usage = LLMUsage( + prompt_tokens=prompt_tokens, + prompt_unit_price=prompt_price_info.unit_price, + prompt_price_unit=prompt_price_info.unit, + prompt_price=prompt_price_info.total_amount, + completion_tokens=completion_tokens, + completion_unit_price=completion_price_info.unit_price, + completion_price_unit=completion_price_info.unit, + completion_price=completion_price_info.total_amount, + total_tokens=prompt_tokens + completion_tokens, + total_price=prompt_price_info.total_amount + completion_price_info.total_amount, + currency=prompt_price_info.currency, + latency=time.perf_counter() - self.started_at + ) + + return usage + + def 
_convert_claude3_prompt_messages(self, prompt_messages: list[PromptMessage]) -> tuple[str, list[dict]]: + """ + Convert prompt messages to dict list and system + """ + system = "" + prompt_message_dicts = [] + + for message in prompt_messages: + if isinstance(message, SystemPromptMessage): + system += message.content + ("\n" if not system else "") + else: + prompt_message_dicts.append(self._convert_claude3_prompt_message_to_dict(message)) + + return system, prompt_message_dicts + + def _convert_claude3_prompt_message_to_dict(self, message: PromptMessage) -> dict: + """ + Convert PromptMessage to dict + """ + if isinstance(message, UserPromptMessage): + message = cast(UserPromptMessage, message) + if isinstance(message.content, str): + message_dict = {"role": "user", "content": message.content} + else: + sub_messages = [] + for message_content in message.content: + if message_content.type == PromptMessageContentType.TEXT: + message_content = cast(TextPromptMessageContent, message_content) + sub_message_dict = { + "type": "text", + "text": message_content.data + } + sub_messages.append(sub_message_dict) + elif message_content.type == PromptMessageContentType.IMAGE: + message_content = cast(ImagePromptMessageContent, message_content) + if not message_content.data.startswith("data:"): + # fetch image data from url + try: + image_content = requests.get(message_content.data).content + mime_type, _ = mimetypes.guess_type(message_content.data) + base64_data = base64.b64encode(image_content).decode('utf-8') + except Exception as ex: + raise ValueError(f"Failed to fetch image data from url {message_content.data}, {ex}") + else: + data_split = message_content.data.split(";base64,") + mime_type = data_split[0].replace("data:", "") + base64_data = data_split[1] + + if mime_type not in ["image/jpeg", "image/png", "image/gif", "image/webp"]: + raise ValueError(f"Unsupported image type {mime_type}, " + f"only support image/jpeg, image/png, image/gif, and image/webp") + + 
sub_message_dict = { + "type": "image", + "source": { + "type": "base64", + "media_type": mime_type, + "data": base64_data + } + } + sub_messages.append(sub_message_dict) + + message_dict = {"role": "user", "content": sub_messages} + elif isinstance(message, AssistantPromptMessage): + message = cast(AssistantPromptMessage, message) + message_dict = {"role": "assistant", "content": message.content} + elif isinstance(message, SystemPromptMessage): + message = cast(SystemPromptMessage, message) + message_dict = {"role": "system", "content": message.content} + else: + raise ValueError(f"Got unknown type {message}") + + return message_dict + def get_num_tokens(self, model: str, credentials: dict, messages: list[PromptMessage] | str, tools: Optional[list[PromptMessageTool]] = None) -> int: """ @@ -101,7 +377,19 @@ class BedrockLargeLanguageModel(LargeLanguageModel): :param credentials: model credentials :return: """ - + + if "anthropic.claude-3" in model: + try: + self._invoke_claude3(model=model, + credentials=credentials, + prompt_messages=[{"role": "user", "content": "ping"}], + model_parameters={}, + stop=None, + stream=False) + + except Exception as ex: + raise CredentialsValidateFailedError(str(ex)) + try: ping_message = UserPromptMessage(content="ping") self._generate(model=model, diff --git a/api/requirements.txt b/api/requirements.txt index 7edd95a893..b8714291a9 100644 --- a/api/requirements.txt +++ b/api/requirements.txt @@ -36,7 +36,7 @@ python-docx~=1.1.0 pypdfium2==4.16.0 resend~=0.7.0 pyjwt~=2.8.0 -anthropic~=0.17.0 +anthropic~=0.20.0 newspaper3k==0.2.8 google-api-python-client==2.90.0 wikipedia==1.4.0 From c3790c239c568a884dc788b74adb8e40af26de65 Mon Sep 17 00:00:00 2001 From: Su Yang Date: Tue, 19 Mar 2024 00:57:19 +0800 Subject: [PATCH 02/18] i18n: update bedrock label (#2879) --- .../model_runtime/model_providers/bedrock/bedrock.yaml | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git 
a/api/core/model_runtime/model_providers/bedrock/bedrock.yaml b/api/core/model_runtime/model_providers/bedrock/bedrock.yaml index 05cd402d4e..e1923f8f8a 100644 --- a/api/core/model_runtime/model_providers/bedrock/bedrock.yaml +++ b/api/core/model_runtime/model_providers/bedrock/bedrock.yaml @@ -48,23 +48,23 @@ provider_credential_schema: - value: us-east-1 label: en_US: US East (N. Virginia) - zh_Hans: US East (N. Virginia) + zh_Hans: 美国东部 (弗吉尼亚北部) - value: us-west-2 label: en_US: US West (Oregon) - zh_Hans: US West (Oregon) + zh_Hans: 美国西部 (俄勒冈州) - value: ap-southeast-1 label: en_US: Asia Pacific (Singapore) - zh_Hans: Asia Pacific (Singapore) + zh_Hans: 亚太地区 (新加坡) - value: ap-northeast-1 label: en_US: Asia Pacific (Tokyo) - zh_Hans: Asia Pacific (Tokyo) + zh_Hans: 亚太地区 (东京) - value: eu-central-1 label: en_US: Europe (Frankfurt) - zh_Hans: Europe (Frankfurt) + zh_Hans: 欧洲 (法兰克福) - value: us-gov-west-1 label: en_US: AWS GovCloud (US-West) From 59f173f2e6611de8d40ad417a2649dcf58c9969a Mon Sep 17 00:00:00 2001 From: crazywoola <100913391+crazywoola@users.noreply.github.com> Date: Tue, 19 Mar 2024 13:53:21 +0800 Subject: [PATCH 03/18] feat: add icons for 01.ai (#2883) --- .../model_providers/yi/_assets/icon_l_en.svg | 32 +++++++------------ .../model_providers/yi/_assets/icon_l_zh.svg | 20 ------------ .../model_providers/yi/_assets/icon_s_en.svg | 15 +++++---- .../model_runtime/model_providers/yi/yi.yaml | 2 +- 4 files changed, 21 insertions(+), 48 deletions(-) delete mode 100644 api/core/model_runtime/model_providers/yi/_assets/icon_l_zh.svg diff --git a/api/core/model_runtime/model_providers/yi/_assets/icon_l_en.svg b/api/core/model_runtime/model_providers/yi/_assets/icon_l_en.svg index 0efce4e85b..9ce3baddaa 100644 --- a/api/core/model_runtime/model_providers/yi/_assets/icon_l_en.svg +++ b/api/core/model_runtime/model_providers/yi/_assets/icon_l_en.svg @@ -1,20 +1,12 @@ - - - - - - - - - - - - - - - - - - 01.AI - - + + + + + + + + + + + + \ No newline at end of 
file diff --git a/api/core/model_runtime/model_providers/yi/_assets/icon_l_zh.svg b/api/core/model_runtime/model_providers/yi/_assets/icon_l_zh.svg deleted file mode 100644 index 951842da55..0000000000 --- a/api/core/model_runtime/model_providers/yi/_assets/icon_l_zh.svg +++ /dev/null @@ -1,20 +0,0 @@ - - - - - - - - - - - - - - - - - - 零一万物 - - diff --git a/api/core/model_runtime/model_providers/yi/_assets/icon_s_en.svg b/api/core/model_runtime/model_providers/yi/_assets/icon_s_en.svg index a813274466..eb0395a21c 100644 --- a/api/core/model_runtime/model_providers/yi/_assets/icon_s_en.svg +++ b/api/core/model_runtime/model_providers/yi/_assets/icon_s_en.svg @@ -1,7 +1,8 @@ - - - - - - - \ No newline at end of file + + + + + + + + \ No newline at end of file diff --git a/api/core/model_runtime/model_providers/yi/yi.yaml b/api/core/model_runtime/model_providers/yi/yi.yaml index 368c715456..a8c0d857b6 100644 --- a/api/core/model_runtime/model_providers/yi/yi.yaml +++ b/api/core/model_runtime/model_providers/yi/yi.yaml @@ -9,7 +9,7 @@ icon_small: en_US: icon_s_en.svg icon_large: en_US: icon_l_en.svg -background: "#EFFDFD" +background: "#E9F1EC" help: title: en_US: Get your API Key from 01.ai From 507aa6d94966673cf65887e58429d4ecd66d76d3 Mon Sep 17 00:00:00 2001 From: Su Yang Date: Tue, 19 Mar 2024 13:56:22 +0800 Subject: [PATCH 04/18] fix: Fix the problem of system not working (#2884) --- .../model_providers/bedrock/llm/llm.py | 47 ++++++++++++++----- 1 file changed, 36 insertions(+), 11 deletions(-) diff --git a/api/core/model_runtime/model_providers/bedrock/llm/llm.py b/api/core/model_runtime/model_providers/bedrock/llm/llm.py index 5745721ae8..b274cec35f 100644 --- a/api/core/model_runtime/model_providers/bedrock/llm/llm.py +++ b/api/core/model_runtime/model_providers/bedrock/llm/llm.py @@ -74,12 +74,12 @@ class BedrockLargeLanguageModel(LargeLanguageModel): # invoke claude 3 models via anthropic official SDK if "anthropic.claude-3" in model: - return 
self._invoke_claude3(model, credentials, prompt_messages, model_parameters, stop, stream) + return self._invoke_claude3(model, credentials, prompt_messages, model_parameters, stop, stream, user) # invoke model return self._generate(model, credentials, prompt_messages, model_parameters, stop, stream, user) def _invoke_claude3(self, model: str, credentials: dict, prompt_messages: list[PromptMessage], model_parameters: dict, - stop: Optional[list[str]] = None, stream: bool = True) -> Union[LLMResult, Generator]: + stop: Optional[list[str]] = None, stream: bool = True, user: Optional[str] = None) -> Union[LLMResult, Generator]: """ Invoke Claude3 large language model @@ -100,22 +100,38 @@ class BedrockLargeLanguageModel(LargeLanguageModel): aws_region=credentials["aws_region"], ) + extra_model_kwargs = {} + if stop: + extra_model_kwargs['stop_sequences'] = stop + + # Notice: If you request the current version of the SDK to the bedrock server, + # you will get the following error message and you need to wait for the service or SDK to be updated. 
+ # Response: Error code: 400 + # {'message': 'Malformed input request: #: subject must not be valid against schema + # {"required":["messages"]}#: extraneous key [metadata] is not permitted, please reformat your input and try again.'} + # TODO: Open in the future when the interface is properly supported + # if user: + # ref: https://github.com/anthropics/anthropic-sdk-python/blob/e84645b07ca5267066700a104b4d8d6a8da1383d/src/anthropic/resources/messages.py#L465 + # extra_model_kwargs['metadata'] = message_create_params.Metadata(user_id=user) + system, prompt_message_dicts = self._convert_claude3_prompt_messages(prompt_messages) + if system: + extra_model_kwargs['system'] = system + response = client.messages.create( model=model, messages=prompt_message_dicts, - stop_sequences=stop if stop else [], - system=system, stream=stream, **model_parameters, + **extra_model_kwargs ) - if stream is False: - return self._handle_claude3_response(model, credentials, response, prompt_messages) - else: + if stream: return self._handle_claude3_stream_response(model, credentials, response, prompt_messages) + return self._handle_claude3_response(model, credentials, response, prompt_messages) + def _handle_claude3_response(self, model: str, credentials: dict, response: Message, prompt_messages: list[PromptMessage]) -> LLMResult: """ @@ -263,13 +279,22 @@ class BedrockLargeLanguageModel(LargeLanguageModel): """ Convert prompt messages to dict list and system """ - system = "" - prompt_message_dicts = [] + system = "" + first_loop = True for message in prompt_messages: if isinstance(message, SystemPromptMessage): - system += message.content + ("\n" if not system else "") - else: + message.content=message.content.strip() + if first_loop: + system=message.content + first_loop=False + else: + system+="\n" + system+=message.content + + prompt_message_dicts = [] + for message in prompt_messages: + if not isinstance(message, SystemPromptMessage): 
prompt_message_dicts.append(self._convert_claude3_prompt_message_to_dict(message)) return system, prompt_message_dicts From 2dee8a25d549c5707e04993b4cff6c8f236d3982 Mon Sep 17 00:00:00 2001 From: Su Yang Date: Tue, 19 Mar 2024 15:50:02 +0800 Subject: [PATCH 05/18] fix: anthropic system prompt not working (#2885) --- .../model_providers/anthropic/llm/llm.py | 16 ++++++++++++---- 1 file changed, 12 insertions(+), 4 deletions(-) diff --git a/api/core/model_runtime/model_providers/anthropic/llm/llm.py b/api/core/model_runtime/model_providers/anthropic/llm/llm.py index ad74179353..724a0401b7 100644 --- a/api/core/model_runtime/model_providers/anthropic/llm/llm.py +++ b/api/core/model_runtime/model_providers/anthropic/llm/llm.py @@ -342,12 +342,20 @@ class AnthropicLargeLanguageModel(LargeLanguageModel): Convert prompt messages to dict list and system """ system = "" - prompt_message_dicts = [] - + first_loop = True for message in prompt_messages: if isinstance(message, SystemPromptMessage): - system += message.content + ("\n" if not system else "") - else: + message.content=message.content.strip() + if first_loop: + system=message.content + first_loop=False + else: + system+="\n" + system+=message.content + + prompt_message_dicts = [] + for message in prompt_messages: + if not isinstance(message, SystemPromptMessage): prompt_message_dicts.append(self._convert_prompt_message_to_dict(message)) return system, prompt_message_dicts From 7c0ae76cd0259f405161547d8ca60722399eee05 Mon Sep 17 00:00:00 2001 From: Bowen Liang Date: Tue, 19 Mar 2024 16:31:46 +0800 Subject: [PATCH 06/18] Bump tiktoken to 0.6.0 to support text-embedding-3-* in encoding_for_model (#2891) --- api/requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/api/requirements.txt b/api/requirements.txt index b8714291a9..886d7e42d0 100644 --- a/api/requirements.txt +++ b/api/requirements.txt @@ -12,7 +12,7 @@ gunicorn~=21.2.0 gevent~=23.9.1 langchain==0.0.250 openai~=1.13.3 
-tiktoken~=0.5.2 +tiktoken~=0.6.0 psycopg2-binary~=2.9.6 pycryptodome==3.19.1 python-dotenv==1.0.0 From 3bcfd84fbaa693ffe890fb74074f81379ab62e2d Mon Sep 17 00:00:00 2001 From: Su Yang Date: Tue, 19 Mar 2024 16:32:06 +0800 Subject: [PATCH 07/18] chore: use API Key instead of APIKey (#2888) --- api/core/model_runtime/model_providers/tongyi/tongyi.yaml | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/api/core/model_runtime/model_providers/tongyi/tongyi.yaml b/api/core/model_runtime/model_providers/tongyi/tongyi.yaml index 441d833f70..b251391e34 100644 --- a/api/core/model_runtime/model_providers/tongyi/tongyi.yaml +++ b/api/core/model_runtime/model_providers/tongyi/tongyi.yaml @@ -24,9 +24,9 @@ provider_credential_schema: credential_form_schemas: - variable: dashscope_api_key label: - en_US: APIKey + en_US: API Key type: secret-input required: true placeholder: - zh_Hans: 在此输入您的 APIKey - en_US: Enter your APIKey + zh_Hans: 在此输入您的 API Key + en_US: Enter your API Key From f6314f8e7338940cf8629ccd4674abe0e1a35bd5 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E5=91=86=E8=90=8C=E9=97=B7=E6=B2=B9=E7=93=B6?= <253605712@qq.com> Date: Tue, 19 Mar 2024 16:32:26 +0800 Subject: [PATCH 08/18] feat:support azure openai llm 0125 version (#2889) --- .../model_providers/azure_openai/_constant.py | 134 ++++++++++++++++++ .../azure_openai/azure_openai.yaml | 12 ++ 2 files changed, 146 insertions(+) diff --git a/api/core/model_runtime/model_providers/azure_openai/_constant.py b/api/core/model_runtime/model_providers/azure_openai/_constant.py index 4aa767fa1d..e81a120fa0 100644 --- a/api/core/model_runtime/model_providers/azure_openai/_constant.py +++ b/api/core/model_runtime/model_providers/azure_openai/_constant.py @@ -123,6 +123,65 @@ LLM_BASE_MODELS = [ ) ) ), + AzureBaseModel( + base_model_name='gpt-35-turbo-0125', + entity=AIModelEntity( + model='fake-deployment-name', + label=I18nObject( + en_US='fake-deployment-name-label', + ), + model_type=ModelType.LLM, + 
features=[ + ModelFeature.AGENT_THOUGHT, + ModelFeature.MULTI_TOOL_CALL, + ModelFeature.STREAM_TOOL_CALL, + ], + fetch_from=FetchFrom.CUSTOMIZABLE_MODEL, + model_properties={ + ModelPropertyKey.MODE: LLMMode.CHAT.value, + ModelPropertyKey.CONTEXT_SIZE: 16385, + }, + parameter_rules=[ + ParameterRule( + name='temperature', + **PARAMETER_RULE_TEMPLATE[DefaultParameterName.TEMPERATURE], + ), + ParameterRule( + name='top_p', + **PARAMETER_RULE_TEMPLATE[DefaultParameterName.TOP_P], + ), + ParameterRule( + name='presence_penalty', + **PARAMETER_RULE_TEMPLATE[DefaultParameterName.PRESENCE_PENALTY], + ), + ParameterRule( + name='frequency_penalty', + **PARAMETER_RULE_TEMPLATE[DefaultParameterName.FREQUENCY_PENALTY], + ), + _get_max_tokens(default=512, min_val=1, max_val=4096), + ParameterRule( + name='response_format', + label=I18nObject( + zh_Hans='回复格式', + en_US='response_format' + ), + type='string', + help=I18nObject( + zh_Hans='指定模型必须输出的格式', + en_US='specifying the format that the model must output' + ), + required=False, + options=['text', 'json_object'] + ), + ], + pricing=PriceConfig( + input=0.0005, + output=0.0015, + unit=0.001, + currency='USD', + ) + ) + ), AzureBaseModel( base_model_name='gpt-4', entity=AIModelEntity( @@ -273,6 +332,81 @@ LLM_BASE_MODELS = [ ) ) ), + AzureBaseModel( + base_model_name='gpt-4-0125-preview', + entity=AIModelEntity( + model='fake-deployment-name', + label=I18nObject( + en_US='fake-deployment-name-label', + ), + model_type=ModelType.LLM, + features=[ + ModelFeature.AGENT_THOUGHT, + ModelFeature.MULTI_TOOL_CALL, + ModelFeature.STREAM_TOOL_CALL, + ], + fetch_from=FetchFrom.CUSTOMIZABLE_MODEL, + model_properties={ + ModelPropertyKey.MODE: LLMMode.CHAT.value, + ModelPropertyKey.CONTEXT_SIZE: 128000, + }, + parameter_rules=[ + ParameterRule( + name='temperature', + **PARAMETER_RULE_TEMPLATE[DefaultParameterName.TEMPERATURE], + ), + ParameterRule( + name='top_p', + **PARAMETER_RULE_TEMPLATE[DefaultParameterName.TOP_P], + ), + 
ParameterRule( + name='presence_penalty', + **PARAMETER_RULE_TEMPLATE[DefaultParameterName.PRESENCE_PENALTY], + ), + ParameterRule( + name='frequency_penalty', + **PARAMETER_RULE_TEMPLATE[DefaultParameterName.FREQUENCY_PENALTY], + ), + _get_max_tokens(default=512, min_val=1, max_val=4096), + ParameterRule( + name='seed', + label=I18nObject( + zh_Hans='种子', + en_US='Seed' + ), + type='int', + help=I18nObject( + zh_Hans='如果指定,模型将尽最大努力进行确定性采样,使得重复的具有相同种子和参数的请求应该返回相同的结果。不能保证确定性,您应该参考 system_fingerprint 响应参数来监视变化。', + en_US='If specified, model will make a best effort to sample deterministically, such that repeated requests with the same seed and parameters should return the same result. Determinism is not guaranteed, and you should refer to the system_fingerprint response parameter to monitor changes in the backend.' + ), + required=False, + precision=2, + min=0, + max=1, + ), + ParameterRule( + name='response_format', + label=I18nObject( + zh_Hans='回复格式', + en_US='response_format' + ), + type='string', + help=I18nObject( + zh_Hans='指定模型必须输出的格式', + en_US='specifying the format that the model must output' + ), + required=False, + options=['text', 'json_object'] + ), + ], + pricing=PriceConfig( + input=0.01, + output=0.03, + unit=0.001, + currency='USD', + ) + ) + ), AzureBaseModel( base_model_name='gpt-4-1106-preview', entity=AIModelEntity( diff --git a/api/core/model_runtime/model_providers/azure_openai/azure_openai.yaml b/api/core/model_runtime/model_providers/azure_openai/azure_openai.yaml index 224f2a08a1..792d051d94 100644 --- a/api/core/model_runtime/model_providers/azure_openai/azure_openai.yaml +++ b/api/core/model_runtime/model_providers/azure_openai/azure_openai.yaml @@ -75,6 +75,12 @@ model_credential_schema: show_on: - variable: __model_type value: llm + - label: + en_US: gpt-35-turbo-0125 + value: gpt-35-turbo-0125 + show_on: + - variable: __model_type + value: llm - label: en_US: gpt-35-turbo-16k value: gpt-35-turbo-16k @@ -93,6 +99,12 @@ 
model_credential_schema: show_on: - variable: __model_type value: llm + - label: + en_US: gpt-4-0125-preview + value: gpt-4-0125-preview + show_on: + - variable: __model_type + value: llm - label: en_US: gpt-4-1106-preview value: gpt-4-1106-preview From 7e3c59e53ef835d0c3870c9b7151ded3b604c3fc Mon Sep 17 00:00:00 2001 From: Su Yang Date: Tue, 19 Mar 2024 16:32:42 +0800 Subject: [PATCH 09/18] chore: Update TongYi models prices (#2890) --- .../model_providers/tongyi/llm/qwen-max-1201.yaml | 5 +++++ .../model_providers/tongyi/llm/qwen-max-longcontext.yaml | 5 +++++ .../model_runtime/model_providers/tongyi/llm/qwen-max.yaml | 5 +++++ 3 files changed, 15 insertions(+) diff --git a/api/core/model_runtime/model_providers/tongyi/llm/qwen-max-1201.yaml b/api/core/model_runtime/model_providers/tongyi/llm/qwen-max-1201.yaml index 3461863e67..e0ba6fe4a8 100644 --- a/api/core/model_runtime/model_providers/tongyi/llm/qwen-max-1201.yaml +++ b/api/core/model_runtime/model_providers/tongyi/llm/qwen-max-1201.yaml @@ -59,3 +59,8 @@ parameter_rules: required: false - name: response_format use_template: response_format +pricing: + input: '0.12' + output: '0.12' + unit: '0.001' + currency: RMB diff --git a/api/core/model_runtime/model_providers/tongyi/llm/qwen-max-longcontext.yaml b/api/core/model_runtime/model_providers/tongyi/llm/qwen-max-longcontext.yaml index 9089c5904a..e2a291cc59 100644 --- a/api/core/model_runtime/model_providers/tongyi/llm/qwen-max-longcontext.yaml +++ b/api/core/model_runtime/model_providers/tongyi/llm/qwen-max-longcontext.yaml @@ -59,3 +59,8 @@ parameter_rules: required: false - name: response_format use_template: response_format +pricing: + input: '0.12' + output: '0.12' + unit: '0.001' + currency: RMB diff --git a/api/core/model_runtime/model_providers/tongyi/llm/qwen-max.yaml b/api/core/model_runtime/model_providers/tongyi/llm/qwen-max.yaml index eb1e8ac09b..8260b5081d 100644 --- a/api/core/model_runtime/model_providers/tongyi/llm/qwen-max.yaml +++ 
b/api/core/model_runtime/model_providers/tongyi/llm/qwen-max.yaml @@ -59,3 +59,8 @@ parameter_rules: required: false - name: response_format use_template: response_format +pricing: + input: '0.12' + output: '0.12' + unit: '0.001' + currency: RMB From e9aa0e89d34fbf2c686ec118888cf90b33e9f57d Mon Sep 17 00:00:00 2001 From: crazywoola <100913391+crazywoola@users.noreply.github.com> Date: Tue, 19 Mar 2024 17:24:57 +0800 Subject: [PATCH 10/18] chore: update pr template (#2893) --- .github/pull_request_template.md | 2 ++ 1 file changed, 2 insertions(+) diff --git a/.github/pull_request_template.md b/.github/pull_request_template.md index 21ec0d5fa4..965831ebe3 100644 --- a/.github/pull_request_template.md +++ b/.github/pull_request_template.md @@ -12,6 +12,8 @@ Please delete options that are not relevant. - [ ] New feature (non-breaking change which adds functionality) - [ ] Breaking change (fix or feature that would cause existing functionality to not work as expected) - [ ] This change requires a documentation update, included: [Dify Document](https://github.com/langgenius/dify-docs) +- [ ] Improvement,including but not limited to code refactoring, performance optimization, and UI/UX improvement +- [ ] Dependency upgrade # How Has This Been Tested? 
From 8133ba16b19fc1a5fb0a46b24db6150a4df0c7ff Mon Sep 17 00:00:00 2001 From: Su Yang Date: Tue, 19 Mar 2024 18:13:32 +0800 Subject: [PATCH 11/18] chore: update Qwen model params (#2892) --- .../tongyi/llm/qwen-max-1201.yaml | 53 +++++++++++------- .../tongyi/llm/qwen-max-longcontext.yaml | 55 +++++++++++-------- .../model_providers/tongyi/llm/qwen-max.yaml | 53 +++++++++++------- .../model_providers/tongyi/llm/qwen-plus.yaml | 54 +++++++++++------- .../tongyi/llm/qwen-turbo.yaml | 53 +++++++++++------- 5 files changed, 162 insertions(+), 106 deletions(-) diff --git a/api/core/model_runtime/model_providers/tongyi/llm/qwen-max-1201.yaml b/api/core/model_runtime/model_providers/tongyi/llm/qwen-max-1201.yaml index e0ba6fe4a8..691347e701 100644 --- a/api/core/model_runtime/model_providers/tongyi/llm/qwen-max-1201.yaml +++ b/api/core/model_runtime/model_providers/tongyi/llm/qwen-max-1201.yaml @@ -8,55 +8,66 @@ model_properties: parameter_rules: - name: temperature use_template: temperature - default: 1.0 + type: float + default: 0.85 min: 0.0 max: 2.0 help: zh_Hans: 用于控制随机性和多样性的程度。具体来说,temperature值控制了生成文本时对每个候选词的概率分布进行平滑的程度。较高的temperature值会降低概率分布的峰值,使得更多的低概率词被选择,生成结果更加多样化;而较低的temperature值则会增强概率分布的峰值,使得高概率词更容易被选择,生成结果更加确定。 en_US: Used to control the degree of randomness and diversity. Specifically, the temperature value controls the degree to which the probability distribution of each candidate word is smoothed when generating text. A higher temperature value will reduce the peak value of the probability distribution, allowing more low-probability words to be selected, and the generated results will be more diverse; while a lower temperature value will enhance the peak value of the probability distribution, making it easier for high-probability words to be selected. , the generated results are more certain. 
+ - name: max_tokens + use_template: max_tokens + type: int + default: 2000 + min: 1 + max: 2000 + help: + zh_Hans: 用于指定模型在生成内容时token的最大数量,它定义了生成的上限,但不保证每次都会生成到这个数量。 + en_US: It is used to specify the maximum number of tokens when the model generates content. It defines the upper limit of generation, but does not guarantee that this number will be generated every time. - name: top_p use_template: top_p + type: float default: 0.8 min: 0.1 max: 0.9 help: zh_Hans: 生成过程中核采样方法概率阈值,例如,取值为0.8时,仅保留概率加起来大于等于0.8的最可能token的最小集合作为候选集。取值范围为(0,1.0),取值越大,生成的随机性越高;取值越低,生成的确定性越高。 en_US: The probability threshold of the kernel sampling method during the generation process. For example, when the value is 0.8, only the smallest set of the most likely tokens with a sum of probabilities greater than or equal to 0.8 is retained as the candidate set. The value range is (0,1.0). The larger the value, the higher the randomness generated; the lower the value, the higher the certainty generated. - - name: max_tokens - use_template: max_tokens - default: 1500 - min: 1 - max: 6000 - help: - zh_Hans: 用于限制模型生成token的数量,max_tokens设置的是生成上限,并不表示一定会生成这么多的token数量。 - en_US: It is used to limit the number of tokens generated by the model. max_tokens sets the upper limit of generation, which does not mean that so many tokens will be generated. - name: top_k + type: int + min: 0 + max: 99 label: zh_Hans: 取样数量 en_US: Top k - type: int help: - zh_Hans: 生成时,采样候选集的大小。例如,取值为50时,仅将单次生成中得分最高的50个token组成随机采样的候选集。取值越大,生成的随机性越高;取值越小,生成的确定性越高。默认不传递该参数,取值为None或当top_k大于100时,表示不启用top_k策略,此时,仅有top_p策略生效。 - en_US: The size of the sample candidate set when generated. For example, when the value is 50, only the 50 highest-scoring tokens in a single generation form a randomly sampled candidate set. The larger the value, the higher the randomness generated; the smaller the value, the higher the certainty generated. This parameter is not passed by default. 
The value is None or when top_k is greater than 100, it means that the top_k policy is not enabled. At this time, only the top_p policy takes effect. - required: false + zh_Hans: 生成时,采样候选集的大小。例如,取值为50时,仅将单次生成中得分最高的50个token组成随机采样的候选集。取值越大,生成的随机性越高;取值越小,生成的确定性越高。 + en_US: The size of the sample candidate set when generated. For example, when the value is 50, only the 50 highest-scoring tokens in a single generation form a randomly sampled candidate set. The larger the value, the higher the randomness generated; the smaller the value, the higher the certainty generated. - name: seed + required: false + type: int + default: 1234 label: zh_Hans: 随机种子 en_US: Random seed - type: int help: - zh_Hans: 生成时,随机数的种子,用于控制模型生成的随机性。如果使用相同的种子,每次运行生成的结果都将相同;当需要复现模型的生成结果时,可以使用相同的种子。seed参数支持无符号64位整数类型。 - en_US: When generating, the random number seed is used to control the randomness of model generation. If you use the same seed, the results generated by each run will be the same; when you need to reproduce the results of the model, you can use the same seed. The seed parameter supports unsigned 64-bit integer types. - required: false + zh_Hans: 生成时使用的随机数种子,用户控制模型生成内容的随机性。支持无符号64位整数,默认值为 1234。在使用seed时,模型将尽可能生成相同或相似的结果,但目前不保证每次生成的结果完全相同。 + en_US: The random number seed used when generating, the user controls the randomness of the content generated by the model. Supports unsigned 64-bit integers, default value is 1234. When using seed, the model will try its best to generate the same or similar results, but there is currently no guarantee that the results will be exactly the same every time. - name: repetition_penalty - label: - en_US: Repetition penalty + required: false type: float default: 1.1 + label: + en_US: Repetition penalty help: zh_Hans: 用于控制模型生成时的重复度。提高repetition_penalty时可以降低模型生成的重复度。1.0表示不做惩罚。 - en_US: Used to control the repetition of model generation. Increasing the repetition_penalty can reduce the repetition of model generation. 1.0 means no punishment. 
- required: false + en_US: Used to control the repeatability when generating models. Increasing repetition_penalty can reduce the duplication of model generation. 1.0 means no punishment. + - name: enable_search + type: boolean + default: false + help: + zh_Hans: 模型内置了互联网搜索服务,该参数控制模型在生成文本时是否参考使用互联网搜索结果。启用互联网搜索,模型会将搜索结果作为文本生成过程中的参考信息,但模型会基于其内部逻辑“自行判断”是否使用互联网搜索结果。 + en_US: The model has a built-in Internet search service. This parameter controls whether the model refers to Internet search results when generating text. When Internet search is enabled, the model will use the search results as reference information in the text generation process, but the model will "judge" whether to use Internet search results based on its internal logic. - name: response_format use_template: response_format pricing: diff --git a/api/core/model_runtime/model_providers/tongyi/llm/qwen-max-longcontext.yaml b/api/core/model_runtime/model_providers/tongyi/llm/qwen-max-longcontext.yaml index e2a291cc59..91129d37dd 100644 --- a/api/core/model_runtime/model_providers/tongyi/llm/qwen-max-longcontext.yaml +++ b/api/core/model_runtime/model_providers/tongyi/llm/qwen-max-longcontext.yaml @@ -4,59 +4,70 @@ label: model_type: llm model_properties: mode: chat - context_size: 30000 + context_size: 32768 parameter_rules: - name: temperature use_template: temperature - default: 1.0 + type: float + default: 0.85 min: 0.0 max: 2.0 help: zh_Hans: 用于控制随机性和多样性的程度。具体来说,temperature值控制了生成文本时对每个候选词的概率分布进行平滑的程度。较高的temperature值会降低概率分布的峰值,使得更多的低概率词被选择,生成结果更加多样化;而较低的temperature值则会增强概率分布的峰值,使得高概率词更容易被选择,生成结果更加确定。 en_US: Used to control the degree of randomness and diversity. Specifically, the temperature value controls the degree to which the probability distribution of each candidate word is smoothed when generating text. 
A higher temperature value will reduce the peak value of the probability distribution, allowing more low-probability words to be selected, and the generated results will be more diverse; while a lower temperature value will enhance the peak value of the probability distribution, making it easier for high-probability words to be selected. , the generated results are more certain. + - name: max_tokens + use_template: max_tokens + type: int + default: 2000 + min: 1 + max: 2000 + help: + zh_Hans: 用于指定模型在生成内容时token的最大数量,它定义了生成的上限,但不保证每次都会生成到这个数量。 + en_US: It is used to specify the maximum number of tokens when the model generates content. It defines the upper limit of generation, but does not guarantee that this number will be generated every time. - name: top_p use_template: top_p + type: float default: 0.8 min: 0.1 max: 0.9 help: zh_Hans: 生成过程中核采样方法概率阈值,例如,取值为0.8时,仅保留概率加起来大于等于0.8的最可能token的最小集合作为候选集。取值范围为(0,1.0),取值越大,生成的随机性越高;取值越低,生成的确定性越高。 en_US: The probability threshold of the kernel sampling method during the generation process. For example, when the value is 0.8, only the smallest set of the most likely tokens with a sum of probabilities greater than or equal to 0.8 is retained as the candidate set. The value range is (0,1.0). The larger the value, the higher the randomness generated; the lower the value, the higher the certainty generated. - - name: max_tokens - use_template: max_tokens - default: 2000 - min: 1 - max: 28000 - help: - zh_Hans: 用于限制模型生成token的数量,max_tokens设置的是生成上限,并不表示一定会生成这么多的token数量。 - en_US: It is used to limit the number of tokens generated by the model. max_tokens sets the upper limit of generation, which does not mean that so many tokens will be generated. 
- name: top_k + type: int + min: 0 + max: 99 label: zh_Hans: 取样数量 en_US: Top k - type: int help: - zh_Hans: 生成时,采样候选集的大小。例如,取值为50时,仅将单次生成中得分最高的50个token组成随机采样的候选集。取值越大,生成的随机性越高;取值越小,生成的确定性越高。默认不传递该参数,取值为None或当top_k大于100时,表示不启用top_k策略,此时,仅有top_p策略生效。 - en_US: The size of the sample candidate set when generated. For example, when the value is 50, only the 50 highest-scoring tokens in a single generation form a randomly sampled candidate set. The larger the value, the higher the randomness generated; the smaller the value, the higher the certainty generated. This parameter is not passed by default. The value is None or when top_k is greater than 100, it means that the top_k policy is not enabled. At this time, only the top_p policy takes effect. - required: false + zh_Hans: 生成时,采样候选集的大小。例如,取值为50时,仅将单次生成中得分最高的50个token组成随机采样的候选集。取值越大,生成的随机性越高;取值越小,生成的确定性越高。 + en_US: The size of the sample candidate set when generated. For example, when the value is 50, only the 50 highest-scoring tokens in a single generation form a randomly sampled candidate set. The larger the value, the higher the randomness generated; the smaller the value, the higher the certainty generated. - name: seed + required: false + type: int + default: 1234 label: zh_Hans: 随机种子 en_US: Random seed - type: int help: - zh_Hans: 生成时,随机数的种子,用于控制模型生成的随机性。如果使用相同的种子,每次运行生成的结果都将相同;当需要复现模型的生成结果时,可以使用相同的种子。seed参数支持无符号64位整数类型。 - en_US: When generating, the random number seed is used to control the randomness of model generation. If you use the same seed, the results generated by each run will be the same; when you need to reproduce the results of the model, you can use the same seed. The seed parameter supports unsigned 64-bit integer types. - required: false + zh_Hans: 生成时使用的随机数种子,用户控制模型生成内容的随机性。支持无符号64位整数,默认值为 1234。在使用seed时,模型将尽可能生成相同或相似的结果,但目前不保证每次生成的结果完全相同。 + en_US: The random number seed used when generating, the user controls the randomness of the content generated by the model. 
Supports unsigned 64-bit integers, default value is 1234. When using seed, the model will try its best to generate the same or similar results, but there is currently no guarantee that the results will be exactly the same every time. - name: repetition_penalty - label: - en_US: Repetition penalty + required: false type: float default: 1.1 + label: + en_US: Repetition penalty help: zh_Hans: 用于控制模型生成时的重复度。提高repetition_penalty时可以降低模型生成的重复度。1.0表示不做惩罚。 - en_US: Used to control the repetition of model generation. Increasing the repetition_penalty can reduce the repetition of model generation. 1.0 means no punishment. - required: false + en_US: Used to control the repeatability when generating models. Increasing repetition_penalty can reduce the duplication of model generation. 1.0 means no punishment. + - name: enable_search + type: boolean + default: false + help: + zh_Hans: 模型内置了互联网搜索服务,该参数控制模型在生成文本时是否参考使用互联网搜索结果。启用互联网搜索,模型会将搜索结果作为文本生成过程中的参考信息,但模型会基于其内部逻辑“自行判断”是否使用互联网搜索结果。 + en_US: The model has a built-in Internet search service. This parameter controls whether the model refers to Internet search results when generating text. When Internet search is enabled, the model will use the search results as reference information in the text generation process, but the model will "judge" whether to use Internet search results based on its internal logic. 
- name: response_format use_template: response_format pricing: diff --git a/api/core/model_runtime/model_providers/tongyi/llm/qwen-max.yaml b/api/core/model_runtime/model_providers/tongyi/llm/qwen-max.yaml index 8260b5081d..5d6b69f21f 100644 --- a/api/core/model_runtime/model_providers/tongyi/llm/qwen-max.yaml +++ b/api/core/model_runtime/model_providers/tongyi/llm/qwen-max.yaml @@ -8,55 +8,66 @@ model_properties: parameter_rules: - name: temperature use_template: temperature - default: 1.0 + type: float + default: 0.85 min: 0.0 max: 2.0 help: zh_Hans: 用于控制随机性和多样性的程度。具体来说,temperature值控制了生成文本时对每个候选词的概率分布进行平滑的程度。较高的temperature值会降低概率分布的峰值,使得更多的低概率词被选择,生成结果更加多样化;而较低的temperature值则会增强概率分布的峰值,使得高概率词更容易被选择,生成结果更加确定。 en_US: Used to control the degree of randomness and diversity. Specifically, the temperature value controls the degree to which the probability distribution of each candidate word is smoothed when generating text. A higher temperature value will reduce the peak value of the probability distribution, allowing more low-probability words to be selected, and the generated results will be more diverse; while a lower temperature value will enhance the peak value of the probability distribution, making it easier for high-probability words to be selected. , the generated results are more certain. + - name: max_tokens + use_template: max_tokens + type: int + default: 2000 + min: 1 + max: 2000 + help: + zh_Hans: 用于指定模型在生成内容时token的最大数量,它定义了生成的上限,但不保证每次都会生成到这个数量。 + en_US: It is used to specify the maximum number of tokens when the model generates content. It defines the upper limit of generation, but does not guarantee that this number will be generated every time. - name: top_p use_template: top_p + type: float default: 0.8 min: 0.1 max: 0.9 help: zh_Hans: 生成过程中核采样方法概率阈值,例如,取值为0.8时,仅保留概率加起来大于等于0.8的最可能token的最小集合作为候选集。取值范围为(0,1.0),取值越大,生成的随机性越高;取值越低,生成的确定性越高。 en_US: The probability threshold of the kernel sampling method during the generation process. 
For example, when the value is 0.8, only the smallest set of the most likely tokens with a sum of probabilities greater than or equal to 0.8 is retained as the candidate set. The value range is (0,1.0). The larger the value, the higher the randomness generated; the lower the value, the higher the certainty generated. - - name: max_tokens - use_template: max_tokens - default: 1500 - min: 1 - max: 6000 - help: - zh_Hans: 用于限制模型生成token的数量,max_tokens设置的是生成上限,并不表示一定会生成这么多的token数量。 - en_US: It is used to limit the number of tokens generated by the model. max_tokens sets the upper limit of generation, which does not mean that so many tokens will be generated. - name: top_k + type: int + min: 0 + max: 99 label: zh_Hans: 取样数量 en_US: Top k - type: int help: - zh_Hans: 生成时,采样候选集的大小。例如,取值为50时,仅将单次生成中得分最高的50个token组成随机采样的候选集。取值越大,生成的随机性越高;取值越小,生成的确定性越高。默认不传递该参数,取值为None或当top_k大于100时,表示不启用top_k策略,此时,仅有top_p策略生效。 - en_US: The size of the sample candidate set when generated. For example, when the value is 50, only the 50 highest-scoring tokens in a single generation form a randomly sampled candidate set. The larger the value, the higher the randomness generated; the smaller the value, the higher the certainty generated. This parameter is not passed by default. The value is None or when top_k is greater than 100, it means that the top_k policy is not enabled. At this time, only the top_p policy takes effect. - required: false + zh_Hans: 生成时,采样候选集的大小。例如,取值为50时,仅将单次生成中得分最高的50个token组成随机采样的候选集。取值越大,生成的随机性越高;取值越小,生成的确定性越高。 + en_US: The size of the sample candidate set when generated. For example, when the value is 50, only the 50 highest-scoring tokens in a single generation form a randomly sampled candidate set. The larger the value, the higher the randomness generated; the smaller the value, the higher the certainty generated. 
- name: seed + required: false + type: int + default: 1234 label: zh_Hans: 随机种子 en_US: Random seed - type: int help: - zh_Hans: 生成时,随机数的种子,用于控制模型生成的随机性。如果使用相同的种子,每次运行生成的结果都将相同;当需要复现模型的生成结果时,可以使用相同的种子。seed参数支持无符号64位整数类型。 - en_US: When generating, the random number seed is used to control the randomness of model generation. If you use the same seed, the results generated by each run will be the same; when you need to reproduce the results of the model, you can use the same seed. The seed parameter supports unsigned 64-bit integer types. - required: false + zh_Hans: 生成时使用的随机数种子,用户控制模型生成内容的随机性。支持无符号64位整数,默认值为 1234。在使用seed时,模型将尽可能生成相同或相似的结果,但目前不保证每次生成的结果完全相同。 + en_US: The random number seed used when generating, the user controls the randomness of the content generated by the model. Supports unsigned 64-bit integers, default value is 1234. When using seed, the model will try its best to generate the same or similar results, but there is currently no guarantee that the results will be exactly the same every time. - name: repetition_penalty - label: - en_US: Repetition penalty + required: false type: float default: 1.1 + label: + en_US: Repetition penalty help: zh_Hans: 用于控制模型生成时的重复度。提高repetition_penalty时可以降低模型生成的重复度。1.0表示不做惩罚。 - en_US: Used to control the repetition of model generation. Increasing the repetition_penalty can reduce the repetition of model generation. 1.0 means no punishment. - required: false + en_US: Used to control the repeatability when generating models. Increasing repetition_penalty can reduce the duplication of model generation. 1.0 means no punishment. + - name: enable_search + type: boolean + default: false + help: + zh_Hans: 模型内置了互联网搜索服务,该参数控制模型在生成文本时是否参考使用互联网搜索结果。启用互联网搜索,模型会将搜索结果作为文本生成过程中的参考信息,但模型会基于其内部逻辑“自行判断”是否使用互联网搜索结果。 + en_US: The model has a built-in Internet search service. This parameter controls whether the model refers to Internet search results when generating text. 
When Internet search is enabled, the model will use the search results as reference information in the text generation process, but the model will "judge" whether to use Internet search results based on its internal logic. - name: response_format use_template: response_format pricing: diff --git a/api/core/model_runtime/model_providers/tongyi/llm/qwen-plus.yaml b/api/core/model_runtime/model_providers/tongyi/llm/qwen-plus.yaml index 83640371f9..7c25e8802b 100644 --- a/api/core/model_runtime/model_providers/tongyi/llm/qwen-plus.yaml +++ b/api/core/model_runtime/model_providers/tongyi/llm/qwen-plus.yaml @@ -4,58 +4,70 @@ label: model_type: llm model_properties: mode: completion - context_size: 32000 + context_size: 32768 parameter_rules: - name: temperature use_template: temperature - default: 1.0 + type: float + default: 0.85 min: 0.0 max: 2.0 help: zh_Hans: 用于控制随机性和多样性的程度。具体来说,temperature值控制了生成文本时对每个候选词的概率分布进行平滑的程度。较高的temperature值会降低概率分布的峰值,使得更多的低概率词被选择,生成结果更加多样化;而较低的temperature值则会增强概率分布的峰值,使得高概率词更容易被选择,生成结果更加确定。 en_US: Used to control the degree of randomness and diversity. Specifically, the temperature value controls the degree to which the probability distribution of each candidate word is smoothed when generating text. A higher temperature value will reduce the peak value of the probability distribution, allowing more low-probability words to be selected, and the generated results will be more diverse; while a lower temperature value will enhance the peak value of the probability distribution, making it easier for high-probability words to be selected. , the generated results are more certain. + - name: max_tokens + use_template: max_tokens + type: int + default: 1500 + min: 1 + max: 1500 + help: + zh_Hans: 用于指定模型在生成内容时token的最大数量,它定义了生成的上限,但不保证每次都会生成到这个数量。 + en_US: It is used to specify the maximum number of tokens when the model generates content. It defines the upper limit of generation, but does not guarantee that this number will be generated every time. 
- name: top_p use_template: top_p + type: float default: 0.8 min: 0.1 max: 0.9 help: zh_Hans: 生成过程中核采样方法概率阈值,例如,取值为0.8时,仅保留概率加起来大于等于0.8的最可能token的最小集合作为候选集。取值范围为(0,1.0),取值越大,生成的随机性越高;取值越低,生成的确定性越高。 en_US: The probability threshold of the kernel sampling method during the generation process. For example, when the value is 0.8, only the smallest set of the most likely tokens with a sum of probabilities greater than or equal to 0.8 is retained as the candidate set. The value range is (0,1.0). The larger the value, the higher the randomness generated; the lower the value, the higher the certainty generated. - - name: max_tokens - use_template: max_tokens - default: 2000 - min: 1 - max: 30000 - help: - zh_Hans: 用于限制模型生成token的数量,max_tokens设置的是生成上限,并不表示一定会生成这么多的token数量。 - en_US: It is used to limit the number of tokens generated by the model. max_tokens sets the upper limit of generation, which does not mean that so many tokens will be generated. - name: top_k + type: int + min: 0 + max: 99 label: zh_Hans: 取样数量 en_US: Top k - type: int help: - zh_Hans: 生成时,采样候选集的大小。例如,取值为50时,仅将单次生成中得分最高的50个token组成随机采样的候选集。取值越大,生成的随机性越高;取值越小,生成的确定性越高。默认不传递该参数,取值为None或当top_k大于100时,表示不启用top_k策略,此时,仅有top_p策略生效。 - en_US: The size of the sample candidate set when generated. For example, when the value is 50, only the 50 highest-scoring tokens in a single generation form a randomly sampled candidate set. The larger the value, the higher the randomness generated; the smaller the value, the higher the certainty generated. This parameter is not passed by default. The value is None or when top_k is greater than 100, it means that the top_k policy is not enabled. At this time, only the top_p policy takes effect. - required: false + zh_Hans: 生成时,采样候选集的大小。例如,取值为50时,仅将单次生成中得分最高的50个token组成随机采样的候选集。取值越大,生成的随机性越高;取值越小,生成的确定性越高。 + en_US: The size of the sample candidate set when generated. 
For example, when the value is 50, only the 50 highest-scoring tokens in a single generation form a randomly sampled candidate set. The larger the value, the higher the randomness generated; the smaller the value, the higher the certainty generated. - name: seed + required: false + type: int + default: 1234 label: zh_Hans: 随机种子 en_US: Random seed - type: int help: - zh_Hans: 生成时,随机数的种子,用于控制模型生成的随机性。如果使用相同的种子,每次运行生成的结果都将相同;当需要复现模型的生成结果时,可以使用相同的种子。seed参数支持无符号64位整数类型。 - en_US: When generating, the random number seed is used to control the randomness of model generation. If you use the same seed, the results generated by each run will be the same; when you need to reproduce the results of the model, you can use the same seed. The seed parameter supports unsigned 64-bit integer types. - required: false + zh_Hans: 生成时使用的随机数种子,用户控制模型生成内容的随机性。支持无符号64位整数,默认值为 1234。在使用seed时,模型将尽可能生成相同或相似的结果,但目前不保证每次生成的结果完全相同。 + en_US: The random number seed used when generating, the user controls the randomness of the content generated by the model. Supports unsigned 64-bit integers, default value is 1234. When using seed, the model will try its best to generate the same or similar results, but there is currently no guarantee that the results will be exactly the same every time. - name: repetition_penalty - label: - en_US: Repetition penalty + required: false type: float default: 1.1 + label: + en_US: Repetition penalty help: zh_Hans: 用于控制模型生成时的重复度。提高repetition_penalty时可以降低模型生成的重复度。1.0表示不做惩罚。 - en_US: Used to control the repetition of model generation. Increasing the repetition_penalty can reduce the repetition of model generation. 1.0 means no punishment. + en_US: Used to control the repeatability when generating models. Increasing repetition_penalty can reduce the duplication of model generation. 1.0 means no punishment. 
+ - name: enable_search + type: boolean + default: false + help: + zh_Hans: 模型内置了互联网搜索服务,该参数控制模型在生成文本时是否参考使用互联网搜索结果。启用互联网搜索,模型会将搜索结果作为文本生成过程中的参考信息,但模型会基于其内部逻辑“自行判断”是否使用互联网搜索结果。 + en_US: The model has a built-in Internet search service. This parameter controls whether the model refers to Internet search results when generating text. When Internet search is enabled, the model will use the search results as reference information in the text generation process, but the model will "judge" whether to use Internet search results based on its internal logic. - name: response_format use_template: response_format pricing: diff --git a/api/core/model_runtime/model_providers/tongyi/llm/qwen-turbo.yaml b/api/core/model_runtime/model_providers/tongyi/llm/qwen-turbo.yaml index 5455555bbd..20b46de6f3 100644 --- a/api/core/model_runtime/model_providers/tongyi/llm/qwen-turbo.yaml +++ b/api/core/model_runtime/model_providers/tongyi/llm/qwen-turbo.yaml @@ -8,55 +8,66 @@ model_properties: parameter_rules: - name: temperature use_template: temperature - default: 1.0 + type: float + default: 0.85 min: 0.0 max: 2.0 help: zh_Hans: 用于控制随机性和多样性的程度。具体来说,temperature值控制了生成文本时对每个候选词的概率分布进行平滑的程度。较高的temperature值会降低概率分布的峰值,使得更多的低概率词被选择,生成结果更加多样化;而较低的temperature值则会增强概率分布的峰值,使得高概率词更容易被选择,生成结果更加确定。 en_US: Used to control the degree of randomness and diversity. Specifically, the temperature value controls the degree to which the probability distribution of each candidate word is smoothed when generating text. A higher temperature value will reduce the peak value of the probability distribution, allowing more low-probability words to be selected, and the generated results will be more diverse; while a lower temperature value will enhance the peak value of the probability distribution, making it easier for high-probability words to be selected. , the generated results are more certain. 
+ - name: max_tokens + use_template: max_tokens + type: int + default: 1500 + min: 1 + max: 1500 + help: + zh_Hans: 用于指定模型在生成内容时token的最大数量,它定义了生成的上限,但不保证每次都会生成到这个数量。 + en_US: It is used to specify the maximum number of tokens when the model generates content. It defines the upper limit of generation, but does not guarantee that this number will be generated every time. - name: top_p use_template: top_p + type: float default: 0.8 min: 0.1 max: 0.9 help: zh_Hans: 生成过程中核采样方法概率阈值,例如,取值为0.8时,仅保留概率加起来大于等于0.8的最可能token的最小集合作为候选集。取值范围为(0,1.0),取值越大,生成的随机性越高;取值越低,生成的确定性越高。 en_US: The probability threshold of the kernel sampling method during the generation process. For example, when the value is 0.8, only the smallest set of the most likely tokens with a sum of probabilities greater than or equal to 0.8 is retained as the candidate set. The value range is (0,1.0). The larger the value, the higher the randomness generated; the lower the value, the higher the certainty generated. - - name: max_tokens - use_template: max_tokens - default: 1500 - min: 1 - max: 6000 - help: - zh_Hans: 用于限制模型生成token的数量,max_tokens设置的是生成上限,并不表示一定会生成这么多的token数量。 - en_US: It is used to limit the number of tokens generated by the model. max_tokens sets the upper limit of generation, which does not mean that so many tokens will be generated. - name: top_k + type: int + min: 0 + max: 99 label: zh_Hans: 取样数量 en_US: Top k - type: int help: - zh_Hans: 生成时,采样候选集的大小。例如,取值为50时,仅将单次生成中得分最高的50个token组成随机采样的候选集。取值越大,生成的随机性越高;取值越小,生成的确定性越高。默认不传递该参数,取值为None或当top_k大于100时,表示不启用top_k策略,此时,仅有top_p策略生效。 - en_US: The size of the sample candidate set when generated. For example, when the value is 50, only the 50 highest-scoring tokens in a single generation form a randomly sampled candidate set. The larger the value, the higher the randomness generated; the smaller the value, the higher the certainty generated. This parameter is not passed by default. 
The value is None or when top_k is greater than 100, it means that the top_k policy is not enabled. At this time, only the top_p policy takes effect. - required: false + zh_Hans: 生成时,采样候选集的大小。例如,取值为50时,仅将单次生成中得分最高的50个token组成随机采样的候选集。取值越大,生成的随机性越高;取值越小,生成的确定性越高。 + en_US: The size of the sample candidate set when generated. For example, when the value is 50, only the 50 highest-scoring tokens in a single generation form a randomly sampled candidate set. The larger the value, the higher the randomness generated; the smaller the value, the higher the certainty generated. - name: seed + required: false + type: int + default: 1234 label: zh_Hans: 随机种子 en_US: Random seed - type: int help: - zh_Hans: 生成时,随机数的种子,用于控制模型生成的随机性。如果使用相同的种子,每次运行生成的结果都将相同;当需要复现模型的生成结果时,可以使用相同的种子。seed参数支持无符号64位整数类型。 - en_US: When generating, the random number seed is used to control the randomness of model generation. If you use the same seed, the results generated by each run will be the same; when you need to reproduce the results of the model, you can use the same seed. The seed parameter supports unsigned 64-bit integer types. - required: false + zh_Hans: 生成时使用的随机数种子,用户控制模型生成内容的随机性。支持无符号64位整数,默认值为 1234。在使用seed时,模型将尽可能生成相同或相似的结果,但目前不保证每次生成的结果完全相同。 + en_US: The random number seed used when generating, the user controls the randomness of the content generated by the model. Supports unsigned 64-bit integers, default value is 1234. When using seed, the model will try its best to generate the same or similar results, but there is currently no guarantee that the results will be exactly the same every time. - name: repetition_penalty - label: - en_US: Repetition penalty + required: false type: float default: 1.1 + label: + en_US: Repetition penalty help: zh_Hans: 用于控制模型生成时的重复度。提高repetition_penalty时可以降低模型生成的重复度。1.0表示不做惩罚。 - en_US: Used to control the repetition of model generation. Increasing the repetition_penalty can reduce the repetition of model generation. 1.0 means no punishment. 
- required: false + en_US: Used to control the repeatability when generating models. Increasing repetition_penalty can reduce the duplication of model generation. 1.0 means no punishment. + - name: enable_search + type: boolean + default: false + help: + zh_Hans: 模型内置了互联网搜索服务,该参数控制模型在生成文本时是否参考使用互联网搜索结果。启用互联网搜索,模型会将搜索结果作为文本生成过程中的参考信息,但模型会基于其内部逻辑“自行判断”是否使用互联网搜索结果。 + en_US: The model has a built-in Internet search service. This parameter controls whether the model refers to Internet search results when generating text. When Internet search is enabled, the model will use the search results as reference information in the text generation process, but the model will "judge" whether to use Internet search results based on its internal logic. - name: response_format use_template: response_format pricing: From 53d428907b9a8ab5ca8e2d7524e141ba731c7131 Mon Sep 17 00:00:00 2001 From: Lance Mao Date: Tue, 19 Mar 2024 18:17:12 +0800 Subject: [PATCH 12/18] =?UTF-8?q?fix=20incorrect=20exception=20raised=20by?= =?UTF-8?q?=20api=20tool=20which=20leads=20to=20incorrect=20L=E2=80=A6=20(?= =?UTF-8?q?#2886)?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Co-authored-by: OSS-MAOLONGDONG\kaihong --- api/core/tools/tool/api_tool.py | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) diff --git a/api/core/tools/tool/api_tool.py b/api/core/tools/tool/api_tool.py index fa7e7567dd..54e2f41019 100644 --- a/api/core/tools/tool/api_tool.py +++ b/api/core/tools/tool/api_tool.py @@ -9,7 +9,7 @@ import requests import core.helper.ssrf_proxy as ssrf_proxy from core.tools.entities.tool_bundle import ApiBasedToolBundle from core.tools.entities.tool_entities import ToolInvokeMessage -from core.tools.errors import ToolProviderCredentialValidationError +from core.tools.errors import ToolInvokeError, ToolParameterValidationError, ToolProviderCredentialValidationError from core.tools.tool.tool import Tool API_TOOL_DEFAULT_TIMEOUT = (10, 60) @@ 
-81,7 +81,7 @@ class ApiTool(Tool): needed_parameters = [parameter for parameter in self.api_bundle.parameters if parameter.required] for parameter in needed_parameters: if parameter.required and parameter.name not in parameters: - raise ToolProviderCredentialValidationError(f"Missing required parameter {parameter.name}") + raise ToolParameterValidationError(f"Missing required parameter {parameter.name}") if parameter.default is not None and parameter.name not in parameters: parameters[parameter.name] = parameter.default @@ -94,7 +94,7 @@ class ApiTool(Tool): """ if isinstance(response, httpx.Response): if response.status_code >= 400: - raise ToolProviderCredentialValidationError(f"Request failed with status code {response.status_code}") + raise ToolInvokeError(f"Request failed with status code {response.status_code} and {response.text}") if not response.content: return 'Empty response from the tool, please check your parameters and try again.' try: @@ -107,7 +107,7 @@ class ApiTool(Tool): return response.text elif isinstance(response, requests.Response): if not response.ok: - raise ToolProviderCredentialValidationError(f"Request failed with status code {response.status_code}") + raise ToolInvokeError(f"Request failed with status code {response.status_code} and {response.text}") if not response.content: return 'Empty response from the tool, please check your parameters and try again.' 
try: @@ -139,7 +139,7 @@ class ApiTool(Tool): if parameter['name'] in parameters: value = parameters[parameter['name']] elif parameter['required']: - raise ToolProviderCredentialValidationError(f"Missing required parameter {parameter['name']}") + raise ToolParameterValidationError(f"Missing required parameter {parameter['name']}") else: value = (parameter.get('schema', {}) or {}).get('default', '') path_params[parameter['name']] = value @@ -149,7 +149,7 @@ class ApiTool(Tool): if parameter['name'] in parameters: value = parameters[parameter['name']] elif parameter['required']: - raise ToolProviderCredentialValidationError(f"Missing required parameter {parameter['name']}") + raise ToolParameterValidationError(f"Missing required parameter {parameter['name']}") else: value = (parameter.get('schema', {}) or {}).get('default', '') params[parameter['name']] = value @@ -159,7 +159,7 @@ class ApiTool(Tool): if parameter['name'] in parameters: value = parameters[parameter['name']] elif parameter['required']: - raise ToolProviderCredentialValidationError(f"Missing required parameter {parameter['name']}") + raise ToolParameterValidationError(f"Missing required parameter {parameter['name']}") else: value = (parameter.get('schema', {}) or {}).get('default', '') cookies[parameter['name']] = value @@ -169,7 +169,7 @@ class ApiTool(Tool): if parameter['name'] in parameters: value = parameters[parameter['name']] elif parameter['required']: - raise ToolProviderCredentialValidationError(f"Missing required parameter {parameter['name']}") + raise ToolParameterValidationError(f"Missing required parameter {parameter['name']}") else: value = (parameter.get('schema', {}) or {}).get('default', '') headers[parameter['name']] = value @@ -188,7 +188,7 @@ class ApiTool(Tool): # convert type body[name] = self._convert_body_property_type(property, parameters[name]) elif name in required: - raise ToolProviderCredentialValidationError( + raise ToolParameterValidationError( f"Missing required 
parameter {name} in operation {self.api_bundle.operation_id}" ) elif 'default' in property: From fbbba6db92f0d08d9d931533f85396a6a47adc5f Mon Sep 17 00:00:00 2001 From: takatost Date: Tue, 19 Mar 2024 18:34:23 +0800 Subject: [PATCH 13/18] feat: optimize ollama model default parameters (#2894) --- .../model_runtime/model_providers/ollama/llm/llm.py | 13 ++----------- 1 file changed, 2 insertions(+), 11 deletions(-) diff --git a/api/core/model_runtime/model_providers/ollama/llm/llm.py b/api/core/model_runtime/model_providers/ollama/llm/llm.py index e4388699e3..3589ca77cc 100644 --- a/api/core/model_runtime/model_providers/ollama/llm/llm.py +++ b/api/core/model_runtime/model_providers/ollama/llm/llm.py @@ -449,7 +449,7 @@ class OllamaLargeLanguageModel(LargeLanguageModel): help=I18nObject(en_US="The temperature of the model. " "Increasing the temperature will make the model answer " "more creatively. (Default: 0.8)"), - default=0.8, + default=0.1, min=0, max=2 ), @@ -472,7 +472,6 @@ class OllamaLargeLanguageModel(LargeLanguageModel): help=I18nObject(en_US="Reduces the probability of generating nonsense. " "A higher value (e.g. 100) will give more diverse answers, " "while a lower value (e.g. 10) will be more conservative. (Default: 40)"), - default=40, min=1, max=100 ), @@ -483,7 +482,6 @@ class OllamaLargeLanguageModel(LargeLanguageModel): help=I18nObject(en_US="Sets how strongly to penalize repetitions. " "A higher value (e.g., 1.5) will penalize repetitions more strongly, " "while a lower value (e.g., 0.9) will be more lenient. (Default: 1.1)"), - default=1.1, min=-2, max=2 ), @@ -494,7 +492,7 @@ class OllamaLargeLanguageModel(LargeLanguageModel): type=ParameterType.INT, help=I18nObject(en_US="Maximum number of tokens to predict when generating text. 
" "(Default: 128, -1 = infinite generation, -2 = fill context)"), - default=128, + default=512 if int(credentials.get('max_tokens', 4096)) >= 768 else 128, min=-2, max=int(credentials.get('max_tokens', 4096)), ), @@ -504,7 +502,6 @@ class OllamaLargeLanguageModel(LargeLanguageModel): type=ParameterType.INT, help=I18nObject(en_US="Enable Mirostat sampling for controlling perplexity. " "(default: 0, 0 = disabled, 1 = Mirostat, 2 = Mirostat 2.0)"), - default=0, min=0, max=2 ), @@ -516,7 +513,6 @@ class OllamaLargeLanguageModel(LargeLanguageModel): "the generated text. A lower learning rate will result in slower adjustments, " "while a higher learning rate will make the algorithm more responsive. " "(Default: 0.1)"), - default=0.1, precision=1 ), ParameterRule( @@ -525,7 +521,6 @@ class OllamaLargeLanguageModel(LargeLanguageModel): type=ParameterType.FLOAT, help=I18nObject(en_US="Controls the balance between coherence and diversity of the output. " "A lower value will result in more focused and coherent text. (Default: 5.0)"), - default=5.0, precision=1 ), ParameterRule( @@ -543,7 +538,6 @@ class OllamaLargeLanguageModel(LargeLanguageModel): type=ParameterType.INT, help=I18nObject(en_US="The number of layers to send to the GPU(s). " "On macOS it defaults to 1 to enable metal support, 0 to disable."), - default=1, min=0, max=1 ), @@ -563,7 +557,6 @@ class OllamaLargeLanguageModel(LargeLanguageModel): type=ParameterType.INT, help=I18nObject(en_US="Sets how far back for the model to look back to prevent repetition. " "(Default: 64, 0 = disabled, -1 = num_ctx)"), - default=64, min=-1 ), ParameterRule( @@ -573,7 +566,6 @@ class OllamaLargeLanguageModel(LargeLanguageModel): help=I18nObject(en_US="Tail free sampling is used to reduce the impact of less probable tokens " "from the output. A higher value (e.g., 2.0) will reduce the impact more, " "while a value of 1.0 disables this setting. 
(default: 1)"), - default=1, precision=1 ), ParameterRule( @@ -583,7 +575,6 @@ class OllamaLargeLanguageModel(LargeLanguageModel): help=I18nObject(en_US="Sets the random number seed to use for generation. Setting this to " "a specific number will make the model generate the same text for " "the same prompt. (Default: 0)"), - default=0 ), ParameterRule( name='format', From 4419d357c4cfd87c2aee7e95895881d2075c1c5f Mon Sep 17 00:00:00 2001 From: Su Yang Date: Tue, 19 Mar 2024 20:54:31 +0800 Subject: [PATCH 14/18] chore: update Yi models params (#2895) --- .../yi/llm/yi-34b-chat-0205.yaml | 27 +++++++++++---- .../yi/llm/yi-34b-chat-200k.yaml | 33 ++++++++++++++----- .../model_providers/yi/llm/yi-vl-plus.yaml | 27 +++++++++++---- 3 files changed, 66 insertions(+), 21 deletions(-) diff --git a/api/core/model_runtime/model_providers/yi/llm/yi-34b-chat-0205.yaml b/api/core/model_runtime/model_providers/yi/llm/yi-34b-chat-0205.yaml index 4d4148aa91..429c646b77 100644 --- a/api/core/model_runtime/model_providers/yi/llm/yi-34b-chat-0205.yaml +++ b/api/core/model_runtime/model_providers/yi/llm/yi-34b-chat-0205.yaml @@ -9,18 +9,33 @@ model_properties: mode: chat context_size: 4096 parameter_rules: + - name: temperature + use_template: temperature + type: float + default: 0.3 + min: 0.0 + max: 2.0 + help: + zh_Hans: 控制生成结果的多样性和随机性。数值越小,越严谨;数值越大,越发散。 + en_US: Control the diversity and randomness of generated results. The smaller the value, the more rigorous it is; the larger the value, the more divergent it is. - name: max_tokens use_template: max_tokens type: int default: 512 min: 1 - max: 4096 - - name: temperature - use_template: temperature + max: 4000 + help: + zh_Hans: 指定生成结果长度的上限。如果生成结果截断,可以调大该参数。 + en_US: Specifies the upper limit on the length of generated results. If the generated results are truncated, you can increase this parameter. 
+ - name: top_p + use_template: top_p type: float - default: 0.7 - min: 0 - max: 2 + default: 0.8 + min: 0.01 + max: 1.00 + help: + zh_Hans: 控制生成结果的随机性。数值越小,随机性越弱;数值越大,随机性越强。一般而言,top_p 和 temperature 两个参数选择一个进行调整即可。 + en_US: Control the randomness of generated results. The smaller the value, the weaker the randomness; the larger the value, the stronger the randomness. Generally speaking, you can adjust one of the two parameters top_p and temperature. pricing: input: '0.0025' output: '0.0025' diff --git a/api/core/model_runtime/model_providers/yi/llm/yi-34b-chat-200k.yaml b/api/core/model_runtime/model_providers/yi/llm/yi-34b-chat-200k.yaml index 4fbe84e9b7..d0e181d007 100644 --- a/api/core/model_runtime/model_providers/yi/llm/yi-34b-chat-200k.yaml +++ b/api/core/model_runtime/model_providers/yi/llm/yi-34b-chat-200k.yaml @@ -9,18 +9,33 @@ model_properties: mode: chat context_size: 200000 parameter_rules: - - name: max_tokens - use_template: max_tokens - type: int - default: 1024 - min: 1 - max: 200000 - name: temperature use_template: temperature type: float - default: 0.7 - min: 0 - max: 2 + default: 0.6 + min: 0.0 + max: 2.0 + help: + zh_Hans: 控制生成结果的多样性和随机性。数值越小,越严谨;数值越大,越发散。 + en_US: Control the diversity and randomness of generated results. The smaller the value, the more rigorous it is; the larger the value, the more divergent it is. + - name: max_tokens + use_template: max_tokens + type: int + default: 4096 + min: 1 + max: 199950 + help: + zh_Hans: 指定生成结果长度的上限。如果生成结果截断,可以调大该参数。 + en_US: Specifies the upper limit on the length of generated results. If the generated results are truncated, you can increase this parameter. + - name: top_p + use_template: top_p + type: float + default: 0.9 + min: 0.01 + max: 1.00 + help: + zh_Hans: 控制生成结果的随机性。数值越小,随机性越弱;数值越大,随机性越强。一般而言,top_p 和 temperature 两个参数选择一个进行调整即可。 + en_US: Control the randomness of generated results. The smaller the value, the weaker the randomness; the larger the value, the stronger the randomness. 
Generally speaking, you can adjust one of the two parameters top_p and temperature. pricing: input: '0.012' output: '0.012' diff --git a/api/core/model_runtime/model_providers/yi/llm/yi-vl-plus.yaml b/api/core/model_runtime/model_providers/yi/llm/yi-vl-plus.yaml index 6195051f16..a6abcc401f 100644 --- a/api/core/model_runtime/model_providers/yi/llm/yi-vl-plus.yaml +++ b/api/core/model_runtime/model_providers/yi/llm/yi-vl-plus.yaml @@ -9,18 +9,33 @@ model_properties: mode: chat context_size: 4096 parameter_rules: + - name: temperature + use_template: temperature + type: float + default: 0.3 + min: 0.0 + max: 2.0 + help: + zh_Hans: 控制生成结果的多样性和随机性。数值越小,越严谨;数值越大,越发散。 + en_US: Control the diversity and randomness of generated results. The smaller the value, the more rigorous it is; the larger the value, the more divergent it is. - name: max_tokens use_template: max_tokens type: int default: 512 min: 1 - max: 4096 - - name: temperature - use_template: temperature + max: 4000 + help: + zh_Hans: 指定生成结果长度的上限。如果生成结果截断,可以调大该参数。 + en_US: Specifies the upper limit on the length of generated results. If the generated results are truncated, you can increase this parameter. + - name: top_p + use_template: top_p type: float - default: 0.7 - min: 0 - max: 2 + default: 0.8 + min: 0.01 + max: 1.00 + help: + zh_Hans: 控制生成结果的随机性。数值越小,随机性越弱;数值越大,随机性越强。一般而言,top_p 和 temperature 两个参数选择一个进行调整即可。 + en_US: Control the randomness of generated results. The smaller the value, the weaker the randomness; the larger the value, the stronger the randomness. Generally speaking, you can adjust one of the two parameters top_p and temperature. 
pricing: input: '0.01' output: '0.03' From 696efe494eef87bd5dc1bb130be302c83e2cc589 Mon Sep 17 00:00:00 2001 From: listeng <1536813+listeng@users.noreply.github.com> Date: Tue, 19 Mar 2024 20:55:15 +0800 Subject: [PATCH 15/18] fix: Ignore some emtpy page_content when append to split_documents (#2898) --- .../index_processor/processor/paragraph_index_processor.py | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/api/core/rag/index_processor/processor/paragraph_index_processor.py b/api/core/rag/index_processor/processor/paragraph_index_processor.py index 3f0467ee24..5fbc319fd6 100644 --- a/api/core/rag/index_processor/processor/paragraph_index_processor.py +++ b/api/core/rag/index_processor/processor/paragraph_index_processor.py @@ -45,11 +45,12 @@ class ParagraphIndexProcessor(BaseIndexProcessor): # delete Spliter character page_content = document_node.page_content if page_content.startswith(".") or page_content.startswith("。"): - page_content = page_content[1:] + page_content = page_content[1:].strip() else: page_content = page_content - document_node.page_content = page_content - split_documents.append(document_node) + if len(page_content) > 0: + document_node.page_content = page_content + split_documents.append(document_node) all_documents.extend(split_documents) return all_documents From 518c1ceb9427edc9ed0caaebc5a1e50151c6c298 Mon Sep 17 00:00:00 2001 From: Joshua <138381132+joshua20231026@users.noreply.github.com> Date: Tue, 19 Mar 2024 21:08:17 +0800 Subject: [PATCH 16/18] Feat/add-NVIDIA-as-a-new-model-provider (#2900) --- .../model_providers/_position.yaml | 1 + .../model_providers/nvidia/__init__.py | 0 .../nvidia/_assets/icon_l_en.png | Bin 0 -> 112528 bytes .../nvidia/_assets/icon_s_en.svg | 3 + .../model_providers/nvidia/llm/_position.yaml | 4 + .../model_providers/nvidia/llm/fuyu-8b.yaml | 27 ++ .../model_providers/nvidia/llm/gemma-7b.yaml | 30 +++ .../nvidia/llm/llama2-70b.yaml | 30 +++ .../model_providers/nvidia/llm/llm.py | 
247 ++++++++++++++++++ .../mistralai_mixtral-8x7b-instruct-v0.1.yaml | 30 +++ .../model_providers/nvidia/nvidia.py | 30 +++ .../model_providers/nvidia/nvidia.yaml | 30 +++ .../model_providers/nvidia/rerank/__init__.py | 0 .../nvidia/rerank/rerank-qa-mistral-4b.yaml | 4 + .../model_providers/nvidia/rerank/rerank.py | 112 ++++++++ .../nvidia/text_embedding/__init__.py | 0 .../nvidia/text_embedding/embed-qa-4.yaml | 5 + .../nvidia/text_embedding/text_embedding.py | 172 ++++++++++++ 18 files changed, 725 insertions(+) create mode 100644 api/core/model_runtime/model_providers/nvidia/__init__.py create mode 100644 api/core/model_runtime/model_providers/nvidia/_assets/icon_l_en.png create mode 100644 api/core/model_runtime/model_providers/nvidia/_assets/icon_s_en.svg create mode 100644 api/core/model_runtime/model_providers/nvidia/llm/_position.yaml create mode 100644 api/core/model_runtime/model_providers/nvidia/llm/fuyu-8b.yaml create mode 100644 api/core/model_runtime/model_providers/nvidia/llm/gemma-7b.yaml create mode 100644 api/core/model_runtime/model_providers/nvidia/llm/llama2-70b.yaml create mode 100644 api/core/model_runtime/model_providers/nvidia/llm/llm.py create mode 100644 api/core/model_runtime/model_providers/nvidia/llm/mistralai_mixtral-8x7b-instruct-v0.1.yaml create mode 100644 api/core/model_runtime/model_providers/nvidia/nvidia.py create mode 100644 api/core/model_runtime/model_providers/nvidia/nvidia.yaml create mode 100644 api/core/model_runtime/model_providers/nvidia/rerank/__init__.py create mode 100644 api/core/model_runtime/model_providers/nvidia/rerank/rerank-qa-mistral-4b.yaml create mode 100644 api/core/model_runtime/model_providers/nvidia/rerank/rerank.py create mode 100644 api/core/model_runtime/model_providers/nvidia/text_embedding/__init__.py create mode 100644 api/core/model_runtime/model_providers/nvidia/text_embedding/embed-qa-4.yaml create mode 100644 api/core/model_runtime/model_providers/nvidia/text_embedding/text_embedding.py diff 
--git a/api/core/model_runtime/model_providers/_position.yaml b/api/core/model_runtime/model_providers/_position.yaml index 97116978cd..049ad67a77 100644 --- a/api/core/model_runtime/model_providers/_position.yaml +++ b/api/core/model_runtime/model_providers/_position.yaml @@ -2,6 +2,7 @@ - anthropic - azure_openai - google +- nvidia - cohere - bedrock - togetherai diff --git a/api/core/model_runtime/model_providers/nvidia/__init__.py b/api/core/model_runtime/model_providers/nvidia/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/api/core/model_runtime/model_providers/nvidia/_assets/icon_l_en.png b/api/core/model_runtime/model_providers/nvidia/_assets/icon_l_en.png new file mode 100644 index 0000000000000000000000000000000000000000..5a7f42e61792b7b8be0529f6ae0ad4f3ba5fa7f9 GIT binary patch literal 112528 zcmeEt2U`?bw=S_!Nh$+KR%k>7LCK*3BhY{_B2ls=k<{cIT2w~Kg@8!<96%%^NKTCi zZA%gua%zI)&;-fh?!uY#UC%l9{({e^9<0J$d#}CLyWSOc`Qmp?75Y=`r^v|2=+UaT zwaLhs{vacxtT=%HzfnYSz9%Cu*GJ#Jp>vLHu|cbKZ0kn<>ekHO{@@!aW=6-?X)noE zL$J*!Kk4N)H=-{woG!PMyeOhNSvG{sxHkRQ6;_^0ci&vQ;4)}8^8Oo+87dAcVabO2E* zOb-%hR+iFWvHtzXUtRs*5C3(S_v!3^-T(V0w}|TB5C7cZE(??YbN}zZi%$J#TC)Er z0RZAZO8Acwe)7V9knkTQ{09j?HSixK{09mDLBfA&;Q!+e7vU_n{k!K(SMJiyAI3-D z?nk69f0r1__uLEG%%KFiC+SIN)BimaS#a(k6-hT>-IQuQX-K+YhmJ;wW$*XLTSm; z-297{p;W)wIuZ8>Kca#{XW!nnpgX{aFF)D zFX27XPen#LJ94Bdtt(yHZCaAlJZqkwl#`USq${o9m(9W6GfSP`p}4(%piv{NxI3D< zNUi10Y|FDWz2s1sviW97a)?9zfb@E)Z7_E;r}n?kAKW(#lq}SqV(&AZHJCKCDW&e` zR#u+xC7fO>J7oW{&%rr-b9(Xl;pi-NiL+xFN8t;a(Qc+5rU2TlBMWDf)McW9oVSlV z#z9_Y-aI{|gh5K)U`dxIw;YrUB<(g&%3oG2c+5nG8IjrYrb(^)Ydn)7%GWlXr_jmC zv%92J+D1D3-q3)4#N&3;t9v=Jd+p&O&5r{%+7AnMtLMW^nhv7efyx~J8n>RqCvl~E z7>>#D;ZN0SLk5A$3O{*VPW5ioCet6@-`Wnaj&cpWy_d?m1ItCcQ28_pSs(&qT^nt~_O?GivyKh(h z_l7#(TF-s+EK{2Q{iyn2>5=o?LK`veM^e^a?tEd+QD@(FoQiMvR?XW3m&ya>y|zsb zuAI%>gZV4JP71^~1M?c>yUq9wiK_!~-*~(q2=sVA=sfhc@}qc;to_EHXY#(T)Rc?r z2ALlC0EyFs+YvazR=fRo1QGN2Z+Ef{L=}Ym*S0>IQYvw*Zs(B06bHcWk+xoNc>f49g=s 
zW0n?sugP^SE`hx-NDa~dVzY{zyJR?_Y5k6^GD`JwhjK1Ecl8p!j=)Nz=GoBQmuE<4 zQSN4}IkY{tzMYAoLqh}ZR=xR_?pE^E9^sJ^ety491|CRqCBROG{m2HIebF{rs=HVj}bND+*O6NJ`G}GmfA4zk|F!F1=ESKv0F=(R3dqZq)%t}m zpYs^X8=B6$_Sm!O{h6_@*@q-{U%(qWtT`PUf4W6^5feqdjD|FY)soGE;4)|ArEKucU7a@gH(ZZ5bj9oVN=D4B+YdB4bc2i!Ttwyk>KY@Q(mZOa zY6n)8oq;*Fms|fAZ#AA#+@1?`FkSBVba!ZtR5!&&HG#? z$|}_l+u6x$BzP*zMRm;HD~>Yr$c*LpEJZ}>mrKi!&PM!qx5IgvGm#nJYDW!^7SHv+ zPcMno#g}<_yEnbJArsA|(b}@@Nz*<0Af!mj4t!|3y#0QNgR9V~Q0jloQ*OaTRuh@x zS3}tmkS#wmo2u+3C2z|0`h#40Sc3<#A#Fh`S1JE6KtI{1qBbS7+bt{2X>)mg8pIG^ ziK=tIY!RcIL`L+OUff}DUf2+;S{YCu8JK-u_YhGe$2ijWY~Hd5Up29qK4M(!x4hBa zv-f7?*Y{x+Jz^ru8y?UqS4vyCozB$5RWd4>!&WF%G7`+~t)->IldHIi)%sDZo-*7^ zxF#>VUFa^Dzb6 zM9Yy6Tq${)dICvYbiYghGtk$IWCmD9g~Os-K7jl78khwpZ8|9DZb zjk7%XD=*g4vsi9mw)qz!z(d7F2Z~_*FO_(CQjo}x68Ut$nMHSxiZgw=olU_BO(9#G zmGf>rw~p3UJ;yeTSM0O=l4=VFfmiiDa=-S=WW*c*-ZB3Lg*KpI^Cf_{w0y-qp{R{<=@s|K?aOQ)Ih`MVc^NzyMBF7K-A#u5 z$9TTro>x?-iJPZaN#}{+zU(m{XR{pcd~St3v%Na0l*jNSb{mtw?g3E#8<6;!O%6{k zDXxEC<}RsDt^I2xeYA(!4S|!K#%}$P?pe{VKUzqerK#J0t{IVe-_5h*JW1K7ed^apDOyG=G;K&xZf z{|lkxJSO3&*tVFOlA;On7eD2LN8;WdfT~Ts0v8SxbPqg#3?1!u9YbgS>K<*DC62 zOlrpa#yDvtH7Cn94VgYsTYz4hd9ia{Fo+VG<<4kn3iOj&exHipkcq^C!4 z^P!}F?~%4mK#D2 zlx%L{7C$izKcQa4UD>86vm~eOkL2p-C%A4X*4z^dU})rIK@w6|ZxXC3={dkFR)Vh^ zlM6kgYi+^EXI#s!Or8$~oIDe_`kzq?gH4;0)50@bRV6AevcbfTEw+8Jz?}{H)Os1qz;kVrc0b?0Vy+2Uoo$`C&N-V@ z%@y>lW4w^0zs-4g>iD+kK4iCk50=nomAA$SLSeLU4M3-U{epyyA$S6L-#wZ_F= z;_yY`IexU5OM7zp$)w(ZZ$tRdB%asOucdu6V9g~ex3cw^Gea!XLFSao3zeRSNo1r0%NXh^=KE=?EakqXf__56i-+O|I(Zw^#mkHY9 zVSPk8G-Pe32U(%Fv+23lG1K&VmfT~CYVW>0(uDRYZS%^QoevY~4-Z79L)q3F9|cZz zg4e}Nl-;23)|wE~Y>bISg~In$(GL~Tr+xD1f0+*nQwIf0tG?^l8W*g10HLl5pf9%c zRG9wM*t?csN=&d{8ST&q${`m@py)D78c#LYf_}Cvg)vrwzE|3T1>=DQxwRLh=93MJ}F)HA`q2sPVl_V7EGm)uFEmorfzR=f1Te`+zCE+Y8v3!a$V z)>wWZS*UvPD+SgLTo$lI9N2H|{02S41FJxrB45rnOV4N|dO1vd+OZ*C+Jrbfz^utB z<1&ST%~R~&29*1Dl?p|UuN=BkL6`Qqr-FhO8s_1_IrgiFnba|Ne9q!01e7a9#%aWd zArmi_13>tFnk32Ot@U_X2$fCTub|Mkya8*>u84i?B0+nTO9qh!Ar18aPBtt0OqVRi 
zHhpqgRo25=8AAjjfDS2ttVFKcZkMEu&JGVWya-EYW#ANr{)0g&GS6U7cle%pP4HdD~3MjoC>`<#H>@fB$n1XLr8I%O7(+-hS~yVyeG5{B%y z0{%Sp7IT%MS#a4^99Xh~p>N{B@CA4?+CRiNFA(Bx6U(l_1#&tZ6Znm-Y_?p=>0 z6oYu=TA%ERrKAg7jAT=KLdn0=3~0^@)W@SN4nC9ofy-yScCYOt831UYOpBuPhNja1 zWCN*@S(B0=2q``fTfH!hG^|q$$#n^~+U1N4<0sT^NuchD8IB4NK33|21#?oL3XCJY zhOj~yK`9GB#t@S6*#YJyGIlC%n{AjET`UYNGi+a2JJ`s*E*;6knqrPYaSIV@frB6! zpO~S(QndTDP#><#Og@_=-cCLb&ye4lY~^DJo97+=P=-xlQp%q*Vbax~(+eu!HM7UellV;(Zrir+M(1jM3ez+s#p zkK9Q$FK2q*As=Wp3RV+#vE`hp{|S+$PD&Dn_=f zCO~uLY(Wvu2^tBFBn7a@&a@3|6A_pLt+Hi*3!M-s3KoJzh}*7#MerNsyCzCNJ_zwL z&q5OZ0_jAhN|p(+D3LkRY_mx$KBLb&GC@E#$4**$O^ID6@Gg99KD8Y; z^rk#Kd%XW3WTK{#i=ZiQqDWMO#qs))IHeekNckc|czmV_8t=ZH^End@d->;aZ3Yk>lmZt*<{!M1#T*7!Uh<-;A>8z0< zOfFAh2?o{yDvD770-DW)Wmm~Sp87~GXwLNlkU>#pLP;HRUOqvv+WD;jvSJ?rys42Z zFpl1~@EUl2xBrj3700B$H^~oa6eo=PqoHhRA@rITOz5n2v#$~xw8GPG(Lsx7z}m&P zlRpgVIhX{!2=cXz6?_^P6*%^|nHCXc97Z#lMEO1ey~QMK7nY zssb1s6M{R<`1u;{Y7Dj11j->6ygW7fx)F#}99zT#^3cS`;*eFM{1QcIN?m7t?Lk;U zGTXiYCodsY8h}x~bvJ!c&c1ZLIVDv-Qj@FlI;{T4V<-X_Iecq7$x6!d<~fv2y@{Tm z77)Hwc)-l)*UH4DBqG;9+>XIBz+|OSyY%c(VD#WVkJo6UQ;DZd=b#JbPuK|H@@upt zJ*+t=J(0eBNu(htRwKSm<~W;df~Hcru+DKBk!{-*@W}4R{JrM%PEGvqKo9ukedm(b z30CZ_lS>u%A*;u2YLKSZc}s4e)TwVf?aLCo0+KGK4~0Giy`+!$EUWv1_a>TIKJ(MUMtlb zujY@n6#q#9mGWeTpK>t@iZ9hk_lAYR1`-3uB7D&A2<-l%hq*|C&YKwI{pq_2{VN-^ z2mR%Woixpzdl9A~cmNz9veB~Uy^aS<%#%d^$ysTHQv$4lEeDX}4KUR)6*jI+My>iT z@)0gapixspCmAzHeT++HTvup{ATx(&$b9yTd5f&jQ8!8A&nQ( zW{c=7v)Q{PW@}iNjkIu6V0$Y@kUJj|9oJ1I=}aGzoYId3!OQ5n-Y0Mb1JdpL zc3|C#w95Vx&H8Ug@+CuH$Fj;DAWJGvNO8%dZI|oKZVWHo*4jSC+msWi0tln7rw{q1 zh>R|0ZTK1U)j^rZ=KTh_^O(jW&dSxH@uJUp1fk0djyE(;ECSq`>`WQw=3-36^nIRh%N|y1~l?s{t55=;itqoc-zp|8qQ_;AjUzoownK08#_& zC~6~q^t-R&$i&lGj_zGmVgCM{wLFEN()klPVM3)#4oFr9crs&1pJCsZgR72{$LTR) zbW6+S;%HP_(*=kI?__*AQ89Fq08NR|jGrJ(ZBreC8R^$dOA_%v?cAgUJCVk7AO9$1 z_u`A!3CKh_pxKi~K`-LUgF16V89|;DPZdnHb17FA~OE#Z3Fr=Hv>p$3T zN*7jzoQbVIL!>v|(%Y3T7%6~gcx%v9gDad)c`GRfN2zZXv~9By+;^lnBkQA)d7sMF zFXC;6Mc{#(dABepOhZAUy?uHaR=M!qbx4@Tp2V;ik4$8TrSa{iSt@_nTVFA$S;&Fd 
zQy23X-MF!*;LxjxC{d=_E_P`?Z2U7WEF>U&bKP28Xy1&lMA;4D_F7U7XR_I-=r#O; zEy(#JD?MsLv)6Hhxw@!PpU&x1yM_CZOhr>dMD++!2vOb|?!;lc$jLG?C4`V?VO{bc za`99Z>j4!ly{H@rEc7MqSLL+foqY$zHq!7^9j#J?^n~=PTmQuYd8vmq4AB}1GsJq; zvyrbf;@7RU&LP#uVB+NVk7}|fu^We&eR6&X1KDa$=A3|9F(+r3?ZjgcH$grZAKfi2 z^N4ew$}}ntfNTg8+rY4Rwd$m_k$X2TvWaake`vOT{Q6PLY!KUit;_R<1p32vnW~{j z*@;cqx`*!n66FXOm|dY0KOB|WmY)o-m(DXEQ8xQ9LcEyH1*%g+Fb#u)S!GuwC~Ei# zxBZ4(3UK$FN6iCmFo@$|Z1o22A%Hy7X7zD$@L01SDHKvzsr8Npn0*-s8U?Opq6{Ds z*<!r_wMd6y5q0p=SH$l~Y-#3msP*VtP!1F`Gwkkw(!c>1a@H4QJkcD%cA*|!5 z8rI{o1SP?Mt3kF9*imdj7tmtNHvEKJvmYZg*%3;WG5`gkpW3mGd_zT!oB>Yfh9!in zP7di8xOamz<0DO8zoS!f%-+ly8O+r6^m=LYb8IR$HiqS$pkcI7G23lpT>hmufZJ&Z zjDBTB@W#6wveZ`{o}C#zyi>@7I6AclGK)l;pWfW%a(?v513hZE^2cfNOETYOhpO^9p zu=5WjA=1RFEI3E5iuWrrIGtk|DXM6pH}ef&s>eK)N1gC2EV?TGbyPC+7Us*<$(1}D zm)Gu)2ZymzYrj-v0S}^2RHQ1Bo^Po_DM1WL?%;&~|+OJ7#gk$&Gcu^h@y=TxLy=>R01UK_eMjyZTW7S^>VSjqZ* zY!JtHiU$nHh|w9i&s0Se@KgkHnmA()CjMNo<*^wOKWhFV8jw8#p)R#6_L>p%o$J2Q z*s|36bJqA1<>BSQ^2elQh z1$5L{XDmRH2U)H7icIqxMoxb~@PL%my95-4m_cO$`h5;gF*R4E5gQCw8CwARP;*0> zJP<{c+q&Uw`NQ%`7v=c)&`Cd)x<2AcYim(OlGd`l#mKWV2ICUHPJyR@5uJiSH)t_>Q28CViV|<+L?Dw=m?ON8T1$Fv(8cuNRpdpBtpc!{t~IN-=I=wLeD)D~ z>aNa9&3X=819HNHaw3COG7I&oZh4}wf3Mazc+F<*+RDZy<=WpG9Mdhet8Q%Xp5IU! 
z7FM8O8)1Jyg2T=0VcW*e6MN9qDLqT8ciRlk^a}gU zRiPMHVy$H5)Brv>(;x-AzhiTva2C;^hIOUsJ1nxojElh)f~%(l!zU>;^g>Dqy;<~w z12rAlsqPPwYmz^Dd0;ju+<^;mrMIb}wtJgW-_)8=SRd`298-oG{t8?Lnw5CohcENMIs$ z@Eba9HoSlcF3e7^6zrmf1K_?&B5EvBnwU1OG%oB>m$O6zf*$7h}nl(Atw{BxAqYQ zO{F%*VfGzz&H2!?M}+nX*Dq`f2y z@Uh75NixnCQK0jl{6`!B?~Tom_lh@`Kv&z!HUBfmYKXUVdlM^|0Ce%f_)iKDmDC`O zkA~^`D)C#=}e%#Jz>!Y)(uN;1(=N*mTYN zR4Kz6Bv@%HU&N*b0Y_WC27(A5GImnD|2Rv!*e$-Q0Y+8)4&HDbG*)A=2SX*rxF@2#wE)V1r_DGiniXlw-QS@3e zw>SAfer0ThF+fVBp=KR5;9yJf!{e+#qb$J!45X^d2b+s1@ckU2 z_~B5}Q*DhtWz|3Ed3o~YnIs#V8rP|~*Kqv|uBiqLogTa_k9u9N%_5E*nsbqXJt96t zSLU{u>~Tj4n!}JC@}M|oCd%Qr#)d7gg5p{#YUPtHz++b)vNEC9Wa^}{eQ4)kOrr`x z?CLhunbq7W*%)qbQ!%Zc4l9usT4W2MbRm9GR~bI^4ssnt=+g@sm&)vZ9>7;ALw*}& zk|NDbAocG2l8ecM*HLeP!MIdMeMBKJnMF#13OCt|Bfz=N8yXrMt zo1(r4SG|Qm4+IkC51}Tk6$b-TTgmn_)?;nnY1osE$DjcTP zEIOHm^s62o-1Af`eubjeN*s^y8n)}JqBXg;?7512SJGRd0o9iuUaBVXjC#KqGv491 z4Nr$#UB~7IHAUZK_O^A6zH5T|YTvdeVMB&43dhQyXw!kaOqiEU zR*eV=w}wMBk9%ha8pCZT$MS||?>Cya$n+OKEWe_pw4#jOpSZ_Dn#^)0?M)Knwk?2o zjHR7F>q7-rt)}~{n){`~UPd3-?D@MV4P`iaVy)ife@aKjj$br}d+_M^&{BhEZmUkl zaMRd=J1P!oW=}os@)k+>oEV2ZYS~rK8{Xj;;HY)rY*T;Nm_S$XarUy1vFxI2Polj; zg4F|*C8t8;tl1*BgOS(UE0g9etxiTDTfbY}nj-7mxUwG^RuK5{ZX+>H2C(w6`f44O z_LaOZkG_Z^xR%3$eZT6j0oi#zwQ@CnsRUCfvIRf6T1aw84m;w7p9YjZ)gUWv?Ma>v zpftKv0tx#~K9X?^iCwAaZXw+yWN;iLvgw1B-WZO56^~lQ1EkQfaY=-miZeJF1F?Y8 zDu&`qrUTSK4`?IPb8-Z<2>%3yqU_pb)Sds5>q_cNuZaX}*`aW78ubo84r!FEzy|$E zfbm&8pWR3rHI!+vV|;(a4Y~~qey4hgfNGem^&sn|k5SuiyG7TE6WK)LGzvH(3fK|) zA|h4uMtW7Q2lt#nIQ6UPRL6I!hc=8ARDX-dUYUH<0?s*&i|J~YWaKf;b37mHl2_Gg z&VpcftgFSlT21n+I{RXW8u95`7UG((G@=?wNMlig#Diy){ITQfKfxw!ChpCb8t!GD z4&D=%UWcZVlJM1lSH9*UTe^%y+=rL%2rz~uuqhae8P@$F4YQ+ol7K|bK%%d69Gnsi z?FO9rTnGZPvW>WJQ6ATTJl*)K7CG{Y`gTnkdZX?NA^oYG@m5DZ{}Vrg(*1;b?Mp7&*j&-IO$S_n47SQRy3!R;&PcWsRr04)dj z)CjpVR!8HEzu`@z2u{b5-A>$N7tnQ;p%&c#NcO2J4CQyO*1_0aY%^BlxqXqf)%xNo zqd-s?#lI-^d-g2xUh!X?!P0M5n>{!Jh97nFCV|4|PO(OE>7Y-RvuoUlCYLO86hLvY zbT~|ENFUDj=0h18X@cwONr&dgfGao|@hOl-EKIDi<9a{+a4qOf1IMaW4J8N2cB~%c 
ze+Gcx>Lo61gLARluj8z8QV!236;*s{5%vYeKY{>BMIK^onx|blXsRNde^pX?>^y`{Q8b!NKma)dJDa&<;Gbc5qxdFleP#-4zahYtoSYW zCpRRhiQR>RfsHY&8_?yeA)^`R@&+eikVd)>ryzQ{HtQjM;2p?{w^dCn4g{;cMb9-E z?}4knpqQ@eeguf=;l~{#Zi^OBQvoJQiQ~g_a7NXGj}P9te4NviI+&>8;J?3^*Q4k< zXf49w|fZ>)5fWw5kq5CL3%yp}%X70glM#!+xH6k?v(zhvs`A zpjRNfr?L(B6Y0t)kLx|i*#@EXB80@?0E66)q|>Sn?KyFGG^&7;kQzhuyH8rq8%rfM z4_(Uj&-vE_*bG1V*c9F-0X$y*JD1l^|} zr+pUabHvDMO3FO=TRXMmb}}4$lbnl?T^XASU?+=h5r5E#D>~%{5sKK-qG=j00kse$ zyvZ3p$&)l;6}hi(920 zj|vpo?;71gJKb#cWgJJ8ljx)(svUiY*wf-Q5=*8JHmkynrG(`?V(!z_-TzxG1xKZ1 z?12k|=a-LFI$|c=p^d;g?;3#7T(V)H5dwZFc+01Tmrsg=H^g4(nVUkxLF?Y~AYHRU zC&gJkO!0hsi>94h1$3d=MO(>l}`Gz$zy^JpcoBq8r0gz)5+~@KVCD zP47?TS^Bdqj1_nKgoSvqaeGq+yp0f1-@@C{b3(W}hVE zOMMtK^|riwb|U6(qZ4b|4~jmuL+sBOZC7?yfKe#9#wjEM!NaY$O@hA>A-s~YEX`N? z;euD#m&#&Kd1(s-t1Jg!6#&f)b96NGJftuCDj~;s^gv7j94sO_niU}ehQ16811-(d zp+v@2UgL}4DRxQ)9QAI=B@3X7GuqU|uJt;SK3b4o#^{Hq3#f8GPH$;$cxTbsWSj4j zh$NUGazFSEFX8(fH^wbGLpzKoEg?Y?tx5&uivN13N!`OrgG82jmSy=d*BGgU$o9Ed3AC@WWyp% z5hcFRXp0(~;b&T2WUT=09H)>Lmz)OBhXd;XcFJg6)k9J6MBBLvDq?w}LC^ynfGz{Y z=+9hwQBdm>o43SaEf_BBu`Z(uzNU%#jDo*tuxe8)wgM&7*P)e-mlhkRIQe~M{aKR2 zMMf3+-P<1rT|v8oL($nxgs6$Jds9_}BB~Xb3%_kicq(r{x{~bqw&i(ewhXft?AGA2 z((+*$eW^l3C9-&#iw9m8Q(%a-g7QBBh+_nQ_HhSdHyhddS#8a;j?xq=uq9O@rxo@O zQe=td_rpzi#lH?jT+xQ$^-{_wBqta*Kcy3gn))ne;?QC&M{4Yl73qIny$E2lf+g?JJ(nLyj%FY_HwgkAkN6P(V1f926ws z`v9AZP2)qWWe7uOv(uxzmWSKe$TjceL~`M5h^gA<>u;#{&!M{x!z3u%K720Tq~Q2c zWN|Z|KIIA>U#3n6#UXXYpZN*SR${_0VoKn{YH&$BV4xA9VIWhCdfap#b4`2cHU0ic z5*5_-h(X(z4$jKZmqS-^K{TcacXBOoLZ6XgNH!Id(dmYara45QwRx>$UR~m<)ri3M zTytgD_CV1MbA}BMmP9+Mre_qWTDG2_+hQOe)q1hDGTheaSZnM>Bp>b{_#~3?YM#)| zxKYm9uS$H(8w=#wO~IXpW!mh~A-WR5ch5Ss2h3o+cceohlAr4sR^KFwU%Of&k58(0 zGwy*!gDzVupD?a=ldgXxT2z*vkgY1%n=;uuZtKHW#C@z7Bs%@OBqJH&4Skb5kpbP_FR0zsh#W{ zpRTr*+X)BZ2usVYll!N$!(V#PS@S}s27E;*8<~Dx z5Fp*k5X%+n`Xi&3DUqs%r@#N6`F;GwNENm00&0IJZ${da!{8K~eTd=~+=^qTXTr8% zqyF`ggR1O&Ph>jQ2ASDroUw5ZKI(;Z(Cb(s?0q1UcIo$+j5RxI(`QIQmtxhNicJau 
zN?XdT3%aT&=w7Kgobqs3OzmKdzMN-(|NSDb-BaEvLbS6GMjzp1%UJGpWg>_#^9!sM zjGhYi@B}Lv3|mXBj5Br)c0fJZ{_|L&VB27iOZkV)-X~r^*O9j&-?|`pr5GouI#)zL zd#5M*hFTpLtPf)wax=2glMXOmdalxY(?B|k;=z?TUnKekbVs|%{WqB$DqBiha+(Wq zWA~|~;$J$vWRo(Dn_)wuvwt(c_waEc&EyBe8^~rS>Nt!E)6UP*D`IWa^4yE&`7V$&etB$3}OJ>*-wK&zMU31L){$Hu*N{8?|u+ zxWBw)jLhC2rO#7=UR^Ro#*Jjw*5te%&pRh8j~iFDX5ky7!$x=*N}2lbj@^KCY8oA z*#;vr2EUz>Ak*R#q<<6BHOZiJg)L)h2n!EfI!(NPovVleysE3FP(+2kT;Ft^sfZb^ z{m?2}M{g-0*^tgZRaF`zqrI~)_Nnc5y9|kp#dD@&;w#e*1C;T=Ln0tv^UlEx33fdY!xi8!8vS>3HAcmEi$g< zEiMx$j^n!Ih`_qRE=6QP@{eekivWirJBozI2nvc&s^PxF0u8UJYJZ=?YQY?+_LL4% z?pX_DAEM2R#Nv5{Z(dHja;@0mlZ1Wd6Hc7`D^cC;+Yu*GJUs(HURAQcCk4>toYMg=Ym- z6xqR4iI73qZ77^Hrb+Hfv2@v@aGLj_slN$j%xq~BYfhLs2w_Z&cm#MeuY-Te384)x z#fihd11|P#7NRy$_0`1?)lzFI2+BM88XzMCR}X&~hF1Gi7{i7UgvU9?Vo{KePd<(l zcyIKbE=402Ef0`pB#()Y%=})zNQZ3{L(F{vaEwunLIEY|Q07QTTZy-Uj3(orb z$=RFWKAoSjh8mqylR%!2qf~`*L_#voEG%Z#)m~RQDW$S~{#&p_6?aq&GgEW-aO`AP zk8ItuSFRp+*kg3=9?=`Hh!2m9;^{%$yrWjJj0*B1db2#k%>Z_rV?B41-H071Jf~ob zOrPBF6(Wj!OeKda|2icE&3k9Kd$Vt!$|SymdN@Vq@yrHX^2GY zCfw*UHS*{1aI6&`s7#8$J%;sio0xmeb$R(qUMgxq&d8fRTlu$zb2qimTD?eSqNH4+ z?kyN>b;zJva2TH}psrD(8r7PrR)sP)UP|yrTsijLF+tq-OIPXu&(34-sH**$Kqem3 zHNFyTq2u@uu|fZ0+NOokUH=Aa8Iv(DdrpQ6t=(fC{ZU_!`SoTN(;Pjvp87$wa>vyo zMV3V1FewT?P6kOc396Fs3RL+ZXzH}NPxhBS;9Mq^*pQgBdG_RKVf>>EFm-3%H$MSe zqNj+$;CYw%>3%Grd_KV!(-h7()k1F^ThdPKOvP_9G+@|L6zba_NL_vIV*xgv$n9l2 zixyQ;DUnTIh)6ZTRsBBEQRODQZ1^Qa6~O5SZcQUzVDO)Hb8IgRRk^hS8{$NMLIk-K zK^goVck`9|7$qY5v5NTbp&1#8A+H#es1e5zIJ4~nAeD#k_C*;e0A>6rr6`F(>gpoU z9UyoJ%6qM!`$ldRql_0ZH?e!IG}r#&&)m3tuz0|~7a!H))4us*%25AQD}wv)@8`Nk zy%mZmUPnSsr?WjdDO;7byISu6qA=$G%78K?cEyQ&I5co0yNorQFfx0W9XLov&e!vE zvU-~?#(q4~R25v0ZnD#3dCpXb{RY%-+4LsX`>Y+EVDGi384~Yl;-kje0+y)d26q#m zyt;1BLL!l_W<9+wO%eiOd=20htfO;x9c*9y9z;(e9|N`zK~{*XfFOR-L*_tIusU`U z4D3?<$crX)p3=Ap=|~zP4}y3^NN*g7$z(I9ckX9&exNomOBh$zZ=c*#gP+E$Yol;jpV5u+70GI)x&pVe4j?-{&Jz` z`GBe#REfi?61K$6f98dQZR8@S8ejkSJxGM>aNGslPSW7 zORqR1fQtFM@xdsy;=P#1_?!PCN29MX3UVEidsFb=K?v@60;0&sc*E$={s{I?Q5!|l zDWss`Xb3X7Hkxw 
z<5-9o4S&VJ@jLV?8gk|Z0;mct74>XqG zfpxp|f$H`1k4M~*MX6^O{2uIF0Qp0zFR|wIBS#^O+aJTjDKmbmCtVIY7s(H+9GRk? z(zOKNhcN-J4Kp96B8qKWqQE7)I&wOSE2^b$*(#XPJyNVPzX|RHlK;$?b0k^Gw8)R( zQ}YA;HQWbP?X?=tFO-0!bzBTHuD#F4+2PLe(#C9#dGY3T+^+j8WyS@tU_dC90UKpL zSHQ3iJHAcfio-3nKT`FUE8WGTehc+^g%cw({~bPdEd?l#sPf33-|IRS!a^#VTun9S zoviBI)f4;2yrF&N!O49g3_TEg*D_yK+qINJpR)coq;n5QU}Y`pOrUBfk0+y7-Qy!U zng@I-s`KMkx&vigyNKI3H}`e%ZE0;nlEC8lbbIWMq&9`IY*bcoi`=d^GJgB~G4!F! zxgeruOjV!u{%xC1?M)%bV|t!j2KP}KMeE$n;>~Qdc_r~r%GLERZUagb6$(xW18vV@ zd6HuRA4v|p)_@h{Yse-2B#G~tjUbk>LCnvp)wkUk7rdRJHYu{J#MyTeX zueow!7reM}3QI{cEP^+xSV);CX)t2gBtS?#%y2Au4(`-K!>~S=?6}G60AXt0)RrtI zyAKc|4YmeUT*QYjX&y5Ml|QARmAzu)omY!fk>AT!QTt<=v;6s&By1>*&aZ{A zH8y0JWqA18lrsih&i)>i@tDAuc}rC7&woOwUC9Xp_=^G?fL!ghyA+xLk@wg#*#GKn zflR9BB4TKu_q@A6w1eIrrZ3;ehBQ}OtQ z>Ob6t|nZkuz_Sj&_skvQyIO+MZAzP_&Z|_CAtSAgP{TuoUASadjIs5KNApE zoBaxf2>2|`&Ox3DtrkUnoe#=*N#N~?E;Ee;undb?=d+65)FldCb5++${Q)9#0ITdZ zsz_&13@4itsG|;4bv^Gy3IsDdQU}r*Q)$4TxPhxH7n_M|#C80BcAxzmRittbeH)H7 zM#s(#`$ClBlQP}J?$)q+zwy+Y6O@RHEx=iv3grH=Ii zc%!O|(&4ZnZ&S_5AmEHMruHPS9;4DO?+v*y%^MhR@?vm|NYf);LZX>t?x)aM$;*hm!LXI^mGr%m+3G2SbsClA5Jz>p`Afu@7VRc65*L~tlr7%19$g{+K1C>SV0d#Qzz zHacU&hx*&8eM%^!xaEB2??cLO7*9A*i2ym3omeqF1*HL!TQ5kxHX^H60O|)cGpH_3 zpnVGv=0Dm;92{1@yMtA~u6s+h2wot)I03gJ(gnI;<7P-0p4yjHdqQqbtL^xjz6ODg zI~`o9N+_rK*(b;td%#e`1jY?YF@ySMPLlP}9Z1KM4b)18=J&Eyv8qPF349U5jc;!k z$m4hHS*`|WyvzYj3Zz2)4waLWC`Nrq`Q3TlPQ2DsS``Zh9bAIV0*;ir_3Vr0(rou*?$qh!8tc@i8KZOf@8a-4 z?i`aa00BQC5;*3vj=Q1u9?bYQKY^CdUxX^2VlrBJn)SBtAIT%Jwj=XC`ylfZq>m(1 zl&p(c(BE|7)Mox6Oym~xKT&bYo8_ky*e%L(Ji5~(bz=XwY$*v!a`bml&h7C2^gMJ& zDV@Pf7FBX#P|f$>?%sake}$?5VRJs>_u$dfK1N=to6D#u-t75# zPz|8@&d7>0RptWOVQG0s&|6w$o~)T`}iM?D{X+?qI#zMMp-{ltAU)=?CLZY7<&%4j!L z_KG*(MD_t((Sul4{rq%=?+o`cDEip7nLTDIBq_xyQJ}U5=7S{4YFK4BbcfD^#!Ezo z>l<1P&{OzXQ2Pd|P_zUm09@#BFi-=ETasac5hul#@pN{Uzut|}jH;LHhbrHh?Hj>m zU01KzuWj%Ox`1}w1@zwKF5{Y7Og5U(?ubBV^Wi#7;uNniZ&6(>?oE5(d`yn{5~rue89 z1{qOBr~NE{(Ng&jhZEmUaz2ruxIjv@#f#U=<4g5Lrwoyq$}p+geWcb5$Jndz=zDE; 
z2kTJh&bW*{T}{>`0(S3Or=`0PaRn{|dfqWow7q}8O<5PJ!vVTjvdQ7E{D*=m)WYQc z?x;0C@%$G!!pzvnrfI(eJ?Wd9v4rQDBPn%3=4a(qbCQ}nm zE3!2nPJK$L9w>5UTDg_;q@Bgnqix1m&6soC765Jrc0tgS(#yNkv+;j;x(;|K`2T;y zIa~JLXBx4T_>G*`vE;&pWazDc3j&$!gFwP#NhWGOwi+rKq(3pVRaE zzdWxVJ$dr^e!rjb{=7f$@0S~#x=eWv^YIX$mxpI3zye+n917xIb%ZRyJZyz2Kjs^9 z8r`bt_Xzv%+hdv|Uu-*QJAzchgk(2=OLgXIo%?ihe5H~TAAO9hu(qEJA%F4s@O%?f zNSHP{`B2wP!QXY6pZ8bu5=I37fYb0HInw-nt#I?z(0CiKOp-aB6=J~Cs+Ow;R3_UUGscGIDQ+o-l20lwvA*8PM@;r2rtgg3%+mUe39JN7J? zjI>6+m?!uRTAViC^Dav-2Zr+9jOi z`j0mZH?$IuHvk)FP2|dReM7z>|00%?o=Xrq6LF{o(?9*^%2ZNPmfnW6j@G zaffbijipRgrt^Zu{Lp2MyDjq=`^19|#Xw5$Q=J|N3ql@-zkz4OMIM$MTPqx|R$JPj zHJnjQOT^`8@RKD-pHtAwGM>f%fA9>JBd;-^Z4`*dlM=olZD>EKAK?J32n=^W{_v8R zCDWFU!VjcZiDY}u)K6`SUmqnZuIf?e&q-+r`?0u8MA@FW=*V#$Z`ayJ;C6lE3jEkY z+#WdH@H!r(WPLrwubff)T#0SOT)Kpn)d)8~i5k+VE*{%K zR=Q>3O3s=d;}^k>`ssg`K7!+K9d^r5pzTyw(%ihPRbl;S>oNY+2EmKEhfGWa_d1fa zPsy6T8cz%wE`FoP6aq9duZ{U|DQ%p_A~4MTF}z-c;k!=^D~tY%_8V)XA=NWWQm@j! z5XQDg7co9!*@zV<%P)wg{iLb!BG@Z|tH=EO4&B-HaZ>abDqe;Z^NQA>PE?c$V)_g| z+jzI$uC9PCOn0A|mH$3U!%{Zv5xz^{MB6EllSKAoTP2=}{o1N`<#PFsB)uj(sraVO zsz{iK6-!1cRWKfG+PK>j@3QN>>#jbE$$}(mv-2x5+4GnVH1K33|$# z8Oa>>EAS}&W_C4oU7IW5l@&vW3!YHNsXd#{iK48SODFP~zlRaP%yc?&%l<{8^$cn~ z)ek<`-mJ>{^<&wk5OsofwWzj6rd$kaR`{(@`q#~X^x{6u@RJoe1ilZVZl#CYnq-K( z9+)(Wj;g{cY>B&-8nsGB#rCTVG}>+6eG!T5|(i8aIDjAlJEcrWMtjc&N^!wmN3DNX*yh&jXXeVu!)md9gg4G0Wb`ZQxn2 zt)5mA1vPp>9p-PrTmn>S9)ZDNK8`V>I5Z;Rv)xww_rZjo$YyRDy_iLcz{T6xW(8t3M{mBM{y+=5_wPegH zl=CZNywK;7UkJOlz3M~9Mkn{^47}s<&qDBPcWeHB!I%j#W!~ky2p=Bu+|_fK+u^8N z+k61AnlM|2eyS0^eKjPuOzMSAio+V((_-+Y=B1~ECNLAcmf_IJLf_OQJxAu{*eNh4 z8a{|Jkpjw|w~YKvNwoEz6st{z&EKjD=zexq^-d)HI(?JN0#8t0S_}M|PXx_jnM~_H zE{=jDE2dNA{ywNhJ;^ay&GvCLqnHn7nSb@lR`?J#ey>No?oG~sYzAGQxkp31HF9^@ z#h2b>UOA|>a9KgkyLxU=w^G&4dcMQj=ai&oA{+Qq3z+moGZ+?qj&u(9`(J{t`-uQ9 zMj8&{U6Qd0hINyl`f=gcTEu#K?Hey{DvtP5JGZ%PO!($%_fphH7sZOLyI%dMvi(i) zECEmAEPuLLFR+=$s$jqO)IOg9znWMrsA=Akc@{@^FOk0#y&u_%*#3H*zb*UN!M_6% z73?K}C$w4D0(FWB^jH}pWw2_cG2!6JQ!kNBq1mAOt;bVt+PE4CQjic1O@MoN=kZJe 
z!a~TGn8rr4m6?Z=R+}gV7BcAoCLKLnPa}Zuhp+Or?`O}lW#Dta?Yz*YPTJn;Xa5Fb zYxuE{GXF1V-&0E6#)Kiq>fRY!wbyklX=Bg*+A7--byltQ&j`jU>)f)FD!lu_&qc+5dd6tD~xQ%aE}lpeHKMH z@VGz*^Ru-|yD+a&WWGxpvE!8s19&j9nxl1VMf&7FF^XZFp zcTLLHWABp=O{6h%t}lD*g!V)S3Ml*QAB-1JZjD}-8OkNs;yGEQb63-MaN*;Ri-+;% zTzW+X`!Qbue;7)tD8g+wNvE;%a@jt;gBy6WJ3qokgH!XEDwGj_2#jJ{bpG{@TjFqy z^2?UBOs8rz{&$#GIP*Lr8VcLLh<^AnANKw(Z|^r6EO;_MUssCHl511gixqRNEwEj{ zlf_~88E%QHzz^ip&S{uRg&VuYYf4=!unV5<$oiYlY}(R74-na7WhiBpcmFw`m=;Bz z9?&hIp`Kq`Z(E2iM7J#{&37oPn)c;U%;~TIM}GWdh$L!$cS4E>ZQu1n$to=8>(RkC zYT3p)(0g86aSABw{UJLr|~i4;ujK@iwHGKJ*q^g9dR-H;C!LfdV&R7p$W7W)=Nw2>15&`;6g) zR`u|qn6JgZzo>@YlQes0dK2$nx!%N1iuLBN&@X>n1@O#r=@2{KLAJplnx+}kDv1h+586Wm-uxn(Ytn1F@gyLVZHx;cb0m@tZQNn|~zU?)n(XKULssm%!ae zV0R&h&+up0G9O7=0fm4& z_b-Kx2yu(qM!xtqw3@r;Eh$SVkARyy_O+H%z13RL?Sx*x=qh1!k94(?X>Tn z$iAr%zc?^?GhL+kdp*8SzsXr>hNJ3 zNlU(EIb27%ELjK~NO)m&Nvzv{GJ>T{Bjf-ghZhIkhq-~w&*S&*iy+%jZ`1+uHu44L z;==DW?d{w8tS4{N3t@@#TKSFw!Dc&@4Q#fKrZXkB!hp>zs6d={ZOx(m#}r&60>}dS z;ggHskqc<<7@W}QXZ<&?vu$?b#JxaB@zm?p<;ae@xd)31Orf{?cgRt?9-_qeaAg54 zF*y1rymt5ySX)_H1qyDfiHw4gT@Z&pr$ZnoJM%V$8vsazImp#H)={4=Bo8T6SQ^|UIUk_yPAH1u20@jaKi|1 zazpF`zZh?>77oKfpdDjPDdr2SYD;XW!gNl9!m=qN#0z+#4^A!B|ItG@yOB(qF<>TN zg*{rwr**3D-%pVU#Ckc`7JO%Wah-5PEr#1JPA%%H=YkV=Ka-v{A@7--xWQoBog*Ca z{$-I!Uk-g2HHf{2y*?{%1KpkZ=JC}cEDb9+?f&Mp*Q;X)UHIrx*YvKr{G=?>fWG9cgY41N9yeL= zgEL@!B$DM4HHxYHIDASN;sRnj=r33|D`~TY*A^lZ_c|5$qcBFUwN>H>aZREk-B2-l zZ0*4i!BV7$#(c-=4EBX1*G}wfaH!a>1@l8FrRZ1_gQo{Y=)1D|a!fG9r1|w9&kIb5 zPE7K$(;lle{N4y}0TYmBYU*}rZcs_7d%p#S+4|KsJ?Kk9&1IUma1=2x9R49SokG{4 zGB#<8pyQBxG|!EjR|}5jFyCAifE~xQ0;(=a{)qnIrsX59+^1HF+WozQ=!m`*0KYr1 zflmjynVLpRqq*Z)wnpeq9($FLf?)khr0G73JB{Tn=^IK8xlxNawlBy$?C_GQ1;4#O z@+wP2_{T{F&C?$qKF(!2P-epl48_b0!Yp-u3X5&5w}z#msfr8tPCoT^zCj<_wl*CJ5%c2LK^M~=uy>P9O%%#Sc)!JJ z5R(q@FW7?!1*R4+McD@luiL7AFWQk2mSQaHceF+7-@B?42*Nfw0kyw>G9|9m>@rF) zx_)u))k9KmNlx34A301$YCEB$&fKDZ>2v<6F5Ct4$!(kPDSa1i&bhf@Hn@cD3EJwI zXwl2bxsh4*mYH<1T{*&-qVV;e{rNpvlF0GyJ?mROe%DE(QGQfB 
ze6Q>W3r4~@*;>Mnv=@iY)1SW;2#cH^K)SBNK-;cQxUqLf)pleXx~=Sod`qZ-uDgP_ z&?e{uM5W1x`P3eWEquN9ZLtdDaaDrVEj#mFhH~&qUjywDOj`xv|DI*=1{E7^(1BU+ zUK??B$mXP`euRBGQ{@4wN2_bAOkiGRmjH8lJP(JTO-29W5A4nDMZTndVyCj-)6i5j z^$LM=`X{212W7^}kvC;ot!t5SjY+M@4)zX0ezg7q5l6IHom;tuRX9-^d2&5Bn0uUH z^HB*IHTN)DiS$q=U?HN_EI%upZO(Q4v)<7~^1g;>8cBF74L-)NCu}V_<5(?Ierzc4 ziCI!3^Kzf=jn9T|wP^#kDqW^0Xlu!8IAYFKM=|_1eMg>rY98B%EUiu)g|U~ni)SmE z7J&FGM|||n8neZO9z^iE2DP*@oJS4#^pkm!j2Bfyi6eAxVqx zyqgK@@`)?kFj3CDMV&S>db7rTK?b!8PN*JE2#&||QVmu6Vu(Lz_q50NQ6I1;35lI} z!lyPxSTAc$s?aZKy~W@Lqz*!gD#+&Jtwuhb877D9LB~N*o8J_2RAqfllvkbRPo%vp zQoMk$rQ*Ub?Dp9lVZ2B~ypfDI<9O$2rmAu`Z~T+@-XVJJco7N5cXoZRm$1YW{vBKR zo=|VO-_DTMj1!nSC>AUg`$NMzLe!_+D}g1{WI>-xc4?4=VlI!U-V{Cv22S_&K$D5fG$J* zCNM&LWi5`A)N0zIUS3~iDHXo=X=J9AaM))(QjT<)S-wb5>o=1VS%SaH1cqH*F)T6b z&$cLARPxTKb?ILB6xjRKbA3HDC?V$`Hh+zpIV>HR>AOqi$?T~lhr&Hy$%3#>57r8Thl!Ck#{f_<2e;yV-vV=CE9sl+C64iy8 z&DLPMk_{P^T?vq;7&y_w6q;61#K*N6BN5y;zymApGN)Km>=;bB<4zlD=8-y)3fwCz zOZkuxfjh9xq}YQ_B1eg1WqN~s3>WznR8dAq9d*{Mq2AzR#hwB3@c?81N{RmNef6qO z3lpYSi{6oMKi)sFPdp4dyp;mLc=1|~`x-n6xGEg?GU3PO%|$N`$nZ+TAv@#*I+(v1 zbpYedXHAu17ZW_2UV?=%rBlUlV+buR5^s0J|27!R9SQ`|OsuD@b_lC*t`s>2YE(nc zz@-fKL_I3Es4_Tdos{#vpVVSOFGo_Xp=D)0`xi;=^yu~D#V#4*kZgxNjL^L~p)+wa z*Cbipo~!p`W~6(AMVrA@c~*Aab>;5mMqgC*N8jTAgC!jbg=~G7kk~EFj875@&jilZJ?J(A3-aNCyRoQOa?gCcBpbQMQ(*~MVQ^;7$L`Xu&*Uf z+5U!RufJ{l`}-r#)2(K3qu<{xPJnG-cBDSFmB6#SZLn zZy`+$Xb=&cofcL(UHX6nvNOGM!#^JZGEbCMV$ zOk;Q|;9~wcwmY-i8mN)nht>Bg$2ZWjl4`Ox7iFj){n`5A_FMrfPQ#+26)Qjev>(%@ zs;bzxAc{Jz{(_)sdhrV6jWwStviHBQygjLs;GN5F#TO{xTNGiKW2x(E7@h7IL0{oHyCY`)#J6MO5X4nrV!f71fgXTy8YLCpBAIh8&#_&?q1MA9x_ z>Nz%^O=CY}tFRmB(g8u-U^tQ3N8yB)Z+Z)&5j8@|f~+eAh_7yprK6^L2R&1E=Z4nz zx*F=5$cDf1qa~;$6a+zK&blGzBLOs} z*1Nvk;A%NmY&U(!!EqrYN66y-Pm7*63;1ggNi}?jco|$$x!~PvAF>Q69Gp45R2Jb= zt780$C}!hN4Irv@O;nZz7SvZ2Nn^379ss+|`?k=2(Y-R^j5_`9d*ZqWvy<5h)tsAGCrL zh@ZZ!=5A&CfoPt{X0h!F5lnH!2(JvqwwRPnnI1OrR!A+fF#^xX^)q zyNlP9x;-&Jp3vLXYx%lz*XS`{ms1IyKJQ+ZtR4^aCMyEHwAmFyBq>K?o@&{RSZ6#6 
zB-@&Z$&2StAD6^?-dv+po$yyQr=L3(^;p8SL$19hM zd!YetqhXNThNjLs8pEI19B_d84|2=^-J6*tn9c@y5X$U@X4yWdgJQg|$KZGH5YEnuudB7b`|xc? z+6#C&K9KWq@@%hHn3xRWPNqo|(~^oZMt}lTiSF%luDt>t**ZcP$k&VlS#VMG*(qG-m$@${7l@;sP#+)l1t6pzH0{l^5p?39!k4H6eq)W^Eo;(BK(#B5Vu?8{j9Egn*`tGx)mOXLxhmS%m^Ib~ zo>$>jDbTupcH(-%{`m@9F?U~7?;6qJYH-ci>5i)L78G?8Y@omrN(QIhAHsfqG2{b~ zu5qZEBF^k(wg6<%qm@Db<^IpVl^O*sQI;wIXIj3Ru86*32Yk8xgqFBI2?UdU>sY$eAddAFyRi7SD=f7Ga3I8St4o>w>%k`d|4hc&zKYBpYQbs<# zaEVtql6F1}t7goyJZf zZ;H3e5nG0YZt`ZGa_g|HuOM+FrhGx?(iQhFC9Wb0brCzwEFnM4T4ddh^fo2cC7$cbR5t4hIHQo#UI5yZsUZSC|*L#4m7L#GPx$mL{(q z6s+T~CFJBn68neBb~BeMgz{W++IUkdV(qzkwj@`Y%g2=^n=vw;tl@0MW*;(Nn|*ZL<>`&|KX z%R*?EDU(cniz>$(z#C(^CR{bDIBMFNqRsdf+|;4DCTLG! z{?2)B&;1Pm%^xRPrT`Th>aU?NTnkH@M*DN*H=%JyqWPNn4JNTtPUH6(m{J5*C8BSNHxj=$~o^s@ubiF&Yp#3s7x@JRKoAa zTvh@t%xsfbyf{T@Fpc?q!H8$<+-|ZhX@2$jD-RtVBkG z>&4(~X_x35_~x=8dz7j$%?#QbQqJm#Gn|S&QPS})eBU(YuRoSB41Ygk3!AIg7}e-0 z-1tu_R5@}8fR3z*TwpdcQ3(l9A=H8g6g&%ApsxPZD#SxO`G;*5QpxrUM~a$l!caeX zL;E*E0J#o0isugtL(cX$px_wHzokww77cxM0Axf@=9wQhIGa^0EB)3zhyFq+WSe1h z>Xp76uX~>n80H({S4E_{8us-9+n!bNaRP_6<}3G0A56J&Vj!VoF5lNhxFBp-$#b=A zXweuME-Nh@FYV3tu$`+hu~K$`2WKJvtNX{ zo?HUg9yec`LgUG@mV#f>ilOUKVsBAwz_PBm^CbGe+eB&$9ewAX-%SG%AwOqu(vuw3 zJ;H}=w-Aba$`6Xh-^cVB7)j6E% zen4|u&rChCWGY_&p_lrLVfWn;JcP*kkj1NvVt568!Wcc1CTW7PRDt2wmIo;8 znZ%%slH}r|%ISC@rsHqOopbwFP#9I9F*DnQkVW}HW#3QcQM->49Z2($hz~ldo!Dg9 z=C9cE>ck&uBDJg+LZ5G`am7#IP~^f9WDo@-fpX% z;+sqJO*)OOz1OpikRK-rUvj^?GTb2AC@tCaXiZUu9{VM7CRX-sjwXtfmdw2|_c&yR zxwFEihP@3y=K3O>=!pT_-Xm=l-#iosu$`Ia=Bs_~`9&I7sp~N+L{`N-p1y~iC!SGdpFtH3f4J$wmx9rCiL@~h|Bz`JWR}NPeQ~yENo0H_wmuTSz2J#@lf=VO0D0W@2A|2TOTf zj{i)G@lVn9=n1ht4NbU>pc9oW_glrReaAfkZ>%ix1U=TDywm)?-+Zr?C@Z7hDq}-Z z{_fPQw>-kxSO;La44kYuO>+=6QNVouS)8rp(=m}-vR@0X=v0pAk=K%L#?+WU(CnHH zgRR`o5f2vcc>_E*}z? 
zkV6JB&*TIKO?E#}!Ca>+Ru6f!wx8VGCbF=J@3c5APE#}!YHr97bPhZAR0-dW6=KKH zW2fG`WIU2Bi`FNJ9^dlzV4hXmiz>&!z8cgkOkJ z>1wM*H>08jf60fx+5LdBE2$#Eaiuw!@qkNrNg(%8SgFbVZ&bOy+{4v!#2dRLYs*@>AZplVD@lu>YKNs$u0b99{IFVb9wKYIfja|Q1ZO!tDK zoFA|izN?iVmrT#?0?%A9Za=y z-S0eRB`4i$X!M2dR(qx%wT{mcbCFkb9ge$rW(YAddX1?d0!J#*&3$)K*VQBj2`y=9 zkMCyATvek8kdh-YA6&zWArdo~uGns8my%_jRtby#MpY=ZQ35-LlxloSouoQ>N@RpL;QEOJ*WKvF5GUKO3qb z%}^5pBI+IO#o)t_0BdIYx@=1*bzeB6_d<@qSSo|Ye3rxm2#k!a>nMf`+BnhS9T!7d zfmYFmpYJ{sEdq40;C{F8&HrVrZ9bR*>>t5p1;(GD6A*YX5{gEch_b$R3MQY3-zt1q z%VH}0pSj@%iyJjwSXX0!0d$jClx=R}V~H7cl$8Nwzoqg4*C+sOI{sjN<%61S3HH)l zG~ZLO^qbfn$N&ij(3!vbF0v_yE4Y%ZIC0cPDrim%n5QfRN2P{)BOs-UDCZ3Yp9ky$ zHHm?q!%%-zdP%^Fhq^GFKlJX#VTvH zHwDgYVk*8Xf6(uU&snyD+yqhE25tU#_DE5)nHWk3YLw_N8`7b9K`-YBRV->V{l)uf z+6t77@c)L8L$+zmDrADNjI&$3n(CYMSFlc9A(6IS%i`$?nm-PI&UEjE7CZK`{=|lR zZmzqFVx9OVlk#8L1-$vb7E%nvl<}M9IYOtx?Z0_k6Q+C4MfLE&ZKw7`?xE!>yb2`S zafAin>Wa$0<5o$qZ%ZkPrh>5L$7Tr`FqbnM#kB<~>3m)rxFmKZ;}iOK=`WC&%UdvY zyio|rEQzmO;4L42(P6y>9bSw}Q?=+34cKMt2McK6c|HjdNS>jC-r*m^5=&nHWVTs;*1(A17Ik)<3{IP!6wSJg#{Rpq@RlGC{nms)tOPUpEnJoCr0{)5kc%x)VA=wUt2h$K3dm3+U{YO1Ditc$N3&RL)GI>h+p z9B}tc#qh1Sfy`R3wJcNJ8 z({P*(X9-G$e#L5;w;rAicY*I=FvH^XO@vrq zOHsdZz5d)-*g?_znnZlDlZ|h%Q>saC3pYA@as$rii~=IqBWtRb%^S*v8=h!J5JUIg!>sxrbc3mcf-1}pacz1)6u&3ve* z7I!V#oKyePJ1ahJkHNm8jK}9DZ=UTqk^3z<2U{ajhOQ}MXcoDUCS#hL?8kCnx^QdQ z#`HhIYmc7D4K;GAJp&x-F_DkLG&Q*5eqm*>XE*J_PUeQ-ezVZD#}`9XF)+md>?27! 
zv;lO^T_gJ3OkU6IdBcWQi2j>XVh=(%h-iyTC{jutB6XwbZ(W#O#9xM|Iq+_#Z^eOG zEa1Ne&x|1dD29(j-e>>39tKgg!@RApZeHS&%ROgJF?Y{OfIW(5U#i$X^W)|;o_YqP zpu;-;z(FA02^K>ZX*X&hY$3zUuGD6t&1Vx6wRS1uqxjVn_1X)5ycrk%8m}*kD2WB3 z25z=c7OjR&%_%W@N#&*L{#M~J zUd^fX_jbW4YxDS?l@4ASTZr6)0MSC;Zy&qKf|C6`fc$m?%{f(C-8_z78No5$yT zuU5lvD*)=#qQ->=?bTEq^#FbUCE7=*%L*WG_skZ-1AkLdk+qLjLAddzs_1zW2BYe% z$loQS9B6ozDzaT*Aqt2W{!^ILrK{72La5?WpBe~{q}=+ZoAL{R(@FQ$QZf7PDUf`U zUHET6_2?~@H>_G}cG+YXiBSDtdf~m`@nWASPY4T~O}zzsjiU_S?mRdVY~aj(NPwvR z=WChWO4vSk21(P!QzpO!(0GqCcMDe(Y>P#e#q`5{`WxXP#z}2u*Gxn{yTQ-~9+xoB zE4`UJ4@Fo8^(LJiK(8{U%$8nQ9@P&0BgAqCAQ8%^hUO)hp_40smUPk=Dy^~BZ2rZm zQqX*eFke|A?`w9AnBqn=R>sQ2FRhZxhy^~9bsqo9_7!WBmXu@nP!bElWNrbw(_Q-DLtBRx2 z+czRtFd_5SoGkYx+qj6c!xWHYg*4T!2x?p4#^`wEw{8(P{mG_qDO`bczN3uid}82y z>)a?W$@iX}ll?)Db-B`aCeki>t!oKdnL3mQC85ikl^y{NK@A&AYau*}ak`S}wwU-# ziRBJ6V5!?ye5pA&)-?=o2ok=>j{h)f+SRk642i8yOcV%AF9|NDrWG-E9U$wIk}soH z?v3L*8c*;7SdR651JC<%1CW;qUmm+e&w_nx909X}eY+tG?yy-;RUq8J=d|w_BG^w@ z6d&;zb31464yc*Gq=sGLn&%qf4`sj0--C9-loHHWsP&u0&cwDwZ)O!5lJXC~x|Yul zhZ@YBk@Y@}@W^S%Ec&xo{NI2hg82v$N$5YI z*eDL$sNjY9>QUR#!Zxk!RblJE2@69R*@(zj2s1NsCHa~rtT}D?t zpiB{B481Znzt*d|dm562XfUn$CrUzXdLJ&D9_w=u^91|h$c#SWc}&e`10X{3JvbHbPVRYmZj1t703F{L(z;C$p7NQqN5=G)Rs*hb^>)S zp7k#Qy_hQJN^f@naQpkQ>OVeE8e4|!fEHT^>f-EIbOCIe{sN}*i6k=~?nUtIy8f(tKpLY^{=gZFro-e5EJ~ zxaEN4f;DsbI(9Z_9DW0>C|<1AQ6eyV=;l{!2q?cio4#od_W)N=`9zDY$m~ABdt0G0 zFhghfE28+Ugsq5|t0KP!Wk&!!%WyvP77Qhh+kfnY%_B zl=~V+Z_a8L9~H;UthAdzITO8zhWRFAyqn9C;hDII~}(!NelLjrli@<$O66vr@e87BJ*Tuvo)EA z6O?M({_?3hd)8}F4-!6EC|dHi4wXiDH1=OTdolBby@Aj_yB7c{>C`d!0n1d~y;0k- zs!kC%+mM@!m^#PL5T$Q*|4H+c@$l8CLb56}ZT7#y==@BFrj++q%vNV=`DP3%R(TnH1aV%o!Uex@g zm-u)f;a)i5u0|tGL~Z_Iv=+zv2caUMr3{~*b%F+0*ja)xudc1mX~#hU*qFOa5$L{0HQ;RG4Yko@CBc~*T>-+7g0YE3Rr~(S zaXB@*R82ta!S1LP!*s;+oN7CIXjnp~F={Ja@gt6G%NROliUYyN6PJ4o0U^TO8bwW` zxs$i|&|^slt)A2lO2J!l#|b~8Aq`uk7uar3dO+~b;o39*N@9#ukpY^+o8EXC!38R! 
zXML6s>AG(bLnD)k8$%>Z*UsGj<<0yp5xtKT{v+u zGFSy!W-#hW`{RoydJ^gIyACunWxkM0qvKhL@ciY5!-D?+eds1F`F zWh^$5@Tp~m$5%{UhdRJFz-y1uLDe~sU5=s_0055jEfpDcuzwaQHXz_ej3hBaGf48{ zkVkAULYOLCl(Er*jR*chbN*PXsjfRZSPteAwvmXZ?h@~vxo;k;D`k&1+TA^W@!|RU zno&p5{*KcJHF`(q)OVrt#3go(U$@d_|Bz^m8mg)@ZrS8U+If7-Mbw+Nn8g~s7m4b* zoDWBLR{R;TU8G0s9#T!F^dhXduHZ6YUfXJRKYN@e2uXYUkiaF{5oAbViB~c0C;dgJ z5GmOXRR=u|ObDhC3l6k((DGN_JwbJ$9^m;hbO>s8>~Dm!5bheTBeT1bPny~V$?5^a z1Pb*56|&$aM@9Q##D&AJgL0NMc~j*%g1zX;tf6>6?mON~cU>yWbyF?-ODd;Z$w-$M9t z!B?*ui6s9{nLa5ZSkT{oK9+Ljo?@$0nx@-{qPK^x7g@R77dq{{KK4xa5x=-EQ|+eF zI!Vu8hPP}8<5&6`A*VMb>84ad9z>Y!F`X(Fc}zw+-Ilay8|?78uy%|XHz!hT_NL?d z=p_P`X!@q_qus}E3Gt={?dZL3L2;4a)E(vwC}@&O$!Fj7GEX;p(a}aA2*w9T!Yd3J zO!IN{gvgSXMcYWtH4`CQ#4jC^bqr!eGySzBlQN0T)3O^+W-h2eWl#U`p55YzVI`at zO_m`tq_01r8PFAX={_?Pn!^mZS0?>Mjuzl7=*oDG8JqC{9}BMwgn#vw3Y;FfEyk1^ z+&1%B?}fXhtd>QHOuS)Ee53!%_xC1_yh~O!cwK?(C`GL=J|_GfPvk5;Fl ziTMX47XwAbnp-K=B^Pp-+$A~r3Evq}3wcCowO6iYYpl5QcA8SrJNVNrwUjUSeE11G zeHN*_o4S^{(4v8#j{?rj$R#4g7(Ijb@ToaYy95*QVfH@S0rm9fwp>FdW!}lUwTAF~ zE9lof)9m0G3^bwtc^hpbFqY13NO)&xdKWw3{&^%fg%JI*4WzhOI#Y+PrJaMW^WOOZ zF-8yDXhHjuqB7^0sH!&6^&(h!9jdHY-NN&DRthc|UlF!Elx|CUXhvN7N-|K)uyCi* z8{HYC)MhW)Uga!J)w<#3u#yGt9SrYl3Yb=H*V542zBQUwJ@S_HUso6EuLvLujKCV0>6(s=%)`jbb1cyRaP_B9wR5eH=L<(5!jx5sQ6|y{_mE zMU^5%Di=e4Q&;)lVV>(!0|8qsbLj?LsVU+A=opa)adQ8B5-p8QVqW`l34{%41XA&R zI;smW=aqn;<_`JwxWK?F2H-Dc;8#V{YT{8c+k2Lc_h#L+f(E^ zln2z?A}#Xd&-jD3-T{))qJqLjVUG%_r0rOnqAHh-71Oo1)!Y{{afBENx8!xib_ZI6 zj^H=7?=~O)=Eg;r=$P9TUa>qA0xnyV)mwor-u&xUmuVqRu<+g*@c=2+Yn^GjMMJC3 z*}fbpN$9bCi?pXU8Oq&$TBWh2t!{@BNoA=av^j8Q()E|1AV|g14bFgO5I6iJmaMXH zps4c6rNLkY2~LAi+8hchL17IGYDK6HjHg!h^WQtEy^!aB_9&+k*5_^T}j)v zP2U!${^i)Q9o*KaIS0iiG8V49Kwy(>o@M%CJ5=)a>o3-Cxt9Wt!H_pV7tww^u{qPq|TFyocLrV+d5jmn|Aji~TaZ9%oS1{534n*DWyu@8Oh z-S7C}2v>D?ZYRAoJ9vj%>RsiUU#hjkqS4xOnR2CXuk^vV)n}3;gwx=72(`JL-k#gi zlykJA_zvk^+LEoHCpP$O{DD2wdUWD8?FFJt!VJNCKLIv=Z6x+7+^DuVeQs~PGM_uu zfX)fmrWkW3tkv+rNwgy?7iJYNp?+yE?0|baa*!fxQ4~ee@85PN$5JEY+oi)l#Gu}Q 
zmX1L((lI&l&@eZ8PIm`}Gk2rDfh|ht88l;KE_i)15e(4iFK}UdoIPfiG_r8M6SXdK z_7ze3=vUnHQxX$v)^s_@W?q!;UD~gIRfKr8{uWy|f}&tj&{TK4d>j0&F15u|-d~3$ z>k+)(t-oU}^RT=N$?Urnv2M-@uQcI!^ z@QuOVYm9+c{iHS2oc@6`$u|EM!e8gasC4vlOyqB`<`+k-FAAF$?NCYPn=LERf0Yxu zR(H(}nl?dq4gmV>IjvEIZFbY#S-lrCq;!9#^=wDh`(nitke#%4ih%!H zJiI_lBlN#fHZQZ!EeTNp!&j5q|2^?EGZjOaZH2o4(u}oS60>{Re3RR)y{jzqq%%Nr zftNGO^prW&r4fa$cdIRYp1!)y_%%!Ow-*HQw~}WvZpk8h-K_14U6ZXn+b_1I1uVr< zCQDIRpC{|dR)eSaJ^$Hd8h7~kHKs2>^Ovnq9$WMcXlT9OfeOoqj^ImR?0cr`y|p~I z>Hg<73Vt*Pj|lr)v>A>I+d<>p6Dyd{D}HQYA7Yx=3EJi@-6_yaKOh11q5ms#yoj?; zZ~+<=lHQbKsFr3YW}I-r8!>zqqa?Du;`iYSlV70(vFpDrvY{r?^YNu53s3QRxhCt{cp+y37N=b<|yS!USgcs$nl& z3ibu?vm|AgW_YT6rvpWl)2HGkId!WsJ>Y9>@0- z64Dycq0KY%k8SNS&051QchEj}l75bu=J5G^Q~$VLGZy(qqrY#J#K-IlR<|11J?j&6 z9@p`*$FoNB>}xSQbBw>t&7}N|Dc|nGS=4{6aN2trtr_JO6uaSI*59MPx^M`WwQtJB z^AR}{guN~gOV=n!-gSm+%H|HYhl0EuD|nt zdw;V2BR4yJ@w*Ed`>WkIl{OUbV3LhvebVfPn!*Wg7q&b)WE)a2GZw-; z9DxBJ8Td|kuDIeqca{>kCZr#R*ivZKW=KeQ++)|AyYVX|SspKPiv7NbDV-Azxvn3e zrlT0kzIN#C+}68kdW+XHEcfip@gTMf&0<_loQR0yw=G&iI2)zItdHS%1cWhZZ6B*P|vfhiu0l2mdrA zCH@wJ4bG50uTRnC^!+aq7b8HCfEylJ$!d7vD31tLV~^;0^+q}TsR86z z(YufRY)PicJX~P!pOF5Cfh!+epc3IRIW1tk>klJtr4PxEQ}rveQ$W)7A!9)AeHk;C7I`-ZF*6>_{*}b*$-h}m< zu9w@=q>Gje&)E(`7RSa`*F_Z0T)E+UMuz2`Xwz<=vuw#r47gAig?Is$)yWKJo}s01 zMfKeQ8B+V#6(F@k34Y!{i9L*A88e>9tfF@fLLz4>tOZ11GpBj-mC&80TJ! 
zcn3rH60>bPt+2-Hy~6I zFHH_iQ;Y;^fQLSh7W9~qEUoFxlI)7mf1qt6*@|T6w*UP+S4$5_>v}IuRd2Et=gJ0@ zT0(95)gM9=z7IFM3H2U7SkQ9=n;_1pI#Y7tdSm*f?9t37{PUyPH4_l=E1MWBy7K-f zki-olzsyKKPGvZopyoV0FB~U)`4YiRW}4*x_e|#ValvE>{I=5nW9m!bp!i zfB)C(zFzmfaLwa+&Uv5rd7tx~hvy0Kf+Am@;~J(9C9Q(*L;v{>JP%UTmR?r?Iq7A< z)I*!M-xqnmU`P+B6}}OA>Jl@hT4vH;vrW6V907ducM6Weep|8eVev14okdrK&bY->yv+i=Dc4(h)D%yCCn_ zvt~!f?Za<6HvDEpo*cGk|A8f{>fSCm(sRZ=KkR0H_}daLkn;cb;TyK6ZC;PiueGr9 z-9#2`25#m`he!$E`%^>@o}zwypGs|CTu=Fz*cmHK$S44`>e|VdCP>(Z%%O;84D)t7 zM;G2jJ$#mj*UBCX4dJEHrHz}bBQwPSLE--A74N#e->On(97j2SUeFb3Wj7Y>dLQ6ODvRvLSVmm>+$353}MW;k)j535-t_S~)w zD}-^~4_>28x*?(o6!i(U06M9Ba1sN(JX=HJqCM0jCvZ@cI{jb>S}H&8vdQ!BPc_c< z(n*$RGR_->Yj3u_xM!`6V7OKg&Mo!xv;^|Q+hQ+1%fUf}y|5`>A&w{9+c0nNM#Iy! z468wMhy^JP=^O!8c!d;#0jn)`xQZrJ2t!doai^njZv{6~{V^oE8D8vA0HF1jQmoEao#Ftw-R3t%#kN+L3O|- zx^NR}OVV0B=6cQS9Wt|uI0ApDr#%3>|Acsm78#8b+P17RRJ67}l3IJzXIdB7V5DL} z^zBQjV&4raZ1Vbsjz1LonJxg8DIiaZj;iv`0WBt#Jt`|qAf`aZHXP|S6K8O*h> zqwrWQW*Bj@MLO#1B?sYNLGBHH9HHWqv)hSvh@&GQRG<0g!#}os(=NjIS7&-6iz@bM zaF}|4MZ@z1Vv^27eSRDPx+P#U?MUt_|Kde=>*y`ruh@^x7}NgL2SW&`le77W3W&DU zcO50~I(W2k-zfeXSRP^1mj+;T;#~YAFMP*VGODcU%-vNi<9lfuWA_3Vd|vZC-~n8kmIFF4C2-IrI2oK^?U3*Ux_trfS1yssP*zw_4+ zdRb(AIO2)X>>p70hIuQ9WKL+rIk1v3Wf^H8AP*_^f!EI>a8t8cqkr84gB5dI2j;V> zH7;d4CRJmy95*`hY3&JE*+ERItLfY9rs%}fQyWV^{*#EoL<{-xAUSIB_7(lDz)r$l zv$p?A4K+hb-qo#24w8m4TNUDteb}Y7?7aWHwD~6=v@7K%f|q`wqu=KA$sc8!nQX1( z`Q#|vS<78zh|j6;_~K>AU0sgAm8^@mG2rxG6X@9smLvT;ks3<2eW(|;9_bqSE9(ZHGvJw1a^O`?WJ6)p-UuVXfoc`n@ z>jRi|THKZlq-4*THAnbYHDH{DQdY0;+#a%*dCmR)blL z_X%AOy!DXCOyBXZN=%-nU2)U7WYy^*|1)9;Wf*XHfXr+Kgt#%3OAoH+UE`oJSbzv$ z&P3fr#l*fgcS9yIXfS0>)kNkmb91p!moPt#pmgd|`TyXND+7 z-kA0oNLKaJ4|<7@ZMf{~l}7Ztbn6d#oimX)b(jA%xRUUHGLGX1|{w;>Na2YKNzE8y4@9lZU?Kqa_ZTW>W!Gm zcP;AKp9LW=6R|@B+U*d61*$?DF>IV8Q{3crG0%1f;m+4oMrMai?kLZNJOxmU6><4X zX$L8rs-NPIPv-5=eU`u2qJkJ7_yIM^=J<>1CIo5=^$;>M63y8F!2%2NgCw~EG~oW? 
z1cf;6-A&k*9UO4TXh%5IoKki}X41SNWHvKgzgVykorWqKG?_5=rNQ0#=)&FSg6*=f z#p6tvO7CUd>7^WEYK*pDO&$|nbFv5Hr2}czzMSj)1ETM!A-9juUIq>4I+!5M(8s)=oldQ~w;Pg3 z=krISkD-}65Yr+>i%Y22_G@Cxq&Vrs$!6&^F?OD{8o~V=Ivue+#zYflNrSx94bARd zZJwMWE~+_xF)73rdxj{`u+=BSATJmz+;l%&ZCmG#zD2WR0mpZZ*hw4O?|bz{WNX^} z32|}tsJ51ejm`cOZm=52QoM`4oz;)l4vlPZSk2a7z2(uk=ZJVUGX5ShW;8U!*7m}Z zvY3J(hYB~MCLbX99~{E1(M21UpwfX$=^#@rOipv{{o`(VL{RZV7_z-_C)GXCnw*7; z`|?G_CZ{YItiSch&&+V5QQ*c8(=*iTT-;M#t&%_*E5qs!G`UHd*2Y7Yxq|L!hp3$uy;;?hXEaA zi(do-kLQ?ejxFK&2K)#}U1n(Una`o(yb#b+_XCQasq5c1kr0}4v=Nd_zEr<@i(7Y@ z>a>q~`2&Ia3p*Hv`_5;tM?LhC-8N|ZD4iNX`o^OzZ3c13BoQLwvmD073!s2(^gi+x zom@R5xm{&K-!`|tK|1aW+HAR^mmZ2!`TGY6Tp!@_H zg2*AEm9PI~Z{8&JQ?{*9$n@XPQ@b>;qy00RE(<5^o(y^8+8-}ZDoWT5RD@ADdFMp0 zoO`_5vz(@LirO|gCL+M@3q$Q7r-pOun&gFuc-UdlBZQBzKNx+{-s%81G|q-0-*2$A z1?Db1EDhihlV%KoyIi9We;?-9w|m>uP^%V{W!vK9jEXm_)wI?Gc|(Taae*6vI-z|} zzDU|C-TJyJDRD;_cPSt4oRE2Lqe=|1WHA^Is}@t5Bqnjm!%u{{Gh)D~gC%%Mv}$`a zvFD{=Re4Dn-8{Y;f8%uD&?i`k0&-Gb@Kw`?Cn&I#KWKSCab3VrEcN7jFV;E*=b>Vg zpXa@cz9141tY*i(2iVt;TWQ?OdTbIq0P+ASF$&g`TUr)$_35$a@YK)@{zA^CZA@3H zC%K1bdR^Bc#IPaH{;d6tLc$4z{9hq+RNsc#VIuG@0{nN>Xf4bU5@*RpM(ffraFW=ICT zwteKn(0O5NFEk+Da`vL)r7bsZf}tl#yU7F|J;{r>7h_c~H&u~_i@W=S^Bukz%_cJz zCwXMGx`I>*?h-s<#OC_{EIM(GQ8BiyAJS6Km!X{XAWrCd*FZ9@kv$fTBd8nqjCBhA zqzJS1^EgKz3>_(ZX&QduXI0hcI^0E7HU=;{-hCXx0n7$T;Gvqk}my<+7!;kOWcwYV=>BE z20Co`hYC!r#Z@Xs%ib1P94aICsBe6KuMIysTys&$qwdDpRI!7;dftvd^7i(&obhU* z#qCc$^`x}S9;8^^g#pj@gYajC#ZoqDR4@d8<4D&PN$lx-dH7VQ#)vK%l5greXejYk zI=EFGBd%)}w!jc<4x*>I5xQx|dh$RpUpd@UvGNvy2?t}MQ1B52eacmrhCOmp&Z{C+ z$tW)=;e4O}A7Jn<9#Oz{NDuZi*V1=`I8f^Bjb{~~%QWTvX`K?4w` zC*K#@dR-jsU&2ds2O5D;T=7$9_8elokF1X~AVe~=#_D1E{J z(@z0|n{<+a4hF?BYvvvkzpp4TIz04vGI$=`&Mdh+W)vsKethHdwWIsb5o9p_+p?DJ ztywMj(Yow~8|Of>CAwuP8>2gTeYI-6`qi`dHmdnU-O}q4>qV{27RFJi+T`FT93kR% z6?021w8|77$Rcwb%nt!s;0?}NY)4}9zr^M%|JmK5Bs59y*a~i8! 
z3b4iNC?d$Zp4uCX93|@F58X<)9)M#6bblbqvm5{9JgNOBLjT~CVSLc)g#m6#hMiRX zCrtUO2-N%ikW)q$t$QYtDsuEB@~B3cEf-I5wlh1x&RpRDrD4Ca7Jmg}%CoVRpLrxS zy6*gCfZ;Dr{I_Sf6(4Dtb{m@Xe?=WE!4@YI$vmME)d1nMIQCq=MMhHowKH{n2*m5z zuVyvP@m>C~dNv*&X=)!;+`La9ARl`j1m_+}a~G+t6lKa4*cLl@U=%#EnwWGfRKW}jygs;4*^&HlvY>+jRahYk z(|h>{F_92yiu-Mn8zV~bIvvFp>%HFa#;M-TC#t643A0&*>lO%SOy8X9nMNYrIyVln;|52j&gy6tX# zH%N{ml_&X*>9ssuxPkVEUVGS@A95ucwHuX$-v~CU5%&8+CxKpw5IM~y^A9*YxMPnu zM~V^muRJt9(UNQs#=`f@RJbFRvV#%#O9vpn>&(e9$rslZCM|GrQNXoLvq6}G)QcYF z{Xkc?rSUR+D9MA6|J@5ov{2wy0-<_PNd9puu>FStzqn>aH933e9riVe+u^a?!lmbn ziQA#E+{gPv9rQ%k_E<5(($|qs=9GT?AI5)G2Uvo?p{&6-1Wg{Bb~^K2ne2brfi~gw zG=6~nxKdEnv8>^6$I(skyly}bE-_u`$*0>cj%-7m%(PW-&#k(r2@K24`puSkH)I}Z zEW4A(N+7!eKnEd@#suR;EN44>(9M<_lO+`SM+^GBhV5%@;>an!PmorTXiZZG7>yOs z%30QM0TSVd^5{U4 z)^zs2l<6J>x`OZ&lr8bD`k{MJy9Ph^k60(91a5x*E?4-V9r%P{rc&88Ic$y;4C0(j zG-j{c#-qPMxTk_?L0)#(SSfrx`7OJoO1|z1Wu&e?xJddPYn11(sK98vtwJi(GVYx0 z^BE8*)add;oj9(iZ{N@dv9}r%9ss>AklFz|3%D7QKZ6Jx0+UG2Ptar55_^*3nzFyZ zqSC2F_RyJ3DtgY1#_XOKFb1TyvjH#m5JBr3f|I9wK~<1!xbs&tq=&EP`7KKbLfk7N z)Dj+}9gA^2^TUMf|5iZXo;G3sG#;uZm^O{B&F!NHE6-+<>)|sH`w1WG14oDP&Zp@1 zJ3{ljh+@xdAMA}U&ZR6+E2AQ4-JAhq@ni`*lrK$`u~YdFG2{ahde&`k@O1YV1);b z$o?QmTS_LPKu=UZw1IKT?RpES@U;$5_c(jx68?!x*rRVi8A(4HPAc8XvA{Hl1CuQZ zK{Cx4waKXv3|mM~ywM7p#2&%#hR=x@eNCJ+R;%qO;o>l5v1Ne*EmSMI_>YEIjqE8C zz#0((Pb6TC1VsaT^9}PG6;Sw>HLl@asOK}2UzU(Gd%7nOgo7bY;wEJ2;TT%f+eQ$b zi0q!$heFmHWdchhNZdlnC1!4EvBsf$*#^I)*g~&1M<^Jl@?2VRrBgr5X2Tdsuq*!n z(e{N@5ZYgC)}vPH2R|CnOnLH2PxJxknpw;MsA*P86U_b;n+Z_!6ayP0P+++MbUHDc zJQE+|Sm%2MTCXydS?8f{{2d{ZINR5>Ji?w0W&A5?O4BXRH5qAmvuPAXfxu z2|ojGg^jUN9p=9Scl8&%%L;jhUEEGIQ)KW{2jEF5f=xQ5lY2zVeZw#xSJ+3i9B^iz z;a=`J;SC^0v4wtF(faI-bu~!Wg`(=D4_w$qP4s>?x;H~?NrQFv4bvKQ!u-t`MBVP< zutHCL)2cXJxk31btldVa-i96F7>Am@rACTQ@+}TMnHp0*Cz@rn=Vj1aSfDE&-OmT zkf^(6m(zodnrZ8VUrmU7Hy{$~vmPLIE3=Aeyj4Ivz7? 
zP3-io@ci)c-y>_;LijB^JraIZK?r0tIu(L3F|^#3U0fp1IBZllW%W00-N!}UvyU{xD-QDMQFck1x{DYJkCyx9Pe@Cr+ z-6^X&(nIxRVp6ZBh31!_|m4y8v61}eis$RUVuZYC?7S`ys34BTDkGTvdGpChRdox zMBR|zgcyOhMbHi^74~1N3cGp#DuT+y8IxBsOR6(yT{_D8U^8 z>H~w)oCL;00=gH!yAqW{ZExcB&%~M9KaDxEgk_6r3w{4H_9sjtnDb6w#8KkU-B_I+ z6rzArSxrmbThH^#`7)+*n-5Vdw5zM7q6+wB)_h42Z4V~5h?6x2t3!9o#BRYBspEYe z*B))1D&E;jcC#Ibu2ps3s&!VpvGH`*r`z?mueLis3Z_-W4#!UXvDwZs#_yp_dA$~% z9e?(&{GQKu4YEs}u!+}e(kep*`O!(DL|6z$w|Hj!C@+iHOXNJ!7&*iWd{{bhj;t|r zWv)%qLXAWoLTNL)+4ihlY1*RKddzPz!hvo0k~;!$ulUX{)j0e|r(M+1=2ajWrF>QI zh$Vn0-klX=SlB|LY;mgFmDRnCDapF926T~*F(Q=4$O7bvdlShS!q>&3QYzP4K5q8v zD0resO- zuTM~EpOpKpLh$?p#BA|GKJ!Uw(}E1gjP7?(dWMVMg2L4cY)N@OO%E}?Av?ELx)<_ z1fqikgYt0%wf}GTW`FY&4tUW0%r(Y0DI}MP*-H@&k1~hV!*@xLe;tRcdr{<1KDPco z1qF+|`-`_xlwe)8VQ;DdIjB;o{0OeqpJg){_y9uz6-vtD!e^agm*?lpJDgBj=PC+4 z@9jNtWlV>`?{)I*;a7vT|C$y%s$7Z1$e<3E;%sE21iA=s@9b6a)0oMjO}bw)jBNVeB%ECKf;gwZdxtAiL~XxQf~~J*NKppM2HdAZO6^}{PSzg#PHTO_JYy4y8=SCdL>!DC$9gv=Xwo~YHuSS;p(Xm7R^ zIsByd@|HGjLhOdJicR8Qy*9j(_;-uv#tgqPYteKD@{ycLKqswR% z!*k2=w7^LWcO4gt>1$khR(sd`i`noK?4T(I)={^xpOK~l6#vJja2PTd!&ja2szZ_- zRLsvJl0|Bt)~xPl^Vl6Rv3`=CN_M$=OLaU@FsQLMcNdir6El4)@7TGp#g`U_2J^{q zNh|_hwW0N{2Cb>A=C?|@Jxe@9h`SXHFIEmDG8y14BOu$VLy@u6M}g6;5KbmD)Ru(E z<>Ek8RmAC~P0b<$*|3J$5@P)123jCKyi%KTmrtiOLc=4?GJoq)C9ldNwFJq76kK@y zN$gTw_({XQulp{P9~Zd82XhFmYcxg{FHH>=nYf-lf3X@~MgL3S^@oIPGO^d3@jZL^ zKZ04n;PvD~T*CnKMAlIVFEQu$B6hL1QJP?F+UNNRFb_EnB~o-LeQ(>1rz*@Z#O6eo z&nd^x?A$#$62ZyEU%C14Tbw41en}>5RZ#w`z=OvM$V;Ly&gN^d@%7@Z)QO!w$7DR@ zbzpcBj`ZEzft4#jTb%tQa3BYJEzA3@n8x^%&!|lzANy&3ROy0uA1bJ5e)UKaL(pAj zXSL^i6ZnMJ>&XjqAAaj0?+`jO0))K=*C2dZ?(9ZgULGY?d+F}be}H0WD&>94#H*p(|CF#b>dvA z^%L6RZ<5IM``_9oRI80>f|?tVeP@}66n*J4?=9+lxOQ}Ek4t{f7oteye|R!{pb&hQ zNb4I67QmJ^C2B?csX699DTVGohAc6v0+f#Kd5Yc|m*+QcpN{1p$eaK8!Nb(kIWWYB zU-4Y;#k7z$GJUc?&X*@V)%hJZ+HiWcdT zpDXLcG3HUPTR(P(h>XM8wSUWRBgzI2mT_Zn1B+w5>HRQuqWapy zcvAam;s+o&a3%(fH$?K(i_-b=Ql?(tWw;&ZlK;ejDabX|!J8rkPrwfHdp&fI1Itrv zNhsRyAVyT9qZR`@S{6w#AO}22W4{|CHcN6O(yuj(I`8@%d|5!p+fMrzFB`QQ;!(wi 
zS;@3r;63z>|5{tFrsoOqE{3vM$v8ys)TlP>n>N2H(P&5o!8r^!CwNir$G@q0jvJ#KeV)XYX21 zr!T3pl@|{b>lS;=@}hLrNzv;ma7~fKIt^Q7hPbk@g?#I2N0>V_r@-wzGm%Tx=$I75 zkv_Wr5y5MP*9&CuwgSd~L%jKy0L`R?9Qa}0=R0X<0-CLloBhZ#a>ibYXs`FKyyO^Y zpG|N(m#BUF>SrC_vq9%Xn-61_c4m6mgSF_O7sbBLMlmeUiTb!pj9$rXF2%Lh9|jHb z|5h`93l410OP;$0*b}caS1|6zb&=~51Uux&BK6QE8{o*24_RkwFs|FkeAqyn@OCRu zNs#n|4#0@En3zLi_`qEKi3QwH(R?(T))JRbqDc7VOgeZ^-O8)!)L9~D_ENt(3I-Vs zbQKX3y%g#!%XJ45TUb0dC-!}>aN3-%@_(;EE9G}`<^>1d)zoCLewN|0~>20hRg zhaYC91I>ZfU#s3H)TVXfL0MKEV#K+<#@yYx*mQjE*}MJ3=F(XK>?Pn(tCn!cfu+Ww zU_HKt9&yI`zT1n&j-s|>iXC6A_Y*( z^U1!9#95+fu<#L8j^B^XJ23dyZmFj5z5nvLTFE&wME8m(svq@)`nBh$c<@tgVq%>4 zv6_e9=J$l4VJUKWByqVNg}PPBi0Lbx50{yB>NBhqz6efb{x=RS$IC$!a;7o5ffe|Q zCDW=aw=dW%TE5<3Y>JKOC-N%;7+V9AhmR9zl zn4dYf=(pF0mwY3J&?3BxdorC{@Nk49mx| zA<9v3FSt9qYK(}zB)KHXs%MstrCUgm0*RDW`%)9^0^^SDxcOsOhA3Q4__Kdkf<&b6 z%eBQO8;|YC4_g(3MyhdrcUCN>?pqjW>NQM1>il@Xxozcz*s4uh9V6go8+V@2#V2S(`t5vOJco;FjGV4>R-{#h^|DV&b#y z;}uL7F6fdge<_n#-94FBQi^%46 zd+bkyQM5%SS$y3ap8puc9dvD%^ET8b)bk3Fb0C?b9`Go8W^VLg%xj9DM$!8>s@)ZC ze*BEJ;oEoZ_+XyN#w!_W1wYOQ3Bcsez8ffQSPI=09-V61f`m$Zk1QHIe12O99}*YX z6o5U?Tul=BXSxj%Dl6>8l5c7-mKJ$RoM__qZVw^Wbm1UW4vM7jQqDcQ_`7#+CNqV; ziSp@=;wDFy7dRH%@sgO&ZB*;#@ROrV!?&wKU`{`qSJ0zP^!N?wC~@)a-65#cR9te~ z3J%v)X^{rcqlY)JO13apc?DnMEq2Y{sItTh>!C5w@oQGR{A)kjMhaLS=k>W>G2UV4 z{3pE>Roo;GLqC)8_%(FcDlp-0Jrw#wrw7l?~^{Z<9MM*qZVIq zQBo%Dumr;W%d&uH@tLhK?)i`JiR3n<>2$mkqBq?S;CCH2`IxkaDp0*6T&7+=R@$`1 z@z7@!oJ?;PP9>Ah--z%_3^hX9qT$NJIlNo!`ifiV4D7fM=K)0peD2X^iU9lmYT0V` z-GbVk!D5>&qhbzCWgaaRAIjB1%V~A_swrfLR6lyXW7*r9UY#Dt=WZzTGeN?Q zXHP+TG&<{`v)8iBe0fW^xRKzZx=J-qzYf3Qt+S_ZQ6tVEX*O91dEyv0oBVbe>@!$p z$&P&BBHvA~x60^?8<3MQBnKgsoBVs|?`HgajDjMkr0{-IUv8I-6M^)D2W<6#TY&oM zTi=zWVSJ8&#(2r<`$=c;y9o<}(BE`}&bTd4(1J)!>?Q3@LrYOThdaRefW1eW9NQh``u1*aqE|~9<=dGI;qvPZ zEa3Pj#{I5ZtmyKZh@5f!@m{%cw-&P zFdA{I3GZXK_3U8;0F#-%l0ZO4-m7|6Bb^ZQ(Qs^Ui=dcrAgRWN0C3td9uWfVJmAk6 z-&NsXEHZ&b(`48gg11GKljcE6gh03TA51D?X#PJMBxW zvd`~v<4i)GtX99#;LS8oibmJ8GWL?m>;d{lA?0-lBRBAcG{wZa-raMaY_b{z2J{}D 
zZl^M}qaM)mv({X_i#_s0!JQKdWgSlkbVDM)R!%mT4EVO{y8QGgPU5PdNKk&UYYlVaq5t%CyOyM zfX}SEr9=vp-_Cy^pRTscTNTG#S2{Z14^OxJZ)X@80}#4r-am&ILQ;~wbQwrpx&-Aw?x`PBI>}l zIVKj|Dw}1rZF!3!20`GUl_c_tODQAU2lL61RdNiU0&!q1Y6$Xa(6H}#Xm->ef#V4m zLr@0q3K?-yZL66HDr4OjVQtsBCew$jwQe_0lpXMZl@W3q$y%oM0jU@$ITtg)@e)(sI ze#JpP8?=(o2JVn1*f~Sb(VB3M6mhVD_aJncI6^LfeFg-a?B-;4;)WLUXKCHFf96z! zOU`Z%c!H0UVO>~fcs>D37C|;HOv zQxyGpluB_?2mfx%Cr6sBuL@zkFRH}#Y9LxU4_kuCltMO!bP4*ssx;w*N?f61Fy;j{ zB35qJ6xkOT?AT0|K4sLA%-q9>0!a>W6?|=+7Ud1Tq%uI%MuL?8Buwyj%lwX;AMBS+ z-=4&Y3RuWS*=YwI^_*+tib7c)jy6!!)riwT3=`L|3bEP?;_m9>_q~~J*b6TZ3cj6d zwSB@ziE{)b5a7|BTH)`-t8+}!zwyGx1c*Umi!9|*{aPCMelICaj}UnlzwK>TKXFN) zRZs6NWjw+O-Ch4%zPlzXMsc)szo}X8KdT!>qYk{G3cPK*k^5Pri*r`b)l~P!VJ1@E zh2>NrC*;2!QKzm7cYKILPH3gB(#pb-Gx8x6*s+L%=Xw9Y-Rhjoe_?f6$TH60Myxo) z)Vk+~*RA=b?0OcjO;MuQ;!_E+R?D5B-1AAG%DROA6|ESHqjj{S_m)*M@rnnm8ndc)4_V7%|)%p)p(yU)P>@2sDfHfoV$eq zDwSIX9;^Y2QZ0s-S=LK4#*LqvAluDZVxXy@OI_Om1l%XgdTL!MvpK)OQUQUw3g!tf zJMHpZlODu?#hP|)Z z?B8N`n7#|GoMqA@y_6t$J7Q{IidI)h$dOK4qk9+_TSYV3zCwZhYKZLi3uwf-@(XW?_*jUkSNK5M+=6}x394kA964hj$dYUmcoB22`Sy*RK*r>k!q4OG*xJXn zLxO!?zj4Z*obN<(jC3(>WQg_N;;>n--?*tz41XoXq{o5tk|$P4e_f$TmdES8%>cEoh5iKlFp)aN{)P6K{&f%-vWXFl9?#&>!kb zrLQ)S-trb|d1x_Ld(+?ED7(sdY)0R-f;~}{*gCvMSBYGa%wFEzj9APuzTeXR#U_7T zqUK*gY_%`j0AhHTbH))SjFmJQUJ>CD=(B)aJ4}1l0$X4TC!wlk&ux$;uQmX#$=wws zKLO`PuRDwdS28K0AK4A8grmopxR-}_+qNwTJ?*l$FZmfCOj``4Hzki5X(wD> z>+W~(*lrSQD>X2Tu49{350e!YHs=JaZ9AdD=ZUCQ3ujhnRsho1 zl-5ZJ-{yC2XgdB_a^TSX$5zO!sf;Ck>z#)R9$$Ssqp$S(i4k1h+*QbCL~fKY*XpVU zjq~BxrUE9+X$8FhYrEaca6)!P;8V`VOS?G)V*Gba@_|CFl}eLDD~4dVk%}V>a|Og9#WCNDepA zZ@Fy!K_}O2i=}}UQ40(c#h&Y!zI%WnFQEIt9ec3gSW-hyk`BSFrCBA5$D<7g9pZ*>*TD z^;tJ;?fP_Jeq|Z5DdIwuN-%a~H!+C>t12d=^nYMeom8$lN&2Tz|%9U;99CaCTBAcYB5(FW*Yzx12pY zg%|DnuxUgK$j!?o5^eU!y*-y*%Qg&qzN~Y$6Nk%H4E>de)$>)I6n|af93=d1E!5*Y za3U+*hMoQz%*fdf_x}gdw zUAElxJ3$=5#iM)b(wvj#OY5giohlDQ1Y-nL^ZHP{ip<*6{qZtvQGpzMz0e61G#DU% zyovQHBH4!P-v$g*c*=Qh>2LQfLByG{o~_KY!IR>wqJ8UJ#)q=Ji#H#>ZIQ^Ob&kq|fS`u7}|p 
zb{A-p&-}&SHT3*i(v>-}yfCR_Xj1QT+`f<9y&&3i6Ju6NY~!BS#sq zTG+u1D+LQhg7s0RYw5)B2`DX$F35+daM$$DXRRf^I4$;_WSJ0jK0Dkt1AU!$oJ^ZN zQ~>2;oHL$zW4~$T#(xA-66X7`TR|=gjFb?HeZ#(QO^q<}8t-7%@)aY&`qE2oLD$3@ zadu;&3}J%W&CK^_vYUhnhTH8~Rq>1^1Bh&s_k>{p|J)5VK;%ec7%|ay8FS!BpJJ6$ ze(yd`2;_*2caN<7)*Uz5E5|lLmA~FPW`V7pzy z)#e=fZWx^>D@>snOvyWy-D8S-iq78SE8ak3WK&mUb2AFABb*P&&e`nQbY?Uo?HAgM z%P{EK_cI+=V%2va0Z#{z9UGE!GSxIX-aAzn;+FI2<%2`_4m-$Lg!TD#4GiduY8C*9T5&ehqm$|$cT zHWDg*;?50R-EsVR{#<;{s?ic_qSL>Yl=ok1I9vt^rM!p zeDmz-*NzYtqHrF;1!r55ZEum%BtTl6XWvmQaVg_Q?U!5a?C$Z1g!HCVE;3c$iioPW z`tMA59C8XOlSW=+?a-iV4?#yZa3#w{FlG}%LC@%`h=DNCIrBF<`HDN{_hLch+bA*n z8t9fr>sv_}K)j@ku)e!=n$M2>YrJE`pHy7=@QQuFQ1>xlpdLq?p^^k*A_2|&1C!EN zF8XCpWI*$yR#N+SqJb<*P83m!09?#+m@){gV6#>vt7cmw)Ry24i1{7bEVd#C>TAZR zU)+qF%v}xC4#!h&Q3Cc4mVkPUV4m{pUO2fd@ zh&V{Oq0oa^?E7@6TdVoHrJ(sD9y;;yuv`O!-(YaXyW%qQ44|aaem^QJU_(F?f+3w zj5Qy@QwTPvbAj4qDil;iujoOis&`C&C$bL`A73>HzoUS9P5r9DcbqTHIG;V?vxMOU z0^=wzc1p*&@N0%t^QA1e*iH@WdoW3DjU#O%$38XD$0-Chm$ zC4a+8m2#R70P`Sb+J%+t<0bnNGdHC0UcVfv+A(Bd%o zL!MmjiK;AQ6rZ3a<|;q9mHAm?#*sElyQA1ygP#d_`kErZs`+EudZHP`BE*cg?kaE& zZnwDrIq&Ny7~0`$0v4iha-2i=RP~6xpxngBrb$rD;nl>8E1avR47Texzx)PP?iPFA z)>l9P`?!6G;qWGgg$Lv^c932oI73KxggK6L-whxGS6QR|UT>GerZP|^iul?fkGXN& zW#>E;21%^!e_sdW;5w#8L4u5|TxmAQ5h~B3QzWm4G(J6sETj3Km^Js|r4tyfaa5XY zXF_ER64Xg^ig1MW=uJ+SUKU|Xyg34ljH&-5k^yOkMKThs;1^xGMVX>DeUpJgh($#% zYz|w%p|l4_x$E-kgig<=3r)No(MnKjsnI5tnrn8ogQ!LRdeeb*`V?k)%RaVdm3kbC zEbu>bzO!bw@T$;d4JcIMIt;xWKI*T?CC$z7{93;srAlM`e$=^R#k|GN`JMHd@r(>5 z$Nx%E=G4)}RdCG1kC}7C!JBZ=C{Vs1Dv9!(BwTU}-K=pO`zcB%LsZ^%pM_8qYR%=% zMDHYl^re){DKA6q+0oy6pv$FIyoMw=jt8pDr=TQCuw>M&>t(=y@;Tr0w38XS}tI^H~$B~74iz9-^4K=kbdB9O5 zfD(S)eSf*m`fJYN^(xb6 z;Cz2q*sN!U<7{Al6&x5N#-x;Sy$G^jH_p{_TfouBJpW4f4i>@$&u>t%96GQHYN!_k zrX2z;{)j@6xdG!(VH6)@yU-4o@TO|cIF=>6oQfS)$5wRdE^L!-L*!6fuW=lX;7fUL zcqs8ymPfMc3%_v50b%9rzI_ssOTG7!gYsc5+kWv1#iA>Lh$2xS1@>d;BNI>lr7CvO z;nJ)UBrs`xlt0{t$0!YW7gF>;}6&><>6A%ktU0W10Stp)>iW=^= zxNa|z&%Rf%l*kdMuNNv2o)u<9AsLLp#R3GCLGomC 
z2CdqbQ`pNoBU9W6@uzIc|Ik^VfG7XZ8u`axC@foBibSa)BvjVdubJ8Lv>MtmI=?0C z&Rk2DmHs3?n{#iipQcAmtgf6+97Jjw!)8UxuQV-XEJ4n@Lxy_xdz&IbbvJaNFG(M89wI9_dBWa^m^A12Joh z3)$5z$`Oj&w#?ncpOTy}P>IA_`6VSDWWRF*T%MA<7eJJ#0q~0&ky(}I6Mjt6o&cix;9G!)D`7Bx=~~(j?9O+=>v4)z32m#V$JK zcM7ho6@;qWpIm7S3j;Lp^qXSmG&;frnr25VdfP`~wAxv&E3fUGSot zeVcXWC;|qyul~(o858eDoQJGXeD%Yx?9Me`_8)g$oOyQ%o3`G|iRHlAjxJw6(DjA* zGcNwva7Kv4X8{la$<`~%C~|ZekU8AfGpTy1-Z8f4KM%7MCIr_24X9V;Sb&>nLM`a3 z+yIz)C$BB>qXl7i^hWb4KD&aJH53YHi+H!L!`IM9@g)ahg@cP1cOeQ&sMJ7T>MB>i zoB&$V+;7}S7oWHb>mi=9AaLu3Vw9TRef?AJOFL;u-8ouLNDPjK9_aybi3>VQ1}58K zL1BDLx?B$w8OJlyieTa8ut#O6Xy~UveR707q5|+RTCU<6g-(LD?popzCk0F+dB~;R z6n`c78n^TjypfIita3U;-a?6BU1Ksi$iCl2{Q2xwMR?dM(N$2ChA?S9Isu?_Q|iYUy!k|I}^r`~Lj2COP)jyCuaq`VH%>x;^^k(#}_-9QAkQ zJ|X#XUYBp5FT~_39q|an+Tz*o_*`{w?oh)@V(UJ5QFNkAa7NDWMf9Rycv|&ey(wcE zwCi-+-})C-=ckT=qf?XEWpB z51m8`b*MN=Q^t1tJ4`qFo6!oI{Q@+Y9B%cai~W5 zhWNHV=&l z9PSHBQLp`M4?p<(35=Sa0b^dgE!8k*N1rR+Hs&Q@Yq?JC0l67ZVE?c%O({f5wQzR@~PoE+wXGS{q~3IjS**3B!c z*%1D2XTq-e&0$3wOTT_Pbj(rt5Ur&->q({_i=iB}MK#P0q}nzy%STrX8+oJF_=+Yv z94X{Znr1|JJF{}&7sG&uZ>_P3Z2!amJPVSN|3P8e<<36n*{oggXm6Gv6%(=X=& zUoyA?!FL zQ3tT?ZA#HT@BHKjGF#+U=cFZy|K{~SLg%MUQ4F}M0lc5_O2{2HUwY9=A1&_ac0Xlh z_yjv>pKK!!!2@PBAUgm%|5B?q7E9QsjcYAbHmEL-%zxwz=brGnBB=&vV38>ya9u5Y z%W%H61z1d=2dA=Iyjb3*(tw!$-27>JBIOTer>ooWW`l%J_7d;=>e7TL=_@H1jcrPy zi@o`f=buk6j)$8&2rx~s*Ojtc#ZJ1=7)i>!Sa>da3b`@<5`e)IWLIOLY9HVOc^i^+tQl!+`VFM@#rA<4+#E&nYxLz>q)xFNti*r`eQhcBgxi6JZ7uyC~ zVR~f~?7Q7;;Rk$`O>=s0b5FrU1B$@5|wHu@q;$6F^moiq2vO%X9laE!WEO#lWXQS7~vOP8VIMe#7>_@LL@M_vj2TRKH zbn!3SMnOVy>5LSs!rJl*nEie)Lfo#4@iwI}sO^;IHsYmPE}9^JLoRX|?}}2I;VU^2 z%2+8oUWB~x(aCKXDWPw$j46lWj!u!m0eY;De>#v}T+?8|ijFf~QvQ zf`QN;?LCG%act8+)+LU$kNhzCw~hNOa$&P3`%zmMFUkJ_&TBbp(@4*=I~GDGOXxYb zGK+{^yZ*wBP_V#L0K9_`<`laHU|7f07(DipZic=Fu;)RHj=r)?>P6!o21E70Quf^y_)c z)ps9!(Rk+g4yJ_Q2yCcEfCUwq^ZpVf?0>U!yS}qqCv^6LO<|RD=xw=FxYUviUAhmI zHn1*xGm>N|@6{Wk#&>!z^WPE75N){QC|F)X+uXhGSk^a`557QLkn)Y*>3<~9ZDqFx z9E!{(u;1m$D&xklQuRlcr7rVS7SivCY0Uqij4k1QdjE;fNUVOuf97EVbi@U2MJB@| 
zEBdBx1TLn{T=y(&4i}<8wt}n`zER8ZVzeNZe!~w{>=1qN3QCJOXew-zNw2$~DNQT$ zEtY*bc<*PX2l0!O=jW}RrFatf5p;?-eH^q-f%#7)R$URqB>Hs>0!#n{qY)ejMvVNM z!Fx1_rKhx)wLg8F=4dl(nILW;OM%FvUK=a*tVp#WoGDVuo-JYiw?p}VcSr^?_H&xy z{p-}!0svKvd;#=Ib` z!dKnyogqf0!(^6;mm-;(NTh^KF^9+{4H9b^xmeb3a-^AekNx@Bx#Egn{BCUUX2H+S z0WVZv?R}r^Xd~8oc+%`VvX(KQ=7|sx&)?16Wo?b;@k99# zqYrQ=!$9S|LWr?wzwn0#W9`Jp~#RRI?` zKvZQmqwc8VqTHZ-2v0-hqYaqp2pKnD188D8bJ6Nb?2$3=-qVP;h4-up|qD7)*D3X0GG$*G>q!g8kku4;$ zL`nUw&(!(;zR&A<{)Fe{yv{kVaXN1Iecjjfe!s8lzAn`6?7r6-iajJ>Ym)3^D#)C0 zlz3BdeCTfKETpSqh@hiq)bmJh2~wP)rsbAQVpLU(uFC(|JQ4F;D8#;O<3a;?>y#Wa zUY|3OJ)j?pzId_Ykdw>Z(0m;g{O*$5UytvKkDVKQ^w}~0x~1b(9nEea++lA?IYH1DlaBx5pMwjhDc>|dOS`-I{Rtq4=A@}%HT ze_ScuW5t$UHwzT7dD)x(_&(RF2x)N@J+Dnaa+S4jznxgFo|;y7>l-C@5?@3YP%Tvm zQ_@ebnSONse(c7epqHU!8ofL%QP*+EbmvDHWE%f^@u$LlFFBl)5(J)m-kENiPv35P z$a&W1o#YcPX_a|56{Cv;DN=ALHo#5_cIxh^F@32O<#}p4j&mMMH%t^;l_Ukf`5Tw- ze%X=T_e~es#zL;2c+%*Hlvjyi5URIQcftGfvFE=lNlZ@kYh1qwQ=oVDBY!S^MPhXq z)7)1yfOM|lwaLHGAf56h=?tTbNRy*Ydmi5CG#*4w16_zsgfq%*$vty)&7UE`@nuf; z`R+T1qtCqu)BwxMsr$VR#%&Yd86=01d-cKYDbifWGLi4$IZ{!&j(={1g#$ASJp5At zRf{svP@U+;wbP#D_T>67>B>h=CL7)Sz*u(-|01l5iY-D_*u51uH62@BY!q1JhYN)h z`O@v+At=6TNm6KSFDMAUDLTSV|MyYn0ID?)-FoNZVv&EhFYWsDmI8^9orV=MRC0k9_NoK|@1A_R!AMC_Pn^S#awgriFWQPw-z_Gad`&0$*MedLp> z`m=BM;OA)+Qwv*}RXupH@ARK}X^#@7)(ptK&VZ?S-)FuuHwPrn4W$oxt^D|;-=Muj z`RVPn4QiQzQNPedF`f-tH@Vv zvFzWSdd1wrG?;1Hb`rUlbL_v2p&q!eF-~^-UGe5jwZj2aE)5PHKBK)H?@!P}Se7q= zmqV^+khT~yDeRliHM#W?zhnmjWi`QxJiv5E<7Tn?z!DNydqHjUS2Zl58qc11j~>hWgSrBQbf675SWVzI`wOxLe18tSSJg zB1^5yO+N`(t$1z=EW2eA#fsEHVONru2~!$uaWH&FzAU(^Qj)VF(O;TQ6XGmvrMCxdYfkY)9DRw1UiBfySfJ*&6_+8#ezEjp>3?h1(M_ITuuseobdwhlzf@hJRoAnxd+ne>0i#v^)<(*D5f_x2f`Xv7bB0#0kt$0A-r^t_DDJZ+| z+r;)aU!k!VUeSzocvZoWeTIA!&e3FMlx!yaP{+Yu$ z_x<`xlV7T^z5r=a)A93lcjW^)sKn%Uny3`>9eccHM>Xe*eIF~R=y1+w&<7VTSLI*W zTwijSzNB#a*pxe5M9xIc(q%0h7G$P-9MvNIfMx~A;KBvwCBVB`{81)UN{v2br4AAfl2m5#~2+qxvLA#-01F z?tHSNHo&~&J{6zq{(y@g0YY8n4HT&2czh>CHvE%r1hjK##c2srcZiuvun-~#7rgtd z{3!P+#yiP!^Jm?Kik+j+8iKwyFWBxk2|_XM 
zeE~DizI`8c=;l8L4#$J0ayGlqL;@E+J2&z^vJhXl1GuH^r;&5grY9H(gGPSt1AU0g zVcxssprBb`l%dA@u+&uC^F*$|Z=hrui??h*;&9CNe%Xjbo>p#kEWQ>tOYZYX8wx{M zzS?{s4NLS`v-~9pb!;`Smog%Fy67y_dXjw=W{wU{qvf&#J>x8POvz2Oj(PHF0a$6| zE^xyyY*{)*j$idYm}h&gg~pJ|e&tEiVJ(38lzm=;UZ>4gxINgL_XrLMHL_R|@gDUS zNHQm(gv`{zmyP7NE({=S&rJeFQyErusHl4mDj0Eng@uJrzY6A-Yi5q$s<)B3Wcya4 zIb_^C)A904V;30ehfVqOJ-e zZN_}k^&G8H^i4YbF*ky}04!)*PeP%_xa-n=epubG#so688>m|oX1Yk z-%9-TwM*NSiYu=S)WMO8feS!ZU3^3A6IhrtRQ;)x>ZK#Y55n0?>(@0m0UJ3Dk2){s z+O36D{oA7)VL(#QTljKzbMF>)FTROt*(RZYx0<&8N8`>#=gP2ff9=7o<%fyKsduuc zJsR@^DL! zpsYrRzO=6LaG!v;U097iwFLo- zx}f8z5Iu?+KfE)x*iwcX4g&jhz`P@h*t?6CWISCpq5Zk;g)!o}8t ziKBVw4K`6FBqs0_ka&3Bq|w(m?sN#!-a~l16DhlFL_A4L?m(m=@F$K$q0)No|02W! zC;js(9=x3+o2hVVbxE>Z_R`h|sJ7(X1~*!+nb}g4_NNg8_6obQjUB2N`M(rNe#5=b6D7L%X^7dqB+{u?KMh?FWNtKn9wn zRIkGl8#;^7n%s$E&O3bQe5LhdTFrH&!TJZTL?L{r(+l%sLZ)cEK#^VFf4e9V#}tM& z0`%-@8^#a`87mY3497yPs`&7Gp2CRpTs*8Y4^waY=zg+BUm@`&wRUICKw-z1CxSnc zd(>;O@`Y#1@mbZvsw`qf`Ta%|d&`}d|JXFSgckToVgKRVUxgS?2yY{T4|3fSl5Cb! 
z9fuHYy_4B>boQK`2hv(nU!0ykRIta; zGq;^JCGVh{_YU${Eo0`#*7rb3=Y*wBb#A1dr1dpw+cwvpjOvPOxbA=ViJO9YRl0n~ zqfR6HW0~|MslS){2#6iCUN$Co_XAmeOhu;IfxzG(m+kc5@?Zy(;JrYgtOId3&niYG zz){d@|GLA_4(49^WpuBA+0f}6W6zI)zS>>)OdiE8-SDq>d3jpJN(2D85Oax&79$ws zb{v|MnH5+9Q~5AIzw?cF1kf77DXxPe9>y> zla}K#!wlz62kyIs}X<5vgUg`S^3Sxa3e#D{?L2n)-}t>8lZ^wF0;;gZrOhIIdnv}UsS-3_QK zz>lIOs3#FW=)C-1=p45AZW_J2^ZNRY2dN^=aN0CCHD+b#2^IpaN>$_J)G{gYh~UnmlE9Tw=#`3 zITBrE>G+JRL22s6eW#UL_QPvJ($=RaP< zRw+6s)GFdbRegDK^p#V;U3n%rAl$@9*|DeD_?MvS-3OhHK?PYg6<6hpyqhzj`G42F z%F~q0PrA@d2(h$mKYmkbJRkaX%(KxvfHfRO82~7VwBYFI`9z4BkfA;HpIkfj+8VPB zTYN_#R;K4}T5ifR;YU5kpVZf@&wKendD2FpL2WR;iXXBc@CLaKl8z0)vTT^W*||Js zb;G%f+aAfzDMJZ@47T98MfdKVedcEqv78lKyka7{zfy;e%BSb(eQ=a;jt^7Q!v;$mA3Sbr7-_Z z228zfU;{OFRz3fmkiQI-T zCdClCiEP03v>t{32m`oJZz1I!|4#w|?!7{RBQFa;9{i8`5E)UlnuEyWm_qEl zA)+w%`RVb#KbhNr&B?iFS9`-_bPs!5YTd2Z-vk%ca_gv~EB3KcL7YJKluZ33s*r zYTi?8%Ahbes2Ub+#2_=?1peapbYnmnF06Zd1bWImzQ4gUi{5cSR2p%2pBzbjk%v6^ zC7UfAtt>d`AdPTZfGGL#-J6r=LXX5tcHQZ14Vw>;)Gk6H(*<5(> z2zy03bt@IfU!~;!2#tkrRP3k&KZe%m@7_g5GSx z_8w+S0wk1By_TG7b%lzA35#@wD+1x6yJJU?ILKZVw@?l$s@kc6J((2iZOnMORqF}a z$s2UYNhBmN!;^Jyl<$^C-}#XOUGz!BFwCzqlCL&N;Z91N0PRk)0j*}_A5?gy_@~wy z#&M;d2vA3UWQ``(cTVZeAH(iup*v?%x*GJb>Z@lrJuy*(AbFMxB#~^pN?aIWz}yrZ z=*@a3=t*C*An!9sWB>*zm(Mo>$91saBk5Xt-L1w7`H!Btb(*~I_wo7gZ@W>RU``cU z&a9H|Im5F2-#fT{UcUW@0{MkBa!1$Ef;}g%_>HMUQY=KkXz2MqsI?s4U)5=%Vbx;A z&_oES!{3y_+j#|~-PWv)Nl$*bZ{7I~WU!o8sBGb6B5!_4$7%s7edL6)!)xlw^ye zH#dq%+x2bZ1O0#-G%pz>u@dkSh*7>m2dWo>nMJZ}&bR&yAiaZov*`Zg&&$`o;>t5< z!L-0M+RdPs1!r>_nR~5KJ{kJD4>hsnpIc8pBIL+wuv<$7f#G!;A!^*>t=U;02MA>6 z(x|~v)W($!9eW5_ZpVd+($5!&peMze;K2-HTg6c2R;M7)eaoyMIXC6caTniEXp+X? zKZXR@QA-&AjSjQz8+GLfb0Y2*@jOLWLDuxDblZ-(KnW0r65uSFD^tf<;G{a;EA$g{eaS+|<(7-V8|^oA?tNl@pWe zRz5GyvPP4)QZ-1mJcu6sSCu1y<-@!;Z|>Yff=;;Nkua71S_OO(7XXC(o+8ZI?3$U4 zpwaxkijvQK72Al~Ou$9-%zd;{kAutgxRcj+V^>=z?fm^! 
z=A)Q-EyEcE^p@7!SDbY#F!l83F3kiTc&PfUoNZ{W93JzG250-$KE*#u@*AfM-+c#W>K?W)*-+6+0z^_d;X zSopa1rX0uyI$_~C@nx@V# zORno<+Mk%B%;4KzEqME1C6-8zwNyt^zsI#~rG)}&3LxeuWS3Xa(Ur_F84M&Oni30Q9 zOnZ9B8niQ*J+2Ci4csLHLmug4JKI4NRA`c=>^M7U(R2gWLG%VbtrM|7S@&k(;RVp3 z?;T3Jb;FAV)OtSCPAU{kDvCl`T%=Vz^OI9a8x{C!&DC59Ct!8`^C{)wE%d#T4&7JhLfr-_p~!mg7vbJbGDxvveH z+&lnD&RMlL5VBUOW_+ut`FPm4h=aGkHvB?wa3B&ON4n0@^^#`5oyZM?^+@SugtyQI z4z#nL7%Irq%V?(1lag@)rHRiuE70OkwUN=u^V9&s+Z7b--@b6r5=-^AvkTgj^dk># z2Z1!~_F)-NriU6M6?z??%;+vm?Ht{6`?62dwT!3FkG)3pru-M7Zueu49;asE`dt-o zwuWEoXRF}an*!BcTA!Flp<0I~dNkmSuFz7%7VQW_>iukz)L(k_%^vbXdwSU}WV;}~ ztC9MLVnxlOn1;TJq=ry-Xvxy|Ycb>+2ooefEsk^o(NVDH@-OU7o6UWNl8=qUbdV&s z3FPvak*~Jtd}d+Atiat!y%F{N&~fZ;+VrQ^^9~l#2GCc!D--Yg2+|wCMU?{>zCtSk zOW9cwqR3-^#LQ>I#R!xcwS>LwT0oJ-VD19)@x&knG5$F&h9UeXn;n>&;6vG4r|AA; z&kKUiCMGiuW(TGpnS1j@RbA|(iUhMA_UDaB_rA)OtGBMLA3(TX{-e#OI-Q{e$zky9 zkn%GgGi#?Y0>}gL$+h6n&*W|aVEc&SYX1YBD2F%ts}pvW@W4y1DR0|vS{#xHphni{ z95q|YfYekwC%j^L0FNd>@#Trd=j3GDMNI?$R}5NW12W0$GidSfKyFLdo$ql6xx(Sa zbWVbR8J&TGH;9S6v`Ma}KT}3jtX#ow<#TB%TNH94=Qj#~ z64|i)t-Yx2c0r1)KzNJ$2z>Y|R+Z^Y$d5A1Jw+AWkOkf86U;+85P|J_O+k-xDOJJ zt>{+X`K_6hY+1BuF}Zk5Do$0lk!H@BoojF5$LzXiUb9C#WQ%$70CuuNd`0+jYOefd z9-4c`R@2SH-NGb^>CT!~Tzjegki>Pa(gG|1blC4)pLMX@ykNyA)xw}ud|$><43Y5B z_X-+6+m{JGKs7+^!C0(WHi&GcmXc^QmApN??&+rb4-ZnU=}CK@54Q8>aw~YXq02c6 z9s6-LHw3MZy^a6H?p&s@f|{EP84)Y;?%qNBHtM&surRRK91T@=ig;zLr8xRxw12bt z26bikX4?x7H2*PEuHi*yd>%PuOH5&d#Tm-}=17z(jl9Z9cTU;tr14-2GfXApu`(fV z4i8scv>G=?6@5Nc z+k+``)Zm@LcN*d)0-F68cAQ3N{HQwW%}B8|?>^PttO* zReFL^flhoe*#7K#nPUUA@#%xpUgBsIR6F4%ep51eH)nRvb)*qshKC7hJfBDF=0Y*m zL^*F5#)`mNE3-9*p%E2br3>Ri==U?oQE@4(1>@JK6r=iOlRfZcSM%L(_E+76D|S@v!v+YYRIMI5=U8DM7D++LN8 zf&ccfPYbPMU#2r`mqUM|Cl{*;|NZz;>1+p*SMknAl=6exRf-f{k)ij4?Od)030lN?TXqK^!J|@kp>T#ql!nN)4 zewAwDcE8j7Td@UfAx=4lZ~te{24KJh?AnfT3yIR7$Ji~;(O>+KAdOqV^Q~N!z-;61&lIOzfUI6=W_>=^Dq5a@i?j!Xpo48bM+b;jpx5>=cqRcpI z>+{zGJ!c4{J`Uqz<{R6T`tN}$G{I}FR~)!zp-Zp=;YrO+@k_(BQDc?an+EMx8O!nl zKAik46)%TA`7(>#Jb?+nR-BL8_w|k)?_W%s=SfjWci#f 
zwbL4;NmBIV&b91sMh2W;x>sH#i(ZFAp)<)$9&)H4&LJk425k-eWzilc&zN*0nFeJA zo3^YYyJ<#xkJcF>bbqhSEmubGd%rnB$8?z(kUO8d%1&^DfPpwA=)Ck672;I3ZJCj% z@tVf4r7z5|66<80t;vhoWxXEE%>WlZ*07o(l*xOsHmUp>|H>(toxWxscu1(o@l3T^ zdxPmi8HQ4su>t1&+rf6C3f>tl=k~u0YpA_o&;2;=LtK4?Cw(tGcYK+4ixYLe*;jvV zBd>OO8m^nmn5DJm&|BBJo+I1j+AN9Ib=0W*je_6{OVvm{NnK2a8Yt`CP@57th8&xg z8FQJSaaO!6#Gol3DyzPU;^u<27nF0QuWo%Sj+PRML%Ux&lH6M-`JMc3T|WI=Fg=q^ zC2!wY(SflWG(xmXN+^LV`?vmbXseIBTcU!gAl|FN_2bFY{1?Zs$T;?0I;s;OhL?0| zl3v|Nej*S|3LDy8fSIjD~##!{xGTd_zrQk%AU3-Q7lRY)o_QP5e8(+Yh{7zDf)G=>!r$O^}}mvHVC-^qoi z8AVEYuX|wv6S8wJXY&}R!BVG}tbD;uxOAVTvaY~XFZ{D)53lOOe|}$i2O}@)D*d8o zC*0Isf6e0WVUJv&7B~BP?ZZ#I-88oszr&HEb!qIt)EN)d(xdQKvI{#p(to&a@{Si2 zj_C$aKh<<#h$<=IDZG57co&0(X^oHU{OHHn)E8AuefCXp0uDCT;K89w|2VK4-v~0= z$T;{e{mA$l!EJQf@sMrbkvEFir{)HU*O1-YYZEM_Nj=U@HNnB*cBHk`-#q;;+wkta z?ITuZfi2Ll}tI zqN5VL8zTi+$ai9qHQqQf7Ax#|wyyHBUc|w+EY4hM@ThVJvoQ_VgBpTfhuzy8sSBN! zOAXk4UU>GtQ1BYSwI*l1e_H+xyOP3Kw!x74*u#QvA3WxCVV0nAB~$C2h5nfO*D&-B ztn-gOE`%qWK)=a(d`2fnf&zw+ zFRC1wa866xf7QS3vNB!j`Owqmsgs^kBRzyCwN?)wotE?%1FIuBsE@54*pu#t_K`xL zB&>;1Y|FGUgeT9D6Mj5%ob7>4*|rYUaYDdq6n$Y$7CzerkYp$9Sn+pLc+K)-8Dj_X z=P?5yOF1g4?LP1X(&dUcu>cT+hwKE2$wP1Duzv3`rl9W1E7NxMgb)gD@wbC)ZLfxU znqOVJCz79S_%_-xcDK`ol_P;ucJXZAtpgiYB9qR&CyvF^Pw~vZ;VyV_dBj}ByCuUy~If46v#BF7J!#!Go$#c>lZ&t&r6lQyiWwDhS3WhRL}ScMp(8Xq(g zcP4?CHVh@H)9zjPYOh``FYP$^T6@;2d&`*`h#2CA7+BkYu-p5=p{L8=t@{XltNf^4 zh67Rd2UG56J6?yVdOv#c(7AGZ;tfWqz;gG#D^)1T&}`52(?U2Sb|e;V5N@XX@k~ORoTs~Kx)~APrye-Pd(e4G8~(#f`Dqq3 zjn}2Q!&v&l?kxJ5|FJ>#+T>nTRCbUP_cVam^r)#~1c4kCOg2^Na!@9mr9G_%(45jpa2H!pmq`MxP3v|`!<10^} zPayD!F{?#PTeHUvl!a-zrhB$d&34aclE#u4sIpKh8+}keIhP;7?G$V@`U4@4A{e1H z9;F^(Ns1F-6H=yNkDVrehc2utdGC&6DmVgh{h4zp3qS@xpJ8Mn@DK}MQsY%zn#GAN zmpBgSaa}f%Ldi$${mi_NzA*Q^;cxzqzBps?yekGaSuVfb%qyrIc}jMlfSuM zM$u08GzR!eJ@e8zqLrFY_jQPyF~fJxrW-2vUr?j_v;2-& ze-`<&Y_bW-+6hzXpivph_-)%~FMopky%0im-n((e{p_HpIJhx-n#d|k+>%X{C|$)n zKT2GfKg6FXEo~cLaS|&&m~iQ&=&kJWr@65kHl>yAV?Nw;vag5onCiCzCU 
zJw>L#qjMpKa1*Z54e6%M4}+i@e@up7)kD0d+^6=^@hznOPd*PzY*QAg;{YJ*H~w!B z??T>O#Z{`mE;D}KLF0i1&CvCMZCPR}7;0Al=lm`Pkv_Nx)|@d+VfcVe=gi0>+gZj7 z%nR=TPg8b@QSUD%b=$-7SJOnze1-c@m?Q802XsYyqE+Pms0UE+0>Aa;mI?AwnlElJt}K|Mx#^^B8h(+h%x)K%I}uNkK?U*L$(aQUbDqW-L#)RG-AwbXKX5ym^-67 zP%mkAR>RfARxC%*KM5ZZ>BJ|6puIlz@wG#TH2WNw;XDN=a<|#(MQ*u%I`67kTNXW^ z*6TLi9pNXxpOUOdUjNS{w!#*^wk#P&UsEh?`2$w-HFmM>!uD5yBkEfXpzZ<$o15sR zltIUzw{=$??)FV<5sWGq4S14{6BY{8i$1m(7f2bglI;_%$N&0k&FgO>0`Vy z7mn0daZvcZU+aax4pZdxV589e2lw)J&vi!{&N+?mRRKMro z4{ZPme;}3{Fh8)c1{nx58#(+TIaDTy5RH;;yzW;aXyOvt-<)3ZnG*gKq+P?VN&*Lb zV2s3^ynUgduzJDUpez0aIIKPd+`{QggR6Kl^~f zCMo=N^!rOE+z_&%vhIEGf?Tq}nM4KmTmN&Aq5z#QPq1}tO4`e&WusAI<@X1%Bh%{pSZ!m6?-(WS@|JX@_w=b zR^DN3@E#)x;)*hraI`&xHghOT#LPe+SYmEfU`^9O(@rPKJ#^?jOv zR;o>%zrnwz_l|Xh#BoyM`dYmv25Bl8TLAQBEEm`_ z;XsguI8AqfvQ`Df4HipiO2T^u>xF&`s`vU|{V9AD6jtb}Cc)ig)(U zbk2*T%PPU)lsTj-^S8XC@7Kr_?2CUScND~+GZA26+8^*9(OAAi8 zoaT>F%5YLC|1#1laNkP`r%B5RP15=*b=51(G3POUxSOeataFX}H2Jsv6A1+A{Rc-)GOR1LD6N)qbg7iDH!S?S+~6Kx|E?UGJ#X|R*nJyN)kK*v?s%f`zL z0QY1(*`OjcP>FMhlOH!bQ{I5Rc+-Byr|VhYCb;3`ci4le=D+Iic8-6ymd!T2oI*|b z$i2v?4Q@AL7YI-E!^j1?|82fUxJmKSncR?fTdCh!CtuQ;&;O3}oIeD4>@p%oimsDI6iYWXg^<$;UfUJlEkrOW!9a@(da#IvzZi--|-5#?%piaI{a+d zq%uKI4j)WGG9B|>67fEM7ZG{Z*q;ika^dYS$dqqnQy{DnQ*Cwy(^S=80NyD17GXWV zndWOeU)+F=XEoW#RKw%ezKjP3qZWgG#4iewq!igOuj?aVFnQ5 ztfpYvIIK|Ss)-TAz&QE7FdCXBe4Rb{xcRon-Ml6xrl!dG2z6hiM$FCD+Ch&#UW zVa;oJs0Ul(^?@5MWsQvIkY}&+X?}&PTVv%ZC#->U4#qbdF{G%0^s#q)c>(xU8UOZ< z#yP)4w7yctMXJHh8umZSDUDvjyrXIK7NHg|CGt_OLA`{u&cHS#ky3$cXZbqz#_uR51K*w)Vhbi>$VrK z2e$4@FgcrD*O*{?X2LdQluzbMrwMeK+S%bwL(&nmQduHEvD3Na!nyzN_JZ0&Q_2b+iSvk{e zjb;V|ISGfIlRL7%NqW@WG49p`f(+r&l&lUh74fjjmeWu%%*o4Y*XsXKIANS|f>nbPR5u^&8 z%_+ItXoxHYAy^wiFh%ivFlQBj(jU~{5Juq5!Rc<4H#InUlxFD7I`$gI4E%~^R)F`? 
z4a+E}RmI;W#Nn$hbZ$_xdhNiqo>PR0%RK~oLULB4R9uZXMF4=9m8K|ni`B#3@6zAP zp!a<_G!jN16vm0$UE3&uuXs`=KoL!uDp6;@uZHv5h$BZNt$IA4 zKjhHWR89x&b>eFj@j6?#L8KyRW+H?vVXpsP8ha44L&<8F!hELm_tXaLO63>jrz&^k zuJe@Y^Ml}8%#brT7~|DW3`B&>^S2>f6RCTdVho5p;nGc?neoqb{69H?34k!?M)`t` zkR`S;5T!mxrT@YPH0kh>6G#rFN8H`Rmf-_{V14B@ApPYxxlHJVC%3e#MO4Hnvf2-9Q%v=I8K z+)Nw=!O{q5&DqoE?6GaiEVP8B%L;3{a$<8_C z1Y^`@j$d-5fXiGzt>>fECX5(x!3Gh%Nre*b!r=Agh%n4iJAgwr^^dAwKf8k}KkucI zv72|w#60pdq5tsFbC#z7HCCRb4h$#`ohq|ceu|rtsdI$E+i~os>77n?3j0Fm#xD%) z9XdgNf`*^Ckrrcgp;J=d$F;Lsgpytt9W}9qD{427HKv8F)I8CIar!j>hA*8pFi=)) zcBM8=d>7a1e>%y?8uXb67)Fd2g#5HQ)G$1YtA2tC zN2)-)R9pzMQ!(XU-FCB9J_`G_H_1&k=i6O7x@py%3@hca7{9XS;$wDUx4SJ_REJMe*e$+fIFGnlc z(w4RKvbRc~vjW?9ifol~!2hW7QvtA!V9gp{Pdk;2vR&|C`v1y3K{2Qz&OSn(Dxc3k ztHm(8q$}7er(|#&y=w&;8BQtKNJW_DsJ(m*F)T!oYbXVN+d_A)0^TJo#ix(3=Gyu7 zQ3JM;V-WE2Km|0*_qbUGSlwy2iG}mwjS?K#?QDSoY1nA4KuIQIx~cPi@@II32JLC#~Xj3cOE$;G1Y);fdrSK=5x z^hqLHu!q#!fxHoGvXOc1(w@GKc7glfNvE{>-mLHE*5bO*n@s(-_e(l1QTbeUCZ1mX z2A!yC|3%&ZZ6=u$&?5jIiFv(iY^C=;^Q#Vhpp63D$*=!5zXe?+_&hnt>HlFfg8Mit z9Lp?w7?UzZ2H9PA>U;PNUquChT1pUmDUO>{ePrU7p2c>D$C4Xx?+#yF zgjbq8LIIWAH(Jz$IBA?5z}}hzbO~>HIO)@ouML*v|EyUS@#ec}7IsWE05WA^{|VP6 z?b~4jz=AxcL)g5x0S*%vnDM@Pq-7cUzZ|FK z_u1!^cm?Zroc{2hr`r*JID%?JQ8k3T0G&-6JUiYXa>OxTWWsL_d3`Te0Z##sdkqwMJlEvrz63e`WC|!K8lq`QFY2E3*5&nx*nD z8i^}d-7+0(Rb-8NxBb!di-PbWUORUZ5Br=O{M?3sA_J(Cl zyNzsVO*dvh&lu8uf7k{2!*{6g_iXeSE3gSMGG)3Jf%N=<}xx$ z_XEYt1t8~g^IKTvaNYq`+R7g_fgX-ohxbEy0oHy|>{TdmH-dk}gno)hgj$3ejQGiG z$3R!i9K(167SNBku4Id3OlBJfQRuEeejfh(WaULeuuwQwUx*S;jTzXI)Rr|4u+P=K zIbLQn+WP%a&%mXR@xE$(p6r~`ry;GOStZeAHtZDtNk;OK4X3HXrIQ*THc@-*75P=u z1q4C>m!BeE_f9a=`8@K28`9t}znMO~Z*j4P$2mU`JZdKO9iZz%i9fk7l~B}n$1<(O zgc6f>on8SL9t z^!f5$qNp8dAzBW`@{z)Fi#_~U_a>0h{}tZ_zP*%qnz7HMo7tCWa1x?a*9r|{!47** z4mt>cu)e;zX{Rzv18N-O=DBD8gI(OS45-MLBM9g^OgY$hV?-}8l=Wiz!(hP-qAcYc zlF&&Il6u6Dg%){9SNlEaGgBfu502q`ua&**e|Vu`&Fs9W6lM~a;YdHjbMFVako^nh z%joNcIMT_D5#)QBuN_S_E|_Yt@8xTeb!niXvYg&XVv)a1$F?ZXGZKuY;oU5=40i 
zW<6u%P~9)S*aF6A^}og>%e{@Mp$PLN@btG8?h^4*#LcNJ$kH_=(2c1gVczFQm615G zGCxG}Ri(juY{X*d7(ClL;|y{gp0o4M|CB^mf#_cMt`+ zgqm)S3Cst}2ONa)eirjk?GRx+VSQ<-xYrb6hT(#|u;6B$OtmR*C%RE-ildmOc1^QA z>8_OzR7NS!I&ml7t53ZSr4cvF`r*W8%rk{3ZT4gKbl&q2Cf=Y!_Uj@`>)j-G$qHTT zM%^WRNkD$PzwxFkW3t>;VsEzi9z6FmP1J&_z!n`n_J&G+ly6h_`Q+oB42YVLe%KD6Dp9|rEP=ye9*Q)#+ z)rheTVy^+pP}#i=A>3ra7}T9?ji>0s;jfG$@LnQs&cNgHzj$vUUN1r)`PwAq?0f{u zvom^NoP-L+ayv?rO$a*`2S>C;xm_Z(Ret-J4m zEjCG&@*Wv`!}i%%d?Cq@U*NDb)sWWyHtW*Is+~8e!)cr3eK$Uk+;7U*e(WZ$LlFE- zf>6|fYka0x)5x>Yr$jXoH32!8u)q%UFx47v=e|XX?VoZ!vfw@r4?ujX3@xVfw>frS zPSW=fdy-kVeOclTs4`p%MoWSZxP?i@;hz+uegWchtmH98n(byd;i~v!g`ImfGYp-H z2~Ao?n6v_@M>xV;5vr+Qn>`{(?uRfOur9BX6Ahq)*ssc2LVhH$ z6+HU|r`V12qVyoi@2U}7H6oto%fCY+_T0^psShoi9j$G~j0k=#5%~Y2?eTedtO9tK{VpLez|e~zI~9*82ob3oq@iJpwHDNk3+c8NatyB7L*4#u5=WE2c+ zHavGh{E{nw!S*qJU-P~;jHcPRarm|o;Goq)3J9S$Z;8P?IQT_1u5QK%as;9csCLwKnxW(}Dj4|E>tdE%H7GWRu3=oH)wxmV z`(H&GX6fRelTSI@2=vXJiHJykDmU=*U7{9;;e=7|xo=bZO5!7M^2#TST$cN&h4dqtGns`mj$+= z3rFiSp*CIC3Q5yD_jTs;%JX)!2clEa;af31hpWS7L3`>#K9!3>%sNqyQERthTLSMb3 zXpXNpV<~mp?1^oYK+-;yuyJ7ws4Hx>$vlKI`+e}73*{@9)u5sFi>M9r zekvIk?h>AuK;2bz$Y!P$1J4LxxY$U|M=@_Pf|x{KolR^I;ZsdX5=gb+xrMnxq96+Z=M{H#R}k|JPSpHhpB-=o*mS=nvJ5 z{m3uebQKyD(>G^3YEZX$dUJ%MbsPt2odrY3+9IdSty?WSaJLVP1xuqUZU>M4BBw8= z6`XinLf~{Br2ZtTEwyEwM;FNB=A6oul;?HXJ^01FsuQgr{Mlf$r6{&daum9QK1env z$%^qT!wu$oW=FiR!e)YYqs9?#P96gR{U!xVTChr0nQaR)B#R~ETL#EVJ>jh~mpcCs zSziJU^&b5_i!t^k`_9-Rd$NU;FoRTsLPS|=h(va2=86{UOp*|~hN2QmmeB9EA(W;> z5>t^>BviD%=SThD|NFcT&waX{+jZuAzu&Wd&gbmkgsH6c<1G+!bznITpp~7yBcdJv zpJ2a?H8B#7m#17|g6QK7ty@Oj+>g299ykuwiB?iQ{GLAWEG>E%;<44|>b3E;gY&!B zeI+g!Me?!O#`>fG9g*>WH8z5DZOqOgOHDkm=sfX8k)Tm^C;!s8VW5=E#syh;EpVLR zAS(A`4@vf5%%{`&PNz=1x;|6?<*jm)}Nym{o_h|AF8mi_A*(~wcv zzvh{*ZO^V-H(R$X&bxu=kpLIJwJ2u7IEiCiBG~_8V6Es0`Y0 zABxe&E?p7o0|JMfm}N@kKa_&~eJx07ld+M`t?#+9s*WR@+12e?n;U}`MXD_}H6*Jn zq`a`2-b}D{h584({%aR;FlqRTg(a_>fn&CPwX45i?Y}zRZKyT65-hFr;C$yI7Pq7u ziZg31!-3otg)8A2v6j4doXVLCjo?ZsyI9w^WWmTanu|-v~8amx@Xza5&>;bY` 
z)<3}w*3u*uY|1~^qE}OC5j9~GWRJSpHS-N)oku%A8P^M(x&k*`O7mvnp?9#QxbI#g zZkXVq?pe8mo~G|z;{@iTE^D7X19r81qziHOo(Rg-brGc|bfWf3<1Kc2gL6JV$Z-@9 z>3KX4)ip;oP#OK^++iw$B3~E{=^|A}5vzEdJqq%$D)r)4csp}-*k_AMb)))OX4xC0 zF6(sLe9xS?jr}FRV9R3Pxql_3^c!wsK6~Oi#$OHkI4c}d6kkgG{0>*e2gsdPv;+ws z-ZpygJmr9TB)T^qzIBOyWY#Zt?HLqX0pGXZ8vt2}##Boq4$VM41ioXv(SxB$uGzB~ zn(2!ZQk`<;7VPG_WQDsrN$LCB`@8G^eqV8*N06bgHGzL#mFC(Vmm9G%(&wYawCIc! zNsy`QLEnFu^eo?}rtKSFBAidCZ^!V#!s}v9B*&5TJW1F#jQ(oQ`$-(U|F(ZodKL8b zes@Y*3%#%2+u8W57l^a0n$}%-w(u4C+7?y!wo`=zI7+%k%aueVrs6v-67)I)Gx(lO$ zfNJ6bUn!1$6&ne(+;T}sjEA03>2YZNwT^*|Tt=e#cu*tIhAB|I6ewp1{b3OXy zLdsKUBP!HPvC|)oWZ7s^d%7mF!`JDLnU5cqxS2gV_(m8N1&JD9Vtu#Vi@fj@| zk4OQ*`iRF%$dfqMrTO1t?;Y(t&x|D@AHnrs(V@f0RYPEPDs77f4 zNf|5MAw~atTi%-^yT^FAJUE($FlN;))m4QuGe#_r4X33o$^>j($C7c6|I|mkKG~EB zLPC0KvXt@nkq*EeH8nlCREd%omQyAE5#({Qp3CWLt8H(Mlh=M7**#XBj`Qqoxa9b) zq0*;aH2G%x*#@;J-bgJYWeO*P=G$@2>n?vAQ{vbY)k#mRvREVxuF+xW>G5=rglLW1wazXa&(byf&hdqkiWsXK(0!~)fCZ9n+w4=iwc^pjGsG%wH~C!{tu4_AEX`nc@3YzFr`}lKX!|q zfNO#=A?tO76gr!opscf~1y@sJYVa)LgFe)+I49J+J&Kj5rn#tVdd$^X$I%!SIKnJ-@vsJMQ@_= z-rFVLa$PafVkVp|aLUoScD&)ot67{2aaZz@=v=tErFS|fpOa5Nlhq_$&qkJJ{sgAAr-!D6uS)islt7{JL30w4TPrW($;@(tE*tO%9v7b*HcMPF7I;I?QKT1kGZbr zWLx`X`aUVFfaDI%ogi3ISF0E5RL#=-Yfo$5;moknx$OIW!UzAV=NH}!0ulSufF}n1 zu{*H?*x%V63=TP1)&KU>bTb2u896SVeC0~rIpoNCuj{S({;E?O-*32%kr`-=Ud@!i z7xT3$Wo5!;DzDsSb8;mMA7v*JFUX1=n?N?5J8|mrT)Y5Cly2q*AMP1=A(|fCAvi2Z z&_c!D;@+rIPMwp0|7Pg`U)Bk%ggM{7VAS2Q6h(p0oc|8R~q+o5mDL63c?HuD^$2LzeCmtuM%N`}H0~7fz}r4v#*tJX1^V zMY7b&EDuN9IH7f3QKBw^%ugG$*C!sDSlxLi-7Ft`p&Ca4WDMkX#Dpz|dP-?KT8I~< zX`DLqZPN#Z#4hnJVG)-4EK~<=)*WM>N@bktj&21}$hQ-JndN>qX%~{>%F)9E^DSG`HK3jDz)1E zw-fW%sB~+w{rXzy(Chma8?lMtmYJHI3%F*Tm+}L%(*>lCjIm?T#!^VIMehHH-FDp| zXf^uW$<_O9_H?X&pa!KT^h7jh9f=G#7<8uXd(Wf1G?%79d-#)Q>DDxgqIh2l#Z;~n zJL*tvL}QygIjE!v%VbwJElqmb6GF>xhpkjW4^+Aj=JmI8G3!`9_y*(;C%`DIaCeeK8Hkg zW*6IQyMKT;JmYqC|tGtB_6%8fw4mPS&0tZ65-k`d<<$PZQunO~#iS#K6^{jVJ zze1v@huKgUt(2;6v2*YHMrUu5?85F~jtt(xG@94E_GR1jo@P_zJOUS_^aw; 
ztbSLB>uC0j(mRq$2o2JWscPVDs!p&~3H9rg)~MR}ZouT?xwAwH(9=mSUvKscFo{`y ze`2bs<||NArf{aaaPCY;C}UtL^43 zp#{RoKQDmt&X+BBQ=x^qbW$2qXWXEE#~GYk1)&~#Qf!+!L&UiZ;FswUhN>BPLJD*P zNZs0CR{YfYMvNs}0J@Iq5Z;BZoH5327qb+a%+iN&A!p~i;Ad-8%7`>kUvQj}hklv0 z&%e@;#x#q0+Xd(j89)s9KJQCmsL?5KbsnR-0Ka%K08H}6zF9)Nq?{tH^ zjjZKr7~^D+6EA~FMMb){HiiEn_M;u^UcMW5*RmGB^f+@NNFK8B=E|~z2EmZrbA;x$ zbujEZ^H=kvn{s41zTNLM3BNXrThVpM`!%+#OHuhn8vXAmBw3EEnQp`GjT6*Dz0yI-%c&EUkD)WOA>a-@rYy zd+ei5RJ2QW2gH=LeG~H*ooCc)jh0=nMpn}vx&c=e4C`B-_GNW)rf=}0hV_P_;15f% zVNeqZQ?KQzvL2_$;YHQ64$#=f)tW9&ehT<11OthNLXi}A?lVDTU)8t|)D8>BiHNBD z)96D7=mbyCvE4z~nGlUESY;6@i&s>_J4swntKYtnW7M2RGH(oOCZZMtXUWY3rgt&E zWlKe=cuXihs*$y{dIpYos$FI2g_d-YG;gnlZeGF;-%-ci@_VsX5Xpu?|xE1DiYB9Fr{52;A+PqLn?7a zDB|^rr)dC1bygi+|LyMSCZqXiOCti9wHjils-{0`T27seK}Z@^tv}US_q+bO18FREd0BLV`<>1%kP{kI zdk!%SuRWO!hIIby!;pZvu14nXIiYABs(nBNU0uhCDMUSUgPqznnG!-QP{Ln5-Xj8e zn7WqZHqVNt;AhECA@7O&GFz8h1a$kdDt~&-SBrGswaE zJ_BC$EBKC*F5GV^9XT`13lLP6#|-DBhI$V^;AAw`&GJ|e1!kv35!>M#S&b{sq+VM^ zIxNRchYD+WO|TC2_=^;QRD?GGZ+J26yp`0VYNWi;*(h=Foc0jvQ%{i;+@EkB9zgDN zc9U3+PXq6EYVWvu4bGl=Gv8Zn(<+BIV!5Wne=ABhpJn^_i!(J&oj3Sjx$jz$HmHCwFRvq~ z%nQR)d|I1r$+o~+h!m{n@FeE0cn2YZ+xaf4L-)wGU;6qN? 
z1u8rax zSF9!1jcp9k`8lYQ|MXzod?A91fdR3{T2F50(a0)U>mJ!%zXBs%R+uubwd8loCla}dkV+_fbFV-wY6~lm@`TPJj@-TM**xF| z|G`Jgq^Nr~Jq%tP&s z4_}Il5Jhq#PHOD5cge`GG6{C8uJ?v@r8--+uEou>EUE*6c}|!-Oc;) zQBWtnS76HWx=#0hvv$E_)NDiM4`EJ?0Gsrrg46n))nEK;@pXc<&db8kc=>qw1uH{{ zp0E9fw0dx5RRx1&(H@St$ADYRN)v6KY@QJDGtvp(^keKJZ?Vwxv8Z`C0JG>>wP-u# zbDafyfd}4?<92Xb^?y5lx=y2bWh{h}vE)hU zksO@I$g#(|m;Be~O##v@59Ri3cGTxp?6IPy(Rh|WP8bP+t7AK%s_CRK!^6sh;@(%x zT<-nHLR>uJw{r3e?+AaZ1mG{+xPC~HTw&FZrw3!F4?s)R34t9A^z$nvXwdD5)-T<+ zvX5v=67Kb&9(;1;_0{jL0W|oOkUrMakHS~Qq6o)u-|K)AJ(rvh#Ao*KhyaFH6N*DR zYQ4o6CtP&m_bM$1D~vdW-^`AiZ-GcbztNH9=+&M&pJ8}l*Zy^8yW4vNavhwrg~?Qt81!?%-V;I1N0I>|k&=;;GZD@cQ^<41 z$!F7+Tw5;euA9N|=R0y#_SOVb>=}pg@<7Wl)pC%0AlQH1)0Ros{K1v3E!`_McBjXg2$g(Mn0v9n1mOLSA#7nRi zV6^Gmr7t)-r~k%GJFr+Z^*+%|d)>{J+yL&@JRTa?f}4tv0!Up({1ze>*;C4dY2J1r z>rJ=V%xm_&4SH7$XAaU&Asg9h$09>6yeeohgMK0>-89|zko7~%LkuA$`~dL`zoGGJ z-Vp3j00LC~sDYl0h-porkFi4xJr%F0yh|jP|Hn2v>NCE5nRq>#H$)Cp{wFeCPXxPh zJ$BT3$S;-zzI|EHLZ0G$-HnSDSKxc+=xI87u~IDAV}qC<7#I>K(nZRotE_+W=Mt11 z%B$qP`1E)}P9_uaM7#_>45(J*%Z~Dp)oJG;R4m`6wBy1n1JX%mdWf@bZOu8m$0M(s zNdC7m9NCyn!Zrwe{qXteQ%SwsgE@-3dJLMWOYOYt#;K&$?u?tHEt_DcomQ>iMylL% z-%LH)WweOKfI>BjRf!4R&UHCVH?yVvG_|~W1sjoA0g#j2UnY=ll8v9tl$+_65y=lV;*0> ztX3z`aL*keV&4qJ4Fj`lARBros3#Dqxn+s77vfO^ZVHTz>zq+0{CNOF%~^JWGa4ZN zO+6!i9k~_;Jc0Ufo^OCm7@ypW!fQ|6H%6-9Dj9cB!88Y?3=&$JoS>hp+XLfV{wu;!bxSpjg(J zkz}5)u8yCLo?AG)k_mlN{nVrbVh$n#%v3%_$(LnfmMeKH*$WMP;*`L?xZNi_>q@UQ zfV*Lt{$rp&Q`yF4^JzO+h)~9L{(9iX^<@$kE8xNypbH<++cZ?Cg4IV*g2jb4xlS;? 
zyxdaYW=i6Fi-{tSuY!-3b!`$EN4)I{>u6d=5({BmU9NNheTB0+?KE0UTNHWl_QkzW zQZzGtIu~PZCTS7>@xYUDfB;mhdZq^LK5^DE7hxcZq?^VTe8Pkhkr}Te9rQi)h!Y2i zZzJx1GL_%DD54cHwa4d}tFVzikBESg`mK3V51+$z!B*XwdOExAGI8O@$ZM~XE9XQh z^1A@bL1?Py>nH2(|KGTRs^QDLP?Tg%8fcul@e{@f(bwXUeI2JlG=s3W6F_z8d5Uom z%AHf{R6BJgn(kOe%sSGAIMH)(ht~%e@LuAP#`@!)Qb{z8sd@-A=adwmKYzFmb6#z# z1Ug|ow0*DVp+TcMndO>cB-}Y$N)=f6Lth_`YqQS0kfePI{Zjca-2=7*+g zug^FJ=`v|*GkXxz09^){C)b)+!GIhiS8q9ld^XH_eRV0W%PNBk^oXsS`dv-CHIQkX z{~sKJ$8-u`vW$fg#W}x&;lzZ0P0LF@2}eh5PQZ2Ii{RYGis5tR;GybpA3(=~D683H z9v}^n>5J^fst|1Lf~~NyxCNWcKX{AJMV&^TT_c1!Z!Yej63yegqJv#&k< za$Xg9e64vVwo=MHi0WYCP>^5!s#n|uc_Ma3Q4Q}pF2$U}U6Eq4x6fnEC5d|@&_WlD zXM8RB>*oX8!0(*gy~J)0XBF#U*hf%I*U*Ic3o5^8jzQ`X3H?NWZt?Nq^%fS3cjYYe`zT zi}KGCMvt@s*-g5wMphneW2M7R=>$*mdTub5Bu2U;Ise7pp0WKP{t!=#eNdQ?waeHu zuQ5W(W}$=I9e5X%#CdTCS8q(!(b^_;rsv~IUab{p6saEK1@~hg$2$b+=JIOcB_@v+ zyz;~z)8Vtr{NcEf6-!Zxv_C+9vy@mmZfQ?F=&!Qp^OtB#oytE(l6Ikm?gBCIjk>W%nD^MP3m*pGM5Ept05hkh1Ix+|u!>!2sQ6dYvb zxp9v|yMSt9O*pt*6rA7j0y)npf{-X^iWtOXz5b3?JzDqiln}o-==xg*tQnFZ;v4ag zbuQ~85=bC&5|O^1Bt3XP)}3K$gtg#X1|~_Y%u1!dXV8!p0rXuEHYn2y9|{YH|2Sc% zKI6uIAr`P+=*Pe}+`!bF>cGUDBt#~<=fvW*93yg=&x^tmFckUqrMffPueoelOMVN| zpP~^>)P{HvdsLOu)yMxsX!%l2rHJ?v(6XDlAyJW$r}!1pQEI}XZ6CH(r7Ag?A%_I+ zjVtE4#G_?0`;DH3nA7)TUwmjcHIlhNkj6fiOE`_|M<$W0LgkXIFJ964S1P%Z0fLbN1?_os_-0)+Js_ zS5@^K>4~!B-p=WNIR?`ABXIxxBmJMW2u}B-hT5|iL01irh-n->-*)n#qnvK=av0NSnrX=3w<*q^+(U5kG$*LeyoX^{`4YgK>^>@*Uok+W%&+1>Wl(G4c=-F40jRE zSmmeLtu>OPG_2aP40`b7TmkwS+K12hb<30U{Vk&ye=JM{vK%)n4~ptoCeqhy=$uW# z%#$j1;QNlm4iJ#ht#YD(>n-K^CaX}y2&3Po8{%`^Q%=&0==Lj9g;@8i!v%`2#uQ7# zvddG`cRpBH?YLo~pfjHS4qAQktJP-ofWf?>oGCcrx(CnlP`m7>I6QPW__hBVbU((R zDCGo7Gze-tWk%P?Y6HX){F!=r6$n-X%$AR?YUesRXP;(Nkf|-7)hhgzr?Rg?$f7v@ z*)-BBQqo99cK3&$l1+*hihGIIsXG(6S^@sfc@ETPezvDd8x6P_K?w{y0QHDu&s@$o z(CR>huPUI~0VhI$u_tisfo${Da^^UhN@y^6!*@c_|Hx~6Yw91+Ec?+Kj5Yp4jz(6N z@bVa^Mpgb#fhM6Q6yLmEV&(vQ%u7Jep2fK=v!FjiBv1D9w5lxd51qPAKg@15!<~jI zR;q?O7U&J-XEeFK_xG%b)HV{94LV{%u0Hmqe8JDDL^?p4nviUZ{QJhhZKLzKrPty3 
zcpJCy+?4C|-mzQRj``{oDb8^3SiZq@mV>m@!g~z#3t}Zfzvyk#4YvUrfvmGBT*@Ea zecR4mfmR~r4<&%HUeDruJZh&xZJ81sdsLIGxS*R`kt~hV26vRIG0Mxe=IU{s@*axY z5yfQB>9KUH%bBZov^?9umMS>raiX&w_;My+0i9#|vxN4XycPB_k|yF5YFZj1VH~yD zIO=1u^+kBS&5@Q#*-4Do{E8|Wikr(sD2X=AWsnigbRNGN8lI1qW)djXp6dt(c;N3~ zKl<8Xr>tX{f|h>!&xtNbQd`zw{fVT+hOgMwj2x=%Zy{fT_4;o=Bx;qL#P+hSfAJ1t zVCE52*OblME>7CBV(^%Ua;YGr=BY!Eo%8V})#=jkk)NL0EL_cYG&pl|&l;8oHoPrp zhxCD=rqQd#NH}s_ZOc)OHl`gEbwOXIkk5UF(&BSIp=EW3K9WE*5tPKlY!+FY%PJ>& z0Z!)VPS^P<>X4$*_Rp z5e;FdKSo7f_#rH&a(=;?>V)E>>yK~<0BCyoea^nLbW>@do;o6GM ztO4b&ZlJmKskPvlmdhND4pzUpfu$R?&72r~LQ#d+(%=^3Pxrt%70X=>GQC?ibNz)< zfG9@i#$pTUn;{s#iTb9PNdnyCMlEg|;=YoYoHjcsRLrYa-qMP>hK0BfTb~!A3v^t1 z>1rA5tudbJVGnn7IwTEz@;!0}u_af02L#oPIHi+2TAI$G50k~NmL$!cU?dIRw^8bt zO+qbkOjNv9q&4$3U+daGo>Rx+8^a&Je?7~wLn{EYbg~{Uy!3ARU6AhjEKs82tpTiY z+Ln77*}>$z$Eb>0r`7O&DYteb=GR4EM!xF+U2OM{qcN?!@lc{M9gmi&Rf*KExPfUB z_PQN$pG14&m!x5c3DIm<}9}XQ;~_N<|(ckL}jg@_60u^dlzAT4Zp77)6j6 zq<%=(W8HIg!jFbdzG}v-R!>%}vi@a(3|w#T8Oy&r+-IBxWH;<7n84?z@_zrO(KZ*ZSS7C9&F&5$!d8MCAhtKw@!!!xS^T1T>PUS%rc(y`%4nXmv z61jrfGS0B>qfE6F&U#_4FNOpb=9-44TN z`3}X`gK;@Q+#5(25)WWo9DzN=vInRhs=;l*YUTLwYp%!Tkt2D@kZ1rR-kZ0W-x_8z zyCSWq_0~MrM>RapHzV8F2L+M3Cp8bT#H}r9h#n_+9oLlm9W28Ep@*t4qmQZhF31gE z5_GtDa^EqiuwMbn1d*oQ-LY0XtQPe+vhrw*F!|XbMU2bA9E~xBZJAk@99X*C3>S;q ziVVa331`NGnxs%r(KWJ2)pf~Sjv5*(RQEdqqShlJ18h$9HbYg-QJy6mRD*XTn2{({ zlQCGN#tK$rd9<0~zmh6Owdr5CB-JAhc{P|~(My_VzhG?f!-B&+LC+55;3n`zI=wHE z^Mk_B`oHyow*((@J$BYtsD}qI63rVcR}>6!?mm%i3Xa9wz^UIk*2mv-zOyx3-5XR& z4%05pWCZdOEwy+?D1T6q)EET}i~;C|{dq#YX835jB_a*Xe%aPyxvW6VT1l8oZUdU1 z$uG95qRFX4QSwH|IcIU!n_>#EiWo)FeL-I@VcRjo&a`%(WQD=|?>k94TZiU+`-gg? 
zjZGvhWUE?j6-e$yH<~I$BAOur1Ig;d7ybBp)J+cmK6EDTLC|&ZcYRO0ZFlT^BWt3l zq~3u^O8e`Phm0`$uwOQ^bOYBe(`&%dzXuV`nHoCYBf?8|v6dfXJcL|Do-;X@`4@N_ z#$EDiMBlPo6nU-r6h%P79vF8a8b^J{9_IJGjY@c6NMXV4J*gx|?ia6xd{yg+GWQk9 zHGFa`f5D`&$2NZyQXt3fA-*|#rMaQ=bwZTt+^2*ODjf&r^TQSG0Z_8^j$!`2#EF3| z6Q@*Xru^yVQ`aO)7T|{=;^;a_9LdSk0~7f=^As}Hg3ivM z=0+4eaKkxKou4Ll5)I`H&LNqmqIWT7k!CkO-az73_G;4|cREQ^j>p{9PN=_p`ix=t z>t?Yt6d1=iwPVbEW99XO2V8`PO>Qtw!C^){6X?-4GUR;|=Nc#h0iV7fdW4eDpH2w- zbIn-4U=W<|0+aw&;r^JH&}cy;g#~ed|2+0kMYRNEdmakk=>C}BN;)qJ=~wXU%3H?i z?m90#J#Kb0cF-?>C|SjD*XcbNfV}~n=l?cr+O;~8*1fx=ywerBHsj`43q$0|Q@4mN zME`g0L;@IuV@HsOIwm(|3(RjY?~B!3UOG4X z6o2$;(mTe__`BpNWr`(kD;wo>GT)}^xXL)`3_62L#lbzGDX~XebWkd)5 zOXm8KPl+ws30)KYFVp+1uZ8wzZR(jP!psagjjEx`#P+(M!R?3oAMJz7KlabgsMn1L zwuwIwH4>*dr`lnwu8-w%&Y`a)^goOd0Z8hJObgLZe=7~yri zx8X$9C=J2JGv4g^wMwYyLe8uCdUhWa!o43n6qewi@_QG_NWwy<(th!zmqi4iGwL`-b?n*H&Yp#scm2ITJD86=;tXppOG}=5fYOA(W0#6#aGjr zaOc%${W?{3Jl~9Y`>o5CQ;W2rY7$I%LXIG}@(q5qUp{>(*Le%-YpS&vS$0V)qqZdB zODFdRO!p_GWN3IIm)vGA`}O2FBr*i~TOr9T`Wr584J$}YvQ8TyU47ANeKsw zWk4ka3}@6J+|eyf2@qQfdh)gv(QgtT5S4-B@!&!!#Mj=5^fhE)wsxo%N#LIKIG^L* z;>LNOScc3@bKLs-Y*oN4E3(d9=$gQ`LNM4yEh!#3k=J{79Hk=ir6(D;bPGr z`n8Z@a@0jX!1y5``-b|7)~R_ZDwx6xZbWLK1LEkoIeT_U!2147+u4^zE;QRNw;*O- z=+vfp_{jUTZZ{i8Sy9K8B)KDy0cl5dl)P>(4;M%!?+>_;y#X4ZlPe}qFw#8obkTO;5+sxwQxMM1~>jy@Ag;e^p7dr?#$s*>_A#@v0v$@J!#H-nWTNYam7lErDKT6TBZZlv*}H|01~X9k-$=Wpbo?6 zd-;-2kV-=*-yGR8mi@&VYRgms!rL51nmsdZ` z<|IBlFh~URj&ZrwS(@Tm5hx#*Tvw!hlr(`o%rNyKl^XE6?VTRW-qP{as`3Cpxiv>O z2L>I`vi~YbkUl*37}#-j*$KvnB~Q3&8=WPgi1TVc5gjD~kgY1e2XBV^4*lk<$^|)W z9}}$3A{5VIuErZ%jJpIkcfOFXI5SUA0j1-$5wym&%IN$I!JNkIs1hM=DNM+0R~(NK zn^8{xXr9?~8#$0U%s{>-oYG@eKUim4TN!RU(RPivIUQ++*4b9EvTQ_0a?vBqJWj}A zy5Mmq4h|((Y*0}>!@ewOZ|>db?6LD`9YL>>NqT(TmG}i>d{(GEK*=@zQ4DK;;YZvA z2petA3hmW`#w%*AW3P!ci8qNaKpnl}`2>?SaRY$@^6v=oNO>?Jg2~cNjCV5~d2e<( z`ZH$lXXh{b>hQoo^!DeHW)P8@T|Akr6)ED7jjM-fZ>@mja@mS9vnl{n z0EzoU%#6a4KB+5Shz#=qgp3-~rLOsq$|Yz}t~?bi2AlWUPWi3oi>SO1eC?S>rwJo> zXIce2aT1Iw*0yHKqM`zqxXKNm=A_Lzf^&A=mwr 
zZ$wCO2Te`rp|`durMW^hKue(C6>nK9?*+3XUhhe^K9|&0ND166@M{fO%auM&hjo!= zpwK_j7CPDi>Wl}wT-_Z6cF!6TgxDuUYiu$`=Vu{3jPaR0u+e$WG%%1fv-;lCpLuU+ zP&C!ZU>BO4XA;8O=EI*oXxJE{nw8Zkw&;&o>c8j_w)Y(xxnt$g|1Q1rE&zNYiui}6 z(icX-jH^+ko<4Q?ayNYJJ-3)|QLKvu;8q6e;sNfi!wc=TpG@|PIf-TFL|7kVjyK13 zGr?|-Ga6MjT~q)}M02_c9ZXTxiqWHV>zQ!Q#S)I>g-vaUP2L-VbF~MRCcHAP;l^wfy!hM2{;+v+m|)UzU>W~3@^Q9 zrBQXuz~@fs(|+b?eO5BBtu)Nn?$9{uu_KB+xl}%F*lSa(2ty`AHaWPP8>#w*^yM_O zT3>3tCG z7iPX$T780GAwavt^M&y1AJXAZoYMNTijxU3-fw>1)?U_|FtjG6@awjOiym4ULJxZ7#)A87Ji6}W;COFHauSZ zQjC|O*6+iDhZ{Pz26>GTnVgS-NCzW@GufM`MNN8x`MRY=%`rj|6|%CyyzfQJG3`Ju z1vi0voqKN!y_`>Rjp-flUcVkexi>McuZB?0( zF^2yk`U_e$%O<9}7gMWTa7?4Eea{~TbdJ|Eksd+>ArX$*4AQneIskLv)a*kK84}-} zb`pJ?t@^hX+J!d@E|9ha?Q6?Y3SxT94HbD2d-hGf=@PRvzeQx;8|lC$XOc?8rgz&i z60R!1khjN_8NLmhc+_P60WCn7Wh&5aTH3f@#mUxAc`vEn{I9nKY@iUPbUkaQ|GHXq zP#BSyu=h#&XQUB&KkM(mc5sYX z4XUF2-i%gUrpLN+U;2_tfifeGxDW<1O@)l~iOmJ3Cu}#HC9!~DA}Vv9_pLB<}LfG$zM=XVU7ZqGw=_6Hh%$NW2OAhlw4ZG$NI2&8a3I zl+@1$nG|nePrNdC6Ml3NW_D`+Uv3oV*8(=U4TzmZ`iA!b#5oJPgbrjbIAou=r{0nC*JNa@O2_kz+K&W`-)Bh^;{e1lu$HJm3uVOWQ zUWhdptYdaOcx%~c#jIU z-mv+AVRTeyBrnr`H(Yw!wf{-S{Inx+tZUJJK&Q$+etpKkR7F*P41vgLcUwiri3v8OvoEqBW|}7v+dn01if#9^!LUtDc5$aBk;^ zXshDketHso!H^s0=6A?3brK@bol^4IYZ-#fYJ2sWH39g^PYTX+{&H)>KMae_NTu&F ziByhUQ$8`3IqdQQ_R_R5u9qk9H&w=nf4yrJL5kv`;s|_5op|<(`mE3Te#w_DI)^PE zKcnCCT{?W~=OHniVET#Swy*KuhS0P_K3FEsxQcJjMvE+x#G*MA)pzrtp&$f(8sB6B zFaSMDyqv<-S+Y7*OB>>_@Gv?F!`i7Y+A5)w03apd?a*VR#w_sV5+k&1O3s_ApwTIW z_czRl3#$UwrBCb!%F3+FY*FK=n@jh>yurL~`d@?W`?sDMM~N`Ls%s8x3~&n(_3>%( zTCuCEPt~QS@WI1J1+vxUxyQ-X(hONr_la6PB=x|mZmaC^ukfV8cHa4JpUxZLBQ8(> z1l>=azkazN18}c-pFd$vR0404LKD#oYBob#l~5C;f^`8;v|XiE04irzrdacn$~Uno zysbsd??y9x<*Pbs!l9i~6Br)_l*xh@S{w0)J3DQLBj0D88A$)t7Vopii-lon zX&31`PMc_9Su|et)IeIoi#mLZ^$abkQ*M9Sa3mP39CR#rY7?}x?%8$k);dDAuOr)Q z-M%K9p#8^YvZ?d<9dsLjM}V1L4jwBKaZSV~1+?ZV=rMc zW00t`IHK^fZR0Q9Cs*4mGKyA5K1ZvpQrpcp8ieHL*%HdG%1-V&MnJ6vMPB{#Sw%7Y zo-IL+M~y;MuSz^_+0aujGLq;w7-;R$oaN+P35H(1p9o|| 
zBKvWL^rPe)73@)?sKdO&9dGY{`o_yx#b_1+7@HiTr8{BdJcs&6ie|ytiI%4h)^19B zIy#{qYAn7GCBAee0uJ=lvIFIF&Yf7k5zb9~X+oDU%ExEOgAYzX*H4LM>S&Nc|0?x0 zz-J{#3@>bF1L-<6LPK5tq&Ac*Uc9Cx$`t?}?==3C*5oO!g5Lfp;X0%hu^qNBV zIlxlGmd3n?k+=Sr(RK<(WGicX+ONEz7U8`(x8V3}dx-&)roRmQigQd564Rb*(^b+V=;yplhk zgSm+6jzq3_fP?XD=ZN79`T6;0=^9ye`3{QGcwx2MrX)aEEZ=DPP2KdA00CC0-?|yY zWiv8xmH#bI3*8OPxjAG=aA}a>Qa{6{p#F0HrnH0c1st9LliGhCxd(jJXOg}V`Kx^}Z+-}B)vd$mYHtti#%r}&JM+U3t=!5N)lez%BJD%b9q{w@te z(#q)*j{!5PJRT!dk&ufDEoHR!Va(+WFR}Mc$2-Nzld*Q4GjZa-YAT9?>pH{jpEtj+ zE`N4RLvw()?27Cpzk}5tm99h$9krpvs>3veMa=n#XZNJCYjvHPk zlFjQRCySRHb6o@WZ3gpKi{0o_e`ZBxu=vq5s|SPiXJ6ycejGKXTdl$#?y<(?Iz2)O zuLubI4GSLro9JI(J#Nl7j}?WuFbl11UnD-*=v?_cSL0OoSNb;gy8~J?QUSSRJNS!l zceV*zNG@1*9aQM!aSe&LCCp<7sLCqDN|UH{yxZy|fMnoS$V78xb5X~JF-7h}aG8gW zl$$WhF9e3U$aLFt)lTRguIro)%NZlg%P+Ut$Hjr}`JJq{@THM3>x<7TjX?dbVKSx1 zm{mNLxWg;dp=~SG1^b$Cx%STOXuq8k_JSW6@qHRF;0S?vA6z7hJqwgsK;)#%1AM-K zR*{3rxCF2n=%>t&JY^O_?XP$ z&2$-5`=$aR=6#P_=>0+7QmczM1s!I;+bWt&azU{%DEdw>dr`-=_{`KAxY6) ze`E@~jp*fhr2mZ2#pmZg;Wc#5`N({FRIp2zS>;e5k3+SOep7vfFf!^@(&6}Js87{( zYhG05{3T)rd40e#yo2E0?K$*CLE=X|!_Kpp@lSClWt-)Y3WS$6`5ak9G}8C@a|O79 zd0NCDniUCjKe*WC?4K)BhzGA3?DAB6r3hQZLnRl*X4G&KLA;B>OR7I-bZ2%|VZ1CQ zG0;%lTM2hDqt;5Oz9rBqM~%WCdqC#o2~^>Z1p=+dG5;g)eId*$Mi;qy4Z4Px6NdH) z*ze|yW>itSI~1JZ_*@cwkvDlE>NvH-aK?fCj!8tVTyiv{TecbaQjIe1cN-_C+7Tlq zZpV>oG4XU}7;jYOaAJTZG&NX`-ru#oIZs_i^TVxiKWw(-c(jcOD5|J;bO>ELSSRfz zcEWm#|HFD{HB;4>v+mf4Rg5(ow{3hoYL?v|RtX+(Ma|{Y-%ej=ibgt$&+@#3g3$K= z-8kihwejfTwbZMgLQMbLTV%O{Hh*s$?koN)HV1Iwhont3>>NicZ~SHx#_28t_d z&kpv*E8hqeP++npE%vMJA6nb}#4G4semDSdQEoFpOnB@NfIGbixH71nc^gJzS)mk& zP}sjOL6(l@!HJi95#0c(S^+P@4<4AAmfTO2&t*+U0PJX1iMs;5PRU9O`dwdE_6Zl? 
z>1@MW@W&qu8WI`iRpkG0d!`s{z-YjN8+OEj){>wEIf76xtqis{RT#GQz;Yu(u6?x> zkiUJ*V9M2H|Cb-JO>9#BpYJvM#tu}wE`_qyHjJ!|`Phpag6=;3bg30Zf9M2<#6)k= zidqrrlj*7fya+RYL)mMx7&N_O`#cgwsqWxIP0o{0*)Q>I!>$8tqo${_g_ur43hcjT zdFUKMG|Bz11MMUt72rgQH`3gAr-_N=$^__XTMCWJ*%{98+_)lck;btWI5=koftGAo z(Jj@;pFNAi4IS|{$Cy`hLpSGl?m~9O%zV}NyCldU7|(7wklT*N;%w7F*8pLrl(_P) zV^UXhf^1lM)Ly|evhND8#dc$=OX7iq4gbP?3z1Z{Z8u8LzlhwU zQ1rRQ_q@4jRQC?f*V?^k`llch3!|5?=Nb?eXRjxxN(t2do}eCgwd zT7|~Ot5v^E`wuIY|BmNg{dqp_SJsz?f0TS4bM^_iITDK!%O35itRhxF%zU&*MENb_ zd3*CwoEM?y=KpE$+rOGR)BQJ6OIwc8IaW~-I5RyR$HM^i2!RqJU0UI^7*InJE{14P zt_D;Hm&5?tv7A=HQyoFM7E=PrP5^}vNlbw8D5J{aopi>4?$2)?sWm&>&jqJDyKYTI6GD%76*jMi~CgN}LF9lgQ% zg%#s_3ON|d3BjzQ6G3#V2>r~pLPG5)w`s0Q`jm5bPyzb7vOEWK68J^TS?+s@=8uJ# z7V3%vPUS!zeV$b8AzIaTEMy_x;LMuy!77lzk#n>@{i(gfx`VaeFZ&7y=3e<=j&bOP z-pGNTTb>5S%Bb|Z(@E=u8Ekm2Q-7hWixZC%r!QzTb&8rAh;n&yd!OZD5~Hjzc38cZ z)LphBe>9=_lk(_wr!L&w2SLwX&BtJO(DHmFHH7(jey8MP;fbnMkbjh`&~Md_pOToRs!t^K(eiTytQI zO#xv#B7uj6#)e<7TyJP3;<-ey3V4+YmH#P-O8Z#o<^s3H%4;v0hRF+|%&3rw0l~0H zr@8x5g?PXv<|mvOF<06aWO|88QKqZhOs+D1c*)`Dzh-wLoJPw?VHPIpW`&4t;&^4ZB{zGB7!ipMA@BQk`^2E6iLuoQqoKk8L#dBV4sAVoZ zE}nV+=tq%*N-1Y+rr{gq^|#$l#1pmQSS9<=f-FQzs2EMjZNB@x3kIX}q6hTHm;1Ee9{8Xu>Te?Q*kE1xUGTShiGSQIE*kkh_K z+`U z$%zDCCkn4>RsI@7!lI6`@@i@8g_{y;Ta?J47s9#qtgo>H^eGxao@&%rP_1<22G^`Z zzaxA&4TbHK>_=vj=Kz4rqOQp8{J2LOkXd9l?q$L4(o(mlBcLEONZTw3Kam<#RPa_w><-`|G6t-XpsYJ7Cwc`CUO@*!=c+x39DB)O0iu)IRnyoBD2cJ=>SqXU{ ziCb)bI+PJ2Zi{wl)K2lYgR%kDmJ*Qf5TF<)LySPZTCFS8e$}@@@oD7cObQ;vAv+DT z??7J+A>9?5GO5FKxtuGxYB3{Dlm!JcCKLFPCj(e`cz|_&+^+mdYOB=jWz>H+xB6uZ zT8L=#M-Rr*_J9j7*or9ZLF~C5X--jFRq8YlX&VieyDF{tdX2n8U(K62t5qfT`e1lZ zNrJjE*{l^Cq6Y7GK>k-i2y&i$8Plm316MQDQXbejP$v6f%n*&?7|Aex0Gx#!$iog` zd&Ch2zMC+&ja8?m5hB2~9%}zVBG#@K?hbN+!V73f4|?hV_8@{GG8&&e`wmHg&jhea z$U!s?6*GqAPNQ>zw&APk0Dmig$${W@km8iI)~CkYD@rG#*mvYBZJ+vkzL%+SZy|ri zwa!IiKES0R9HMnJ*aapM8A2Q-W%LkGVV}5GSvj`TY7}PrX}1Nld;H}?XJWMOQ24{g zMH&OX4|b6!51(u)jPCzrQK9lbnHpMF3UMYUpw7j|2gJ3VQ~cEwCo&r@5cHDS0y*&2 
zPDGa+&iowwfnng(#Yvjovy9n?KqKaC5o!!u?EsrZsc`IlWlW4U5o00@WZ&l6X@Nx$G)Wh{$Wx?ir#C4c{3n zMTrR66>k#FM};c`^T~P*EQ>o9l4Zj8mBA7+$mL&bWJ6FOsNj5f1|KcU%X2C2Z_aHQ z+(B|F32`KP-qU1^`-tn6-}7)0lY+tut($PhT&Fayb|JBAl{*Q^Xf@gt*GedeCp5>m z$`PYAB}U2#4YhmL+-kHJRaIzO9_le0j&$3Dbi)iCcOG>>m6z~>p^m%P{6@aYsk2NX zJKB@4)&{I^|7mES^-$wA!`X{dn+5a_4jg_!a-BM35V@ge2oZ)=AP6kUw8m?k!1*iR zA>@v`&6}ahFqxo)Jbi%q#e+Lz!{lCJt}t!_4~N2SR8I57Gyp%W1aW;_LKenDJ1VdI zuKho9jRdWaG9)XX!TrbM~IRK7J zNyDxsCxjey0~?<9HPkD{EV6+B7waA-rG3kz@e}||!Y}=XhN4t@W??L%9wQ_Jy8GrD z`D$(sIf=6IEwMD(Ub%#ohW%6t@>vTmvMU=fLoRqgl3V+h$F4E5IgvSYw)#8TfL@E< z&tH>}{CmiM7m+mTnPq;F^s*4~3u{tb<*Q0p|&!MAA0!%@KE zS(p_uR=QCmhN(2=^Q#gI9_hrt9}KFeg!9XUt7S)j7&irWToLw{3d#PV({}A!zI{%Y zWG-Ch#F2(0IinHYj!+mQYw$R8_+Q_65CnNBsg#9AY4zxYlYlM+ZNV+HM9`acLf8-+ zO{2)0SxLU8SFaG%oxGrkI|NkISDhfWk%nH}cD4Tf=^T6X64pj_wCn5z@E|=bbcR#?-9*2udfb{?uaBXM&;IeC#s4YN6xbcsFdt8ej18GonP8YdLuCtWIMCtNq==?6go^qAH~^N` zt(oK^!$P;I^<@rUnzDD{;R?SK*YUTj>lqmg%-0TTAHDu)C(TFG+!|F@yOXvZp}X2G zc@`x6$RwHpq`xHVTOsTv@Jb#l6ATFQD1-;cJ_j}P*d&>A#qz&IcVkgfYYLnKC#;0p zw;0m$er|~Lmv=X51Boa<$UP3K{ox@#mE8Y+VUiz;MjFd|)^(Y=r$BV5Q zSRCS=QQmDo5-Od&Ypgdao>uCH)swX>4g?9K(}r|84z*tH+-?V_%pCFZoss${1OxEG z9#Gi64Me|JXG=JG?cyLcA`Kx&7-45s`(HgP>gu2 zfJS~fKI9M`L?0M0mc9Ootqnx4@i#7W%+dEu^)1wfhI$cBVhP+Qmsqk80%6s--LqeA zOxGCTdF*jfZt}vsf?0~Spu&9bG!V3bMBR1n>1{J+RFlCZHKewlUhQHBZN9fibN;zy z%6G%?{4Q)18Rpl+xR!vyy_`I8Q!VgN<@sBlu0kF2CShpXY9^y8mQLIB>=kd_iu^T) zp4)S7eVUN*@RIb1pzHQYuM|yF*$SxAi{!Xu>y4#T4XF*e0@h(nfRYhum{(iuP!y-m z66w(%2o>EGHR#(S7N&S?SzN#aabdk^z7}}6I#nub;<27alo(uSI9`vb@pu2+@8YXY zrHL9uwni|+uptB#A<+@{hTJQxG;!P7L+`b3#fsT;DXNb8ir{{V_rJV~!>6@M9qseu z4n5~o%(tHQDkIWx!6ige37NhYYTP$;tPA0UE_p=xM;dYo)f9mzqj1(z7+PIy-o;&zijR~Qdq`pp$amI-K9 z{%Q0IdIP-&`iHC92!{TL0lJ}nCV~y{#MXMu1$+%M=(^N!kt4OdTxEWFArAFLTF|w2 z(6j4`EH<63RUa<&8psMSA(y;3hZUCDsOH zk)#0ujRU2HhS1U!a8-H3Q(u+31)W?0MIKJIMD`lfY5&aDNphZ+Li<;!{p+dj0ofrUEfD!k^(k#of12nUaAaBag|atI zLy=okIZOaF;~1XRU+Ue=h)#{+!uF8=hx_xwy~|zE^YOI(w0*R}gP$(71vK(QsYCmZ z(?ye^f6uRBWq~W^bfjGgW$}YIYHQgO_|3g4%7Rslagw#+XCgs 
z*?7#GYvjK{M(>-erOwb{cYf{@o=kR4$s{pWQA(Bjpg;ql1bK((&V|0Tt>_quc6EP> z3y3cNB9QKHih)9Wobt(|KD3Hr3KpBG!{f6B`L6gXOt-bLozu)G4f=v^v}-z~ou)$fCyh4RUu;9gVB z-`(d7NRxL`hO+JqFUtGvU~RnLZGQlfE@s|lLDls~#+G}*{kyTd=pYa>$}@0o`bZPi zyHqCg`LTe!d&?3A3flqP{Mn#X?1~iKa<$&H&dD;PRat(_q@!2P0xEmy`utADrEb}` zXJfY$85PMS644G49^lseQxWiLQI{=;kA=>`S!9#+Ae&mqI?N`(#h?c6F@nLk;MOAgZ zpnfBFVkNXmyhv*U`p&q}9_arT>>S37U$P7ErT|FjeBJ|o1x((+8pg~o|HiA|$VArD zz95Pm0b9^rlc)2Pr&a1RWzuIO9bQK?bmKW-=Iv2idz8S}nau+lek34Oa*@ z;Ze(qg4(OnRRyxfF|Tr7?4bT{W6IM+0ym^^sRS^%5#~V8{{4ovG`H0+Dnqx!yM%0e@##)y!*l61ydcL01aOph~1^DvJr^+9U;<()w5m9`4^v71jTb0 zOEm+;uP+et57W(qx(`ZZ(C`T=J*bR%tIiWCMx*T7RZqS@XTJB`0qo@UR*Qw-)6i4J z(`3E{o+D4Ug5hqy`wbo)&J|fE{01N1j+dw2V=+i2o&1BqM5-r+_-KE_G%bMVRnaXy zWZ7SWOR2U2h5zs00Xa}uV+{RaB|3x7xEPPbHO2bH%N6`k`+RQmbuM#mCaVKv`B@y* zIpgQ1jR*k1r7QnpiW?MIn94cK21IxuO{??!v$rJYEY1(~BpYB`U)TAdOkzf+1JVTP zwE=1BoVnE*sZEst0uSuE{0>Vu?if75tP*S|Tc^v6$*LE^ZZ0mIRMx4Vv>*9v=4b(% z(p7;+Z}5BI9#Kyj3fug@bRB(B<^T>A!0C={l=yO$$#l6Zp*aVq-Os4UQELG0Z8C*oBrjF+ZSjo~AX&tv~|9X5p#c~m)3I?3BnvFl%GhDe;U$I#Gv4<*g zOa4t;s;)+IxQc`79F_h)PZMqjH45HUw95b2HlJJ!>X)H}ID-z54X;Y{F}PZ<{wDyv z@_jnXykgw3S6_a0@XT#tdMQg8;T>Jy;7W)`8-f4ARxTsiw#oxhe#lZZa}e){w!VY? 
zd3CGeG`9-9D!W;CBAopI&~^?I-KSTI%(R%`+ z$Qs>q0T>f1EVHZGUGfU~jlo+)KurRXTijqM@ZcGIsR{q!On~}KDKm`ZE$F$7?oz2Y z6wdc8cLNlD3*^qO8^0!?Zs?%j5W?HZ^+@2R*;?geuK_}b{fA8+q3?^7V%g786IAD^ zxgE|vV#Qv7ckOEcF<9c0#f<(@I$N4a4T`m97tsCz{Uuatgri;gA5x_-u^_&}51tlG zhg3*RUrq@5IZgBGx3*^ML`R(DqiYSqOp3%i_veOJuQ$-H2V#$dopIP>^mR508lK8b z+gPQ5*W<=*3aJ>j*o-D*MQjI#yO5vy?gW;&_5z0DE@@!6vk)vbz*ZcnjnkcigiFoab2c ze$F+^p}Z%e0lcYC4Qh)&s4QLb>&k8R{wGskAMM@ffM@4u9>$?l!3k9Fz=#At>|StQ z;zmOJ@+@or249S8KMqRt!1m(qebh&g8J6lf_UX(5O>A_z*!u{5?-9{*+5Dl zPf#Wz62#!^qCzqECMb)I$KMs>M|Tf`Gd#2iEm|E@UTTm@(qoK+D7b> z-YH-JZ{{eReOS5NiF5pkaFTc8%hwzS)_fhv!2AoyJL)Y|bmo@wI5vqim_z1gHscLZ zPFcoMemb1)Qh_H{#K`|`IqWWQY9~!p5Pv>df3Ez^g=(im6=IO@YD9)NdyPC zhjpwpdei=1+IIEyZGEe!c`&$FR5_rY-YBXPELPc(Pu>|~YZJghHylo_GWK47G)hRH zY3c3eB-gC>TMres&me&#?SS)6-KA)p=;>oI7&Ub``o+qK~c)-sHN)))#~jZtQpJMIZTM z&)olPe&!^<*4w!^XLv4pdLfXTP?Z_&NF)G|``abf%zDp1hZERm*9j8NMPEW6B0I3D zjfo0yG=iTR5i+O8G+7T`2(hLy z4fBCd%FX6M?P`}4RxIGD1BpA&l>)z5BN*jWQbFNHS6s5-wmy6}Fwv&dpuc=`*?J1d zzm=iiOG*H{3fkIjA#uHrkIpQ#5&VNiD?ZAHuIF9&LrujNQue~OsBB2|-VfV%?r8p> zh6jKD!?p|5b6;-wTLk*8GA%dB d|DUzq)X+0`B)<7$*tXrTcZGdbzvIBE{{viFZSnvB literal 0 HcmV?d00001 diff --git a/api/core/model_runtime/model_providers/nvidia/_assets/icon_s_en.svg b/api/core/model_runtime/model_providers/nvidia/_assets/icon_s_en.svg new file mode 100644 index 0000000000..9fc02f9164 --- /dev/null +++ b/api/core/model_runtime/model_providers/nvidia/_assets/icon_s_en.svg @@ -0,0 +1,3 @@ + + + diff --git a/api/core/model_runtime/model_providers/nvidia/llm/_position.yaml b/api/core/model_runtime/model_providers/nvidia/llm/_position.yaml new file mode 100644 index 0000000000..78ab4cb93e --- /dev/null +++ b/api/core/model_runtime/model_providers/nvidia/llm/_position.yaml @@ -0,0 +1,4 @@ +- google/gemma-7b +- meta/llama2-70b +- mistralai/mixtral-8x7b-instruct-v0.1 +- fuyu-8b diff --git a/api/core/model_runtime/model_providers/nvidia/llm/fuyu-8b.yaml b/api/core/model_runtime/model_providers/nvidia/llm/fuyu-8b.yaml new file mode 
100644 index 0000000000..49749bba90 --- /dev/null +++ b/api/core/model_runtime/model_providers/nvidia/llm/fuyu-8b.yaml @@ -0,0 +1,27 @@ +model: fuyu-8b +label: + zh_Hans: fuyu-8b + en_US: fuyu-8b +model_type: llm +features: + - agent-thought + - vision +model_properties: + mode: chat + context_size: 16000 +parameter_rules: + - name: temperature + use_template: temperature + default: 0.2 + min: 0.1 + max: 1 + - name: top_p + use_template: top_p + default: 0.7 + min: 0.1 + max: 1 + - name: max_tokens + use_template: max_tokens + default: 512 + min: 1 + max: 1024 diff --git a/api/core/model_runtime/model_providers/nvidia/llm/gemma-7b.yaml b/api/core/model_runtime/model_providers/nvidia/llm/gemma-7b.yaml new file mode 100644 index 0000000000..c50dad4f14 --- /dev/null +++ b/api/core/model_runtime/model_providers/nvidia/llm/gemma-7b.yaml @@ -0,0 +1,30 @@ +model: google/gemma-7b +label: + zh_Hans: google/gemma-7b + en_US: google/gemma-7b +model_type: llm +features: + - agent-thought +model_properties: + mode: chat + context_size: 8192 +parameter_rules: + - name: temperature + use_template: temperature + - name: top_p + use_template: top_p + - name: max_tokens + use_template: max_tokens + default: 512 + min: 1 + max: 1024 + - name: frequency_penalty + use_template: frequency_penalty + min: -2 + max: 2 + default: 0 + - name: presence_penalty + use_template: presence_penalty + min: -2 + max: 2 + default: 0 diff --git a/api/core/model_runtime/model_providers/nvidia/llm/llama2-70b.yaml b/api/core/model_runtime/model_providers/nvidia/llm/llama2-70b.yaml new file mode 100644 index 0000000000..46422cbdb6 --- /dev/null +++ b/api/core/model_runtime/model_providers/nvidia/llm/llama2-70b.yaml @@ -0,0 +1,30 @@ +model: meta/llama2-70b +label: + zh_Hans: meta/llama2-70b + en_US: meta/llama2-70b +model_type: llm +features: + - agent-thought +model_properties: + mode: chat + context_size: 32768 +parameter_rules: + - name: temperature + use_template: temperature + - name: top_p + 
import json
from collections.abc import Generator
from typing import Optional, Union

import requests
from yarl import URL

from core.model_runtime.entities.llm_entities import LLMMode, LLMResult
from core.model_runtime.entities.message_entities import (
    PromptMessage,
    PromptMessageContentType,
    PromptMessageFunction,
    PromptMessageTool,
    UserPromptMessage,
)
from core.model_runtime.errors.invoke import InvokeError
from core.model_runtime.errors.validate import CredentialsValidateFailedError
from core.model_runtime.model_providers.openai_api_compatible.llm.llm import OAIAPICompatLargeLanguageModel
from core.model_runtime.utils import helper


class NVIDIALargeLanguageModel(OAIAPICompatLargeLanguageModel):
    """
    LLM implementation for models hosted on the NVIDIA API catalog.

    Most models are served through the shared OpenAI-compatible endpoint
    (https://integrate.api.nvidia.com/v1). Models with a non-empty entry in
    MODEL_SUFFIX_MAP (currently fuyu-8b) use a dedicated per-model endpoint
    under https://ai.api.nvidia.com/v1 instead.
    """

    # model name -> dedicated endpoint suffix; an empty string means the model
    # is served by the shared OpenAI-compatible endpoint.
    MODEL_SUFFIX_MAP = {
        'fuyu-8b': 'vlm/adept/fuyu-8b',
        'mistralai/mixtral-8x7b-instruct-v0.1': '',
        'google/gemma-7b': '',
        'meta/llama2-70b': ''
    }

    def _invoke(self, model: str, credentials: dict,
                prompt_messages: list[PromptMessage], model_parameters: dict,
                tools: Optional[list[PromptMessageTool]] = None, stop: Optional[list[str]] = None,
                stream: bool = True, user: Optional[str] = None) \
            -> Union[LLMResult, Generator]:
        """
        Invoke the model via the OpenAI-compatible base implementation.

        :param model: model name
        :param credentials: model credentials
        :param prompt_messages: prompt messages
        :param model_parameters: model parameters
        :param tools: tools for tool calling
        :param stop: stop words (discarded, see note below)
        :param stream: whether to stream the response
        :param user: unique user id (discarded, see note below)
        :return: full response or stream response chunk generator result
        """
        self._add_custom_parameters(credentials, model)
        prompt_messages = self._transform_prompt_messages(prompt_messages)
        # NOTE(review): stop words and user id are deliberately discarded here,
        # presumably because the NVIDIA endpoints do not accept them -- confirm.
        stop = []
        user = None

        return super()._invoke(model, credentials, prompt_messages, model_parameters, tools, stop, stream, user)

    def _transform_prompt_messages(self, prompt_messages: list[PromptMessage]) -> list[PromptMessage]:
        """
        Flatten multi-part (text + image) user messages into a single text
        string, inlining each image as an HTML <img> tag.
        """
        for i, p in enumerate(prompt_messages):
            if isinstance(p, UserPromptMessage) and isinstance(p.content, list):
                content_text = ''
                for prompt_content in p.content:
                    if prompt_content.type == PromptMessageContentType.TEXT:
                        content_text += prompt_content.data
                    else:
                        # bug fix: the original appended an empty string here,
                        # silently dropping image content from the prompt
                        content_text += f' <img src="{prompt_content.data}" />'

                prompt_messages[i] = UserPromptMessage(content=content_text)
        return prompt_messages

    def validate_credentials(self, model: str, credentials: dict) -> None:
        """
        Validate model credentials by sending a minimal request to the
        model's endpoint.
        """
        self._add_custom_parameters(credentials, model)
        self._validate_credentials(model, credentials)

    def _add_custom_parameters(self, credentials: dict, model: str) -> None:
        """
        Point the credentials at the correct NVIDIA endpoint for this model
        and set provider-specific options.
        """
        credentials['mode'] = 'chat'

        if self.MODEL_SUFFIX_MAP.get(model):
            credentials['server_url'] = f'https://ai.api.nvidia.com/v1/{self.MODEL_SUFFIX_MAP[model]}'
            # bug fix: credentials coming straight from the provider form only
            # contain 'api_key', so pop() must not raise KeyError here
            credentials.pop('endpoint_url', None)
        else:
            credentials['endpoint_url'] = 'https://integrate.api.nvidia.com/v1'

        credentials['stream_mode_delimiter'] = '\n'

    def _validate_credentials(self, model: str, credentials: dict) -> None:
        """
        Validate model credentials using requests to ensure compatibility with all providers following
        OpenAI's API standard.

        :param model: model name
        :param credentials: model credentials
        :return:
        """
        try:
            headers = {
                'Content-Type': 'application/json'
            }

            api_key = credentials.get('api_key')
            if api_key:
                headers["Authorization"] = f"Bearer {api_key}"

            endpoint_url = credentials.get('endpoint_url')
            if endpoint_url and not endpoint_url.endswith('/'):
                endpoint_url += '/'
            server_url = credentials.get('server_url')

            # prepare the payload for a simple ping to the model
            data = {
                'model': model,
                'max_tokens': 5
            }

            completion_type = LLMMode.value_of(credentials['mode'])

            if completion_type is LLMMode.CHAT:
                data['messages'] = [
                    {
                        "role": "user",
                        "content": "ping"
                    },
                ]
                if 'endpoint_url' in credentials:
                    endpoint_url = str(URL(endpoint_url) / 'chat' / 'completions')
                elif 'server_url' in credentials:
                    endpoint_url = server_url
            elif completion_type is LLMMode.COMPLETION:
                data['prompt'] = 'ping'
                if 'endpoint_url' in credentials:
                    endpoint_url = str(URL(endpoint_url) / 'completions')
                elif 'server_url' in credentials:
                    endpoint_url = server_url
            else:
                raise ValueError("Unsupported completion type for model configuration.")

            # send a post request to validate the credentials
            response = requests.post(
                endpoint_url,
                headers=headers,
                json=data,
                timeout=(10, 60)
            )

            if response.status_code != 200:
                raise CredentialsValidateFailedError(
                    f'Credentials validation failed with status code {response.status_code}')

            # only parsed to confirm the endpoint returned valid JSON
            try:
                response.json()
            except json.JSONDecodeError:
                raise CredentialsValidateFailedError('Credentials validation failed: JSON decode error')
        except CredentialsValidateFailedError:
            raise
        except Exception as ex:
            raise CredentialsValidateFailedError(f'An error occurred during credentials validation: {str(ex)}')

    def _generate(self, model: str, credentials: dict, prompt_messages: list[PromptMessage], model_parameters: dict,
                  tools: Optional[list[PromptMessageTool]] = None, stop: Optional[list[str]] = None,
                  stream: bool = True, user: Optional[str] = None) -> Union[LLMResult, Generator]:
        """
        Invoke llm completion model

        :param model: model name
        :param credentials: credentials
        :param prompt_messages: prompt messages
        :param model_parameters: model parameters
        :param tools: tools for tool calling
        :param stop: stop words
        :param stream: is stream response
        :param user: unique user id
        :return: full response or stream response chunk generator result
        """
        headers = {
            'Content-Type': 'application/json',
            'Accept-Charset': 'utf-8',
        }

        api_key = credentials.get('api_key')
        if api_key:
            headers['Authorization'] = f'Bearer {api_key}'

        if stream:
            headers['Accept'] = 'text/event-stream'

        endpoint_url = credentials.get('endpoint_url')
        if endpoint_url and not endpoint_url.endswith('/'):
            endpoint_url += '/'
        server_url = credentials.get('server_url')

        data = {
            "model": model,
            "stream": stream,
            **model_parameters
        }

        completion_type = LLMMode.value_of(credentials['mode'])

        if completion_type is LLMMode.CHAT:
            if 'endpoint_url' in credentials:
                endpoint_url = str(URL(endpoint_url) / 'chat' / 'completions')
            elif 'server_url' in credentials:
                endpoint_url = server_url
            data['messages'] = [self._convert_prompt_message_to_dict(m) for m in prompt_messages]
        elif completion_type is LLMMode.COMPLETION:
            # bug fix: the original sent a hard-coded 'ping' prompt here
            # (copy-paste from _validate_credentials), discarding the
            # caller's actual prompt
            data['prompt'] = prompt_messages[0].content
            if 'endpoint_url' in credentials:
                endpoint_url = str(URL(endpoint_url) / 'completions')
            elif 'server_url' in credentials:
                endpoint_url = server_url
        else:
            raise ValueError("Unsupported completion type for model configuration.")

        # annotate tools with names, descriptions, etc.
        function_calling_type = credentials.get('function_calling_type', 'no_call')
        formatted_tools = []
        if tools:
            if function_calling_type == 'function_call':
                data['functions'] = [{
                    "name": tool.name,
                    "description": tool.description,
                    "parameters": tool.parameters
                } for tool in tools]
            elif function_calling_type == 'tool_call':
                data["tool_choice"] = "auto"

                for tool in tools:
                    formatted_tools.append(helper.dump_model(PromptMessageFunction(function=tool)))

                data["tools"] = formatted_tools

        if stop:
            data["stop"] = stop

        if user:
            data["user"] = user

        response = requests.post(
            endpoint_url,
            headers=headers,
            json=data,
            timeout=(10, 60),
            stream=stream
        )

        # requests guesses ISO-8859-1 when no charset is sent; force UTF-8
        if response.encoding is None or response.encoding == 'ISO-8859-1':
            response.encoding = 'utf-8'

        if not response.ok:
            raise InvokeError(f"API request failed with status code {response.status_code}: {response.text}")

        if stream:
            return self._handle_generate_stream_response(model, credentials, response, prompt_messages)

        return self._handle_generate_response(model, credentials, response, prompt_messages)
import logging

from core.model_runtime.entities.model_entities import ModelType
from core.model_runtime.errors.validate import CredentialsValidateFailedError
from core.model_runtime.model_providers.__base.model_provider import ModelProvider

logger = logging.getLogger(__name__)


# bug fix: this class lived in the NVIDIA provider module but was named
# MistralAIProvider (copy-paste leftover from the Mistral provider).
# NOTE(review): provider classes appear to be discovered as ModelProvider
# subclasses rather than by class name -- confirm the loader behavior.
class NVIDIAProvider(ModelProvider):

    def validate_provider_credentials(self, credentials: dict) -> None:
        """
        Validate provider credentials
        if validate failed, raise exception

        :param credentials: provider credentials, credentials form defined in `provider_credential_schema`.
        """
        try:
            model_instance = self.get_model_instance(ModelType.LLM)

            # probe with a known chat model from this provider's catalog
            model_instance.validate_credentials(
                model='mistralai/mixtral-8x7b-instruct-v0.1',
                credentials=credentials
            )
        except CredentialsValidateFailedError:
            raise
        except Exception as ex:
            logger.exception(f'{self.get_provider_schema().provider} credentials validate failed')
            raise ex
from math import exp
from typing import Optional

import requests

from core.model_runtime.entities.rerank_entities import RerankDocument, RerankResult
from core.model_runtime.errors.invoke import (
    InvokeAuthorizationError,
    InvokeBadRequestError,
    InvokeConnectionError,
    InvokeError,
    InvokeRateLimitError,
    InvokeServerUnavailableError,
)
from core.model_runtime.errors.validate import CredentialsValidateFailedError
from core.model_runtime.model_providers.__base.rerank_model import RerankModel


class NvidiaRerankModel(RerankModel):
    """
    Model class for NVIDIA rerank model.
    """

    def _sigmoid(self, logit: float) -> float:
        """Map a raw ranking logit to a (0, 1) relevance score."""
        return 1 / (1 + exp(-logit))

    def _invoke(self, model: str, credentials: dict,
                query: str, docs: list[str], score_threshold: Optional[float] = None, top_n: Optional[int] = None,
                user: Optional[str] = None) -> RerankResult:
        """
        Invoke rerank model

        :param model: model name
        :param credentials: model credentials
        :param query: search query
        :param docs: docs for reranking
        :param score_threshold: score threshold
        :param top_n: top n documents to return
        :param user: unique user id
        :return: rerank result
        """
        if len(docs) == 0:
            return RerankResult(model=model, docs=[])

        try:
            invoke_url = "https://ai.api.nvidia.com/v1/retrieval/nvidia/reranking"

            headers = {
                "Authorization": f"Bearer {credentials.get('api_key')}",
                "Accept": "application/json",
            }
            payload = {
                "model": model,
                "query": {"text": query},
                "passages": [{"text": doc} for doc in docs],
            }

            session = requests.Session()
            # robustness: avoid hanging forever on a stalled connection
            response = session.post(invoke_url, headers=headers, json=payload, timeout=(10, 60))
            response.raise_for_status()
            results = response.json()

            rerank_documents = []
            for result in results['rankings']:
                index = result['index']
                score = self._sigmoid(result['logit'])
                # bug fix: honor the caller-supplied score threshold, which
                # the original implementation silently ignored
                if score_threshold is not None and score < score_threshold:
                    continue
                rerank_documents.append(RerankDocument(
                    index=index,
                    text=docs[index],
                    score=score,
                ))

            # bug fix: honor top_n, which the original silently ignored.
            # NOTE(review): assumes 'rankings' is ordered by relevance
            # descending -- confirm against the API contract.
            if top_n is not None:
                rerank_documents = rerank_documents[:top_n]

            return RerankResult(model=model, docs=rerank_documents)
        except requests.HTTPError as e:
            raise InvokeServerUnavailableError(str(e))

    def validate_credentials(self, model: str, credentials: dict) -> None:
        """
        Validate model credentials

        :param model: model name
        :param credentials: model credentials
        :return:
        """
        try:
            # a real (tiny) rerank request is the only reliable way to
            # validate the API key
            self._invoke(
                model=model,
                credentials=credentials,
                query="What is the GPU memory bandwidth of H100 SXM?",
                docs=[
                    "Example doc 1",
                    "Example doc 2",
                    "Example doc 3",
                ],
            )
        except Exception as ex:
            raise CredentialsValidateFailedError(str(ex))

    @property
    def _invoke_error_mapping(self) -> dict[type[InvokeError], list[type[Exception]]]:
        """
        Map model invoke error to unified error
        """
        return {
            InvokeConnectionError: [requests.ConnectionError],
            InvokeServerUnavailableError: [requests.HTTPError],
            InvokeRateLimitError: [],
            InvokeAuthorizationError: [requests.HTTPError],
            InvokeBadRequestError: [requests.RequestException]
        }
class NvidiaTextEmbeddingModel(TextEmbeddingModel):
    """
    Model class for Nvidia text embedding model.
    """
    # dedicated retrieval endpoint (not the OpenAI-compatible one)
    api_base: str = 'https://ai.api.nvidia.com/v1/retrieval/nvidia/embeddings'
    # models this implementation accepts
    models: list[str] = ['NV-Embed-QA']

    def _invoke(self, model: str, credentials: dict,
                texts: list[str], user: Optional[str] = None) \
            -> TextEmbeddingResult:
        """
        Invoke text embedding model

        :param model: model name
        :param credentials: model credentials
        :param texts: texts to embed
        :param user: unique user id
        :return: embeddings result
        """
        api_key = credentials['api_key']
        if model not in self.models:
            raise InvokeBadRequestError('Invalid model name')
        if not api_key:
            raise CredentialsValidateFailedError('api_key is required')
        url = self.api_base
        headers = {
            'Authorization': 'Bearer ' + api_key,
            'Content-Type': 'application/json'
        }

        # NOTE(review): only the first text is sent. The model config caps
        # max_chunks at 1, so callers presumably pass one text per call --
        # confirm, since extra texts would be silently dropped here.
        data = {
            'model': model,
            'input': texts[0],
            'input_type': 'query'
        }

        try:
            # robustness: avoid hanging forever on a stalled connection
            response = post(url, headers=headers, data=dumps(data), timeout=(10, 60))
        except Exception as e:
            raise InvokeConnectionError(str(e))

        if response.status_code != 200:
            try:
                resp = response.json()
                # bug fix: fall back to raw body instead of raising a bare
                # KeyError when the error payload has no 'detail' field
                msg = resp.get('detail', response.text)
                if response.status_code == 401:
                    raise InvokeAuthorizationError(msg)
                elif response.status_code == 429:
                    raise InvokeRateLimitError(msg)
                elif response.status_code == 500:
                    raise InvokeServerUnavailableError(msg)
                else:
                    raise InvokeError(msg)
            except JSONDecodeError as e:
                raise InvokeServerUnavailableError(f"Failed to convert response to json: {e} with text: {response.text}")

        try:
            resp = response.json()
            embeddings = resp['data']
            usage = resp['usage']
        except Exception as e:
            raise InvokeServerUnavailableError(f"Failed to convert response to json: {e} with text: {response.text}")

        usage = self._calc_response_usage(model=model, credentials=credentials, tokens=usage['total_tokens'])

        result = TextEmbeddingResult(
            model=model,
            embeddings=[[
                float(data) for data in x['embedding']
            ] for x in embeddings],
            usage=usage
        )

        return result

    def get_num_tokens(self, model: str, credentials: dict, texts: list[str]) -> int:
        """
        Get number of tokens for given prompt messages

        :param model: model name
        :param credentials: model credentials
        :param texts: texts to embed
        :return:
        """
        num_tokens = 0
        for text in texts:
            # approximate with the GPT-2 tokenizer (no NVIDIA tokenizer
            # is available locally)
            num_tokens += self._get_num_tokens_by_gpt2(text)
        return num_tokens

    def validate_credentials(self, model: str, credentials: dict) -> None:
        """
        Validate model credentials

        :param model: model name
        :param credentials: model credentials
        :return:
        """
        try:
            self._invoke(model=model, credentials=credentials, texts=['ping'])
        except InvokeAuthorizationError:
            raise CredentialsValidateFailedError('Invalid api key')

    @property
    def _invoke_error_mapping(self) -> dict[type[InvokeError], list[type[Exception]]]:
        """
        Map model invoke error to unified error
        """
        return {
            InvokeConnectionError: [
                InvokeConnectionError
            ],
            InvokeServerUnavailableError: [
                InvokeServerUnavailableError
            ],
            InvokeRateLimitError: [
                InvokeRateLimitError
            ],
            InvokeAuthorizationError: [
                InvokeAuthorizationError
            ],
            InvokeBadRequestError: [
                KeyError
            ]
        }

    def _calc_response_usage(self, model: str, credentials: dict, tokens: int) -> EmbeddingUsage:
        """
        Calculate response usage

        :param model: model name
        :param credentials: model credentials
        :param tokens: input tokens
        :return: usage
        """
        # get input price info
        input_price_info = self.get_price(
            model=model,
            credentials=credentials,
            price_type=PriceType.INPUT,
            tokens=tokens
        )

        # transform usage
        usage = EmbeddingUsage(
            tokens=tokens,
            total_tokens=tokens,
            unit_price=input_price_info.unit_price,
            price_unit=input_price_info.unit,
            total_price=input_price_info.total_amount,
            currency=input_price_info.currency,
            # NOTE(review): self.started_at is not set anywhere in this
            # module -- presumably set by the base-class invoke wrapper;
            # confirm, otherwise this raises AttributeError
            latency=time.perf_counter() - self.started_at
        )

        return usage
web: - image: langgenius/dify-web:0.5.9 + image: langgenius/dify-web:0.5.10 restart: always environment: EDITION: SELF_HOSTED diff --git a/web/package.json b/web/package.json index fc466f42b3..513efdc657 100644 --- a/web/package.json +++ b/web/package.json @@ -1,6 +1,6 @@ { "name": "dify-web", - "version": "0.5.9", + "version": "0.5.10", "private": true, "scripts": { "dev": "next dev", From d018e279f895a409edf835985393b9c67561147e Mon Sep 17 00:00:00 2001 From: Bowen Liang Date: Tue, 19 Mar 2024 22:21:58 +0800 Subject: [PATCH 18/18] fix: typo $ mark in logs of vdb migrate command (#2901) --- api/commands.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/api/commands.py b/api/commands.py index 250039a365..b82d4d5d5d 100644 --- a/api/commands.py +++ b/api/commands.py @@ -254,7 +254,7 @@ def migrate_knowledge_vector_database(): for dataset in datasets: total_count = total_count + 1 click.echo(f'Processing the {total_count} dataset {dataset.id}. ' - + f'{create_count} created, ${skipped_count} skipped.') + + f'{create_count} created, {skipped_count} skipped.') try: click.echo('Create dataset vdb index: {}'.format(dataset.id)) if dataset.index_struct_dict: